diff --git a/.cargo/config b/.cargo/config
new file mode 100644
index 0000000000..730c3831e6
--- /dev/null
+++ b/.cargo/config
@@ -0,0 +1,2 @@
+[target.x86_64-unknown-linux-musl]
+linker = "x86_64-linux-musl-gcc"
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000..8a8f42360a
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+target/
+**/*.rs.bk
+.DS_Store
+.docker-cargo
+target-docker
+.vscode/tasks.json
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000000..0ed381ea8a
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,3193 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "Inflector"
+version = "0.11.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "aho-corasick"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "annotate-snippets"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "ansi_term"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "arrayref"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "arrayvec"
+version = "0.4.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "async-test-derive"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "atty"
+version = "0.2.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "autocfg"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "backtrace"
+version = "0.3.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "backtrace-sys 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-demangle 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "backtrace-sys"
+version = "0.1.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cc 1.0.38 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "base64"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "bitflags"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "blake2b_simd"
+version = "0.5.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "block-buffer"
+version = "0.7.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "block-padding 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "block-padding"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "bstr"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex-automata 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "byte-tools"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "bytecount"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "byteorder"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "bytes"
+version = "0.4.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "either 1.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "c2-chacha"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "cargo_metadata"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_derive 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "cc"
+version = "1.0.38"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "cfg-if"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "chashmap"
+version = "2.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "chrono"
+version = "0.4.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-integer 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "clap"
+version = "2.33.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "cloudabi"
+version = "0.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "constant_time_eq"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "content_inspector"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crc32c"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "crossbeam"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-channel 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-deque 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-epoch 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-channel"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-epoch 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-deque"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.6.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-epoch"
+version = "0.7.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-queue"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.6.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "csv"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bstr 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "csv-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "csv-core"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "ct-logs"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "sct 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "ctrlc"
+version = "3.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "nix 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "data-encoding"
+version = "2.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "derive-new"
+version = "0.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "deunicode"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "diff"
+version = "0.1.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "digest"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "dirs"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "redox_users 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "dirs"
+version = "2.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "dirs-sys 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "dirs-sys"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "redox_users 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "dtoa"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "either"
+version = "1.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "ena"
+version = "0.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "encode_unicode"
+version = "0.3.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "env_logger"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "humantime 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "termcolor 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "error-chain"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "backtrace 0.3.34 (registry+https://github.com/rust-lang/crates.io-index)",
+ "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "failure"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "backtrace 0.3.34 (registry+https://github.com/rust-lang/crates.io-index)",
+ "failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "failure_derive"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)",
+ "synstructure 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "fake-simd"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "fluvio"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ctrlc 3.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "future-helper 0.1.0-alpha.1",
+ "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "k8-metadata 0.1.0-alpha.1",
+ "kf-protocol 0.1.0-alpha.1",
+ "kf-socket 0.1.0-alpha.1",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "prettytable-rs 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "sc-api 0.1.0-alpha.1",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_yaml 0.8.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "spu-api 0.1.0-alpha.1",
+ "structopt 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "toml 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "types 0.1.0-alpha.1",
+ "utils 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "flv-integration-test"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "future-aio 0.1.0-alpha.1",
+ "future-helper 0.1.0-alpha.1",
+ "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "internal-api 0.1.0-alpha.1",
+ "kf-protocol 0.1.0-alpha.1",
+ "kf-service 0.1.0-alpha.1",
+ "kf-socket 0.1.0-alpha.1",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "metadata 0.1.0-alpha.1",
+ "types 0.1.0-alpha.1",
+ "utils 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "fnv"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "fuchsia-cprng"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "fuchsia-zircon"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "fuchsia-zircon-sys"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "future-aio"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "future-helper 0.1.0-alpha.1",
+ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "nix 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-threadpool 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "utils 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "future-helper"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "async-test-derive 0.1.0-alpha.1",
+ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)",
+ "utils 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "futures"
+version = "0.1.28"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "futures-channel-preview"
+version = "0.3.0-alpha.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures-core-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-sink-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "futures-core-preview"
+version = "0.3.0-alpha.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "futures-cpupool"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "futures-executor-preview"
+version = "0.3.0-alpha.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures-channel-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-core-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-util-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "futures-io-preview"
+version = "0.3.0-alpha.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "futures-preview"
+version = "0.3.0-alpha.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures-channel-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-core-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-executor-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-io-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-sink-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-util-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "futures-select-macro-preview"
+version = "0.3.0-alpha.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro-hack 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "futures-sink-preview"
+version = "0.3.0-alpha.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures-core-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "futures-util-preview"
+version = "0.3.0-alpha.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-channel-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-core-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-io-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-select-macro-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-sink-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro-hack 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro-nested 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "generic-array"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "getopts"
+version = "0.2.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "glob"
+version = "0.2.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "globset"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bstr 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "h2"
+version = "0.1.26"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "http 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "indexmap 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "string 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "heck"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unicode-segmentation 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "http"
+version = "0.1.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "http-body"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "http 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "httparse"
+version = "1.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "humansize"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "humantime"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "hyper"
+version = "0.12.33"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "h2 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)",
+ "http 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "http-body 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-executor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-reactor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-threadpool 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-timer 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "hyper-rustls"
+version = "0.16.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ct-logs 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustls 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-rustls 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "webpki 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "webpki-roots 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "idna"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-normalization 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "ignore"
+version = "0.4.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "crossbeam-channel 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "globset 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "same-file 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "walkdir 2.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "indexmap"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "internal-api"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "kf-protocol 0.1.0-alpha.1",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "metadata 0.1.0-alpha.1",
+ "types 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "iovec"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "itertools"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "either 1.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "itoa"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "jobserver"
+version = "0.1.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "k8-client"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "future-helper 0.1.0-alpha.1",
+ "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "http 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hyper-rustls 0.16.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "k8-config 0.1.0-alpha.1",
+ "k8-diff 0.1.0-alpha.1",
+ "k8-fixtures 0.1.0-alpha.1",
+ "k8-metadata 0.1.0-alpha.1",
+ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustls 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_qs 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "types 0.1.0-alpha.1",
+ "utils 0.1.0-alpha.1",
+ "webpki 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "k8-config"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_yaml 0.8.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "k8-diff"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "k8-fixtures"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "k8-client 0.1.0-alpha.1",
+ "k8-metadata 0.1.0-alpha.1",
+ "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_derive 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "k8-metadata"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "metadata-auth-token 0.1.0-alpha.1",
+ "metadata-core 0.1.0-alpha.1",
+ "metadata-partition 0.1.0-alpha.1",
+ "metadata-spg 0.1.0-alpha.1",
+ "metadata-spu 0.1.0-alpha.1",
+ "metadata-topic 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "kernel32-sys"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "kf-protocol"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "kf-protocol-api 0.1.0-alpha.1",
+ "kf-protocol-core 0.1.0-alpha.1",
+ "kf-protocol-derive 0.1.0-alpha.1",
+ "kf-protocol-message 0.1.0-alpha.1",
+ "kf-protocol-transport 0.1.0-alpha.1",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "utils 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "kf-protocol-api"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "content_inspector 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crc32c 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "kf-protocol-build 0.1.0-alpha.1",
+ "kf-protocol-core 0.1.0-alpha.1",
+ "kf-protocol-derive 0.1.0-alpha.1",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "paste 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "utils 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "kf-protocol-build"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "Inflector 0.11.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustfmt-nightly 1.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
+ "structopt 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tera 0.11.20 (registry+https://github.com/rust-lang/crates.io-index)",
+ "textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "kf-protocol-core"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-derive 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "utils 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "kf-protocol-derive"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "kf-protocol-dump"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "kf-protocol-api 0.1.0-alpha.1",
+ "kf-protocol-core 0.1.0-alpha.1",
+ "kf-protocol-message 0.1.0-alpha.1",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "structopt 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)",
+ "utils 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "kf-protocol-message"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "content_inspector 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crc32c 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "kf-protocol-api 0.1.0-alpha.1",
+ "kf-protocol-build 0.1.0-alpha.1",
+ "kf-protocol-core 0.1.0-alpha.1",
+ "kf-protocol-derive 0.1.0-alpha.1",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "paste 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "utils 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "kf-protocol-transport"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)",
+ "future-aio 0.1.0-alpha.1",
+ "future-helper 0.1.0-alpha.1",
+ "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "kf-protocol-core 0.1.0-alpha.1",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "utils 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "kf-service"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "future-aio 0.1.0-alpha.1",
+ "future-helper 0.1.0-alpha.1",
+ "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "kf-protocol 0.1.0-alpha.1",
+ "kf-socket 0.1.0-alpha.1",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "types 0.1.0-alpha.1",
+ "utils 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "kf-socket"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "chashmap 2.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "future-aio 0.1.0-alpha.1",
+ "future-helper 0.1.0-alpha.1",
+ "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)",
+ "kf-protocol 0.1.0-alpha.1",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "utils 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "lazy_static"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "libc"
+version = "0.2.60"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "linked-hash-map"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "lock_api"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "log"
+version = "0.4.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "maplit"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "matches"
+version = "0.1.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "memchr"
+version = "2.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "memmap"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "memoffset"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "memoffset"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "metadata"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "k8-metadata 0.1.0-alpha.1",
+ "kf-protocol 0.1.0-alpha.1",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "types 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "metadata-auth-token"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "metadata-core 0.1.0-alpha.1",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "metadata-core"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_qs 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "metadata-partition"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "metadata-core 0.1.0-alpha.1",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "metadata-spg"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "metadata-core 0.1.0-alpha.1",
+ "metadata-spu 0.1.0-alpha.1",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_derive 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_qs 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "types 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "metadata-spu"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "metadata-core 0.1.0-alpha.1",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
+ "types 0.1.0-alpha.1",
+]
+
+[[package]]
+name = "metadata-topic"
+version = "0.1.0-alpha.1"
+dependencies = [
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "metadata-core 0.1.0-alpha.1",
+ "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "mio"
+version = "0.6.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
+ "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "mio-uds"
+version = "0.6.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "miow"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "net2"
+version = "0.2.33"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "nix"
+version = "0.14.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc 1.0.38 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "nix"
+version = "0.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cc 1.0.38 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "nodrop"
+version = "0.1.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "num-derive"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "num-integer"
+version = "0.1.41"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "num-traits"
+version = "0.2.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "opaque-debug"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "owning_ref"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "owning_ref"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "parking_lot"
+version = "0.4.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "parking_lot"
+version = "0.6.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "parking_lot"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "parking_lot_core"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "paste"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "paste-impl 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro-hack 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "paste-impl"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro-hack 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "percent-encoding"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "pest"
+version = "2.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "ucd-trie 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "pest_derive"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "pest 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pest_generator 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "pest_generator"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "pest 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pest_meta 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "pest_meta"
+version = "2.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "maplit 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "pest 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "sha-1 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "pin-utils"
+version = "0.1.0-alpha.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "prettytable-rs"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "csv 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "encode_unicode 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "term 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "proc-macro-hack"
+version = "0.5.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "proc-macro-nested"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "proc-macro2"
+version = "0.4.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "quick-error"
+version = "1.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "quote"
+version = "0.6.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand"
+version = "0.4.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand"
+version = "0.5.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand"
+version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "getrandom 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "c2-chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "rand_core"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "getrandom 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_hc"
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_hc"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_isaac"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_jitter"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_os"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_pcg"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rand_xorshift"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rdrand"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.1.56"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]] +name = "redox_users" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_os 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", + "rust-argon2 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex-automata" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex-syntax" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "ring" +version = "0.14.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cc 1.0.38 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "spin 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rust-argon2" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "blake2b_simd 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-ap-arena" +version = "546.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rustc-ap-rustc_data_structures 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-ap-graphviz" +version = "546.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc-ap-rustc_data_structures" +version = "546.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "ena 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", + "indexmap 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "jobserver 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-graphviz 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-serialize 546.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon-core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", + "stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-ap-rustc_errors" +version = "546.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "annotate-snippets 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_data_structures 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-serialize 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-syntax_pos 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "termcolor 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-ap-rustc_lexer" +version = "546.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc-ap-rustc_macros" +version = "546.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", + "synstructure 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-ap-rustc_target" +version = "546.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_data_structures 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-serialize 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-syntax_pos 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-ap-serialize" +version = "546.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "indexmap 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-ap-syntax" +version = "546.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_data_structures 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_errors 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_lexer 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_macros 546.0.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_target 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-serialize 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-syntax_pos 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-ap-syntax_pos" +version = "546.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-arena 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_data_structures 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_macros 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-serialize 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc-hash" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-rayon" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "either 1.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon-core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-rayon-core" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustc-workspace-hack" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustfmt-config_proc_macro" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustfmt-nightly" +version = "1.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "annotate-snippets 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)", + "bytecount 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "cargo_metadata 
0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "derive-new 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)", + "diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "getopts 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", + "ignore 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_target 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-syntax 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-syntax_pos 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-workspace-hack 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustfmt-config_proc_macro 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", + "structopt 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "term 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-segmentation 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode_categories 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rustls" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "ring 0.14.6 (registry+https://github.com/rust-lang/crates.io-index)", + "sct 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "webpki 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ryu" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "same-file" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "sc-api" +version = "0.1.0-alpha.1" +dependencies = [ + "k8-metadata 0.1.0-alpha.1", + "kf-protocol 0.1.0-alpha.1", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "metadata 0.1.0-alpha.1", + "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", + "types 0.1.0-alpha.1", + "utils 0.1.0-alpha.1", +] + +[[package]] +name = "sc-server" +version = "0.1.0-alpha.1" +dependencies = [ + "base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "chashmap 2.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "future-aio 0.1.0-alpha.1", + "future-helper 0.1.0-alpha.1", + "futures-preview 0.3.0-alpha.17 
(registry+https://github.com/rust-lang/crates.io-index)", + "internal-api 0.1.0-alpha.1", + "k8-client 0.1.0-alpha.1", + "k8-config 0.1.0-alpha.1", + "k8-fixtures 0.1.0-alpha.1", + "k8-metadata 0.1.0-alpha.1", + "kf-protocol 0.1.0-alpha.1", + "kf-service 0.1.0-alpha.1", + "kf-socket 0.1.0-alpha.1", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "metadata 0.1.0-alpha.1", + "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "sc-api 0.1.0-alpha.1", + "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", + "structopt 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "types 0.1.0-alpha.1", + "utils 0.1.0-alpha.1", +] + +[[package]] +name = "scoped-tls" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "scopeguard" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "scopeguard" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "sct" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ring 0.14.6 (registry+https://github.com/rust-lang/crates.io-index)", + "untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "serde" +version = "1.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde_derive 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_derive" +version = "1.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_json" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_qs" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "data-encoding 2.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_qs" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + 
"data-encoding 2.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_yaml" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "linked-hash-map 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", + "yaml-rust 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "sha-1" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", + "digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "slab" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "slug" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "deunicode 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "smallvec" +version = "0.6.10" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "spin" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "spu-api" +version = "0.1.0-alpha.1" +dependencies = [ + "kf-protocol 0.1.0-alpha.1", + "kf-socket 0.1.0-alpha.1", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", + "utils 0.1.0-alpha.1", +] + +[[package]] +name = "spu-server" +version = "0.1.0-alpha.1" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "chashmap 2.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "future-aio 0.1.0-alpha.1", + "future-helper 0.1.0-alpha.1", + "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)", + "internal-api 0.1.0-alpha.1", + "k8-client 0.1.0-alpha.1", + "kf-protocol 0.1.0-alpha.1", + "kf-service 0.1.0-alpha.1", + "kf-socket 0.1.0-alpha.1", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "metadata 0.1.0-alpha.1", + "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_yaml 0.8.9 (registry+https://github.com/rust-lang/crates.io-index)", + "spu-api 0.1.0-alpha.1", + "storage 0.1.0-alpha.1", + "structopt 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "types 0.1.0-alpha.1", + "utils 0.1.0-alpha.1", +] + +[[package]] +name = "stable_deref_trait" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "storage" +version = 
"0.1.0-alpha.1" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "future-aio 0.1.0-alpha.1", + "future-helper 0.1.0-alpha.1", + "futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)", + "kf-protocol 0.1.0-alpha.1", + "kf-socket 0.1.0-alpha.1", + "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", + "structopt 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "types 0.1.0-alpha.1", + "utils 0.1.0-alpha.1", +] + +[[package]] +name = "string" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "strsim" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "structopt" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", + "structopt-derive 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "structopt-derive" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "syn" +version = "0.15.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "synstructure" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tera" +version = "0.11.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "chrono 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)", + "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "humansize 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pest 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pest_derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)", + 
"slug 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "unic-segment 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "term" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "term" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "termcolor" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "thread_local" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "time" +version = "0.1.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-current-thread 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-fs 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-sync 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-threadpool 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-udp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-buf" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "either 1.5.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-codec" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-current-thread" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-executor" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-fs" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-threadpool 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-io" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-reactor" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-sync 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-rustls" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", + "rustls 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "webpki 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-sync" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.28 
(registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-tcp" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-threadpool" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-timer" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-udp" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-uds" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)", + "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "toml" 
+version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "try-lock" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "typenum" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "types" +version = "0.1.0-alpha.1" + +[[package]] +name = "ucd-trie" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unic-char-property" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unic-char-range 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unic-char-range" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unic-common" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unic-segment" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unic-ucd-segment 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unic-ucd-segment" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unic-char-property 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unic-char-range 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "unic-ucd-version 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unic-ucd-version" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unic-common 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-normalization" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-segmentation" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unicode-width" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unicode-xid" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "unicode_categories" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "untrusted" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "url" +version = "1.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "utils" +version = "0.1.0-alpha.1" +dependencies = [ + "chrono 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.6.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "types 0.1.0-alpha.1", +] + +[[package]] +name = "vec_map" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "version_check" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "walkdir" +version = "2.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "same-file 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "want" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "webpki" +version = "0.19.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ring 0.14.6 (registry+https://github.com/rust-lang/crates.io-index)", + "untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "webpki-roots" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "webpki 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi-build" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi-util" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "wincolor" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ws2_32-sys" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + 
"winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "yaml-rust" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "linked-hash-map 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[metadata] +"checksum Inflector 0.11.4 (registry+https://github.com/rust-lang/crates.io-index)" = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" +"checksum aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d" +"checksum annotate-snippets 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c7021ce4924a3f25f802b2cccd1af585e39ea1a363a1aa2e72afe54b67a3a7a7" +"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +"checksum arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0d382e583f07208808f6b1249e60848879ba3543f57c32277bf52d69c2f0f0ee" +"checksum arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b8d73f9beda665eaa98ab9e4f7442bd4e7de6652587de55b2525e52e29c1b0ba" +"checksum atty 0.2.13 (registry+https://github.com/rust-lang/crates.io-index)" = "1803c647a3ec87095e7ae7acfca019e98de5ec9a7d01343f611cf3152ed71a90" +"checksum autocfg 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "22130e92352b948e7e82a49cdb0aa94f2211761117f29e052dd397c1ac33542b" +"checksum backtrace 0.3.34 (registry+https://github.com/rust-lang/crates.io-index)" = "b5164d292487f037ece34ec0de2fcede2faa162f085dd96d2385ab81b12765ba" +"checksum backtrace-sys 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)" = "82a830b4ef2d1124a711c71d263c5abdc710ef8e907bd508c88be475cebc422b" +"checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" +"checksum bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d155346769a6855b86399e9bc3814ab343cd3d62c7e985113d46a0ec3c281fd" +"checksum blake2b_simd 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "461f4b879a8eb70c1debf7d0788a9a5ff15f1ea9d25925fea264ef4258bed6b2" +"checksum block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +"checksum block-padding 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "6d4dc3af3ee2e12f3e5d224e5e1e3d73668abbeb69e566d361f7d5563a4fdf09" +"checksum bstr 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "94cdf78eb7e94c566c1f5dbe2abf8fc70a548fc902942a48c4b3a98b48ca9ade" +"checksum byte-tools 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" +"checksum bytecount 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "be0fdd54b507df8f22012890aadd099979befdba27713c767993f8380112ca7c" +"checksum byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5" +"checksum bytes 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)" = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" +"checksum c2-chacha 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7d64d04786e0f528460fc884753cf8dddcc466be308f6026f8e355c41a0e4101" +"checksum 
cargo_metadata 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e904f164f39cae0c3a4f2713eb97a47ba64676a071e99a69ddfef4994694d2c" +"checksum cc 1.0.38 (registry+https://github.com/rust-lang/crates.io-index)" = "ce400c638d48ee0e9ab75aef7997609ec57367ccfe1463f21bf53c3eca67bf46" +"checksum cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "b486ce3ccf7ffd79fdeb678eac06a9e6c09fc88d33836340becb8fffe87c5e33" +"checksum chashmap 2.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ff41a3c2c1e39921b9003de14bf0439c7b63a9039637c291e1a64925d8ddfa45" +"checksum chrono 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)" = "77d81f58b7301084de3b958691458a53c3f7e0b1d702f77e550b6a88e3a88abe" +"checksum clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5067f5bb2d80ef5d68b4c87db81601f0b75bca627bc2ef76b141d7b846a3c6d9" +"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +"checksum constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8ff012e225ce166d4422e0e78419d901719760f62ae2b7969ca6b564d1b54a9e" +"checksum content_inspector 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b7bda66e858c683005a53a9a60c69a4aca7eeaa45d124526e389f7aec8e62f38" +"checksum crc32c 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "77ba37ef26c12988c1cee882d522d65e1d5d2ad8c3864665b88ee92767ed84c5" +"checksum crossbeam 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d1c92ff2d7a202d592f5a412d75cf421495c913817781c1cb383bf12a77e185f" +"checksum crossbeam-channel 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "c8ec7fcd21571dc78f96cc96243cab8d8f035247c3efd16c687be154c3fa9efa" +"checksum crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f739f8c5363aca78cfb059edf753d8f0d36908c348f3d8d1503f03d8b75d9cf3" +"checksum crossbeam-deque 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "05e44b8cf3e1a625844d1750e1f7820da46044ff6d28f4d43e455ba3e5bb2c13" +"checksum crossbeam-deque 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b18cd2e169ad86297e6bc0ad9aa679aee9daa4f19e8163860faf7c164e4f5a71" +"checksum crossbeam-epoch 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "927121f5407de9956180ff5e936fe3cf4324279280001cd56b669d28ee7e9150" +"checksum crossbeam-epoch 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2449aaa4ec7ef96e5fb24db16024b935df718e9ae1cec0a1e68feeca2efca7b8" +"checksum crossbeam-epoch 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "fedcd6772e37f3da2a9af9bf12ebe046c0dfe657992377b4df982a2b54cd37a9" +"checksum crossbeam-queue 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b" +"checksum crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9" +"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6" +"checksum csv 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "37519ccdfd73a75821cac9319d4fce15a81b9fcf75f951df5b9988aa3a0af87d" +"checksum csv-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = 
"9b5cadb6b25c77aeff80ba701712494213f4a8418fcda2ee11b6560c3ad0bf4c" +"checksum ct-logs 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1b4660f8b07a560a88c02d76286edb9f0d5d64e495d2b0f233186155aa51be1f" +"checksum ctrlc 3.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c7dfd2d8b4c82121dfdff120f818e09fc4380b0b7e17a742081a89b94853e87f" +"checksum data-encoding 2.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f4f47ca1860a761136924ddd2422ba77b2ea54fe8cc75b9040804a0d9d32ad97" +"checksum derive-new 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)" = "c3fd04571b29c91cfbe1e7c9a228e069ac8635f180ffb4ccd6a6907617ee8bb0" +"checksum deunicode 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "850878694b7933ca4c9569d30a34b55031b9b139ee1fc7b94a527c4ef960d690" +"checksum diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "3c2b69f912779fbb121ceb775d74d51e915af17aaebc38d28a592843a2dd0a3a" +"checksum digest 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +"checksum dirs 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3fd78930633bd1c6e35c4b42b1df7b0cbc6bc191146e512bb3bedf243fcc3901" +"checksum dirs 2.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "13aea89a5c93364a98e9b37b2fa237effbb694d5cfe01c5b70941f7eb087d5e3" +"checksum dirs-sys 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "afa0b23de8fd801745c471deffa6e12d248f962c9fd4b4c33787b055599bde7b" +"checksum dtoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ea57b42383d091c85abcc2706240b94ab2a8fa1fc81c10ff23c4de06e2a90b5e" +"checksum either 1.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5527cfe0d098f36e3f8839852688e63c8fff1c90b2b405aef730615f9a7bcf7b" +"checksum ena 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3dc01d68e08ca384955a3aeba9217102ca1aa85b6e168639bf27739f1d749d87" +"checksum encode_unicode 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "90b2c9496c001e8cb61827acdefad780795c42264c137744cae6f7d9e3450abd" +"checksum env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "aafcde04e90a5226a6443b7aabdb016ba2f8307c847d524724bd9b346dd1a2d3" +"checksum error-chain 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3ab49e9dcb602294bc42f9a7dfc9bc6e936fca4418ea300dbfb84fe16de0b7d9" +"checksum failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "795bd83d3abeb9220f257e597aa0080a508b27533824adf336529648f6abf7e2" +"checksum failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "ea1063915fd7ef4309e222a5a07cf9c319fb9c7836b1f89b85458672dbb127e1" +"checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" +"checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" +"checksum fuchsia-cprng 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" +"checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" +"checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" +"checksum futures 0.1.28 (registry+https://github.com/rust-lang/crates.io-index)" = "45dc39533a6cae6da2b56da48edae506bb767ec07370f86f70fc062e9d435869" +"checksum futures-channel-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)" = "21c71ed547606de08e9ae744bb3c6d80f5627527ef31ecf2a7210d0e67bc8fae" +"checksum futures-core-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)" = "4b141ccf9b7601ef987f36f1c0d9522f76df3bba1cf2e63bfacccc044c4558f5" +"checksum futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" +"checksum futures-executor-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)" = "87ba260fe51080ba37f063ad5b0732c4ff1f737ea18dcb67833d282cdc2c6f14" +"checksum futures-io-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)" = "082e402605fcb8b1ae1e5ba7d7fdfd3e31ef510e2a8367dd92927bb41ae41b3a" +"checksum futures-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)" = "bf25f91c8a9a1f64c451e91b43ba269ed359b9f52d35ed4b3ce3f9c842435867" +"checksum futures-select-macro-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)" = "469d86239defe297ebcaf385ac5e999a77147f2f20eaf2a3aee7bff9e58e20a9" +"checksum futures-sink-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)" = "4309a25a1069a1f3c10647b227b9afe6722b67a030d3f00a9cbdc171fc038de4" +"checksum futures-util-preview 0.3.0-alpha.17 (registry+https://github.com/rust-lang/crates.io-index)" = "af8198c48b222f02326940ce2b3aa9e6e91a32886eeaad7ca3b8e4c70daa3f4e" +"checksum generic-array 0.12.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +"checksum getopts 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)" = "72327b15c228bfe31f1390f93dd5e9279587f0463836393c9df719ce62a3e450" +"checksum getrandom 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "34f33de6f0ae7c9cb5e574502a562e2b512799e32abb801cd1e79ad952b62b49" +"checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" +"checksum globset 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "925aa2cac82d8834e2b2a4415b6f6879757fb5c0928fc445ae76461a12eed8f2" +"checksum h2 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)" = "a5b34c246847f938a410a03c5458c7fee2274436675e76d8b903c08efc29c462" +"checksum heck 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +"checksum http 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "372bcb56f939e449117fb0869c2e8fd8753a8223d92a172c6e808cf123a5b6e4" +"checksum http-body 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6741c859c1b2463a423a1dbce98d418e6c3c3fc720fb0d45528657320920292d" +"checksum httparse 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +"checksum humansize 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b6cab2627acfc432780848602f3f558f7e9dd427352224b0d9324025796d2a5e" +"checksum humantime 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"3ca7e5f2e110db35f93b837c81797f3714500b81d517bf20c431b16d3ca4f114" +"checksum hyper 0.12.33 (registry+https://github.com/rust-lang/crates.io-index)" = "7cb44cbce9d8ee4fb36e4c0ad7b794ac44ebaad924b9c8291a63215bb44c2c8f" +"checksum hyper-rustls 0.16.1 (registry+https://github.com/rust-lang/crates.io-index)" = "15b66d1bd4864ef036adf2363409caa3acd63ebb4725957b66e621c8a36631a3" +"checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" +"checksum ignore 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0ec16832258409d571aaef8273f3c3cc5b060d784e159d1a0f3b0017308f84a7" +"checksum indexmap 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7e81a7c05f79578dbc15793d8b619db9ba32b4577003ef3af1a91c416798c58d" +"checksum iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08" +"checksum itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5b8467d9c1cebe26feb08c640139247fac215782d35371ade9a2136ed6085358" +"checksum itoa 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "501266b7edd0174f8530248f87f99c88fbe60ca4ef3dd486835b8d8d53136f7f" +"checksum jobserver 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "f74e73053eaf95399bf926e48fc7a2a3ce50bd0eaaa2357d391e95b2dcdd4f10" +"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" +"checksum lazy_static 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bc5729f27f159ddd61f4df6228e827e86643d4d3e7c32183cb30a1c08f604a14" +"checksum libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)" = "d44e80633f007889c7eff624b709ab43c92d708caad982295768a7b13ca3b5eb" +"checksum linked-hash-map 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ae91b68aebc4ddb91978b11a1b02ddd8602a05ec19002801c5666000e05e0f83" +"checksum lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c" +"checksum log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +"checksum maplit 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "08cbb6b4fef96b6d77bfc40ec491b1690c779e77b05cd9f07f787ed376fd4c43" +"checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +"checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e" +"checksum memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" +"checksum memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3" +"checksum memoffset 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ce6075db033bbbb7ee5a0bbd3a3186bbae616f57fb001c485c7ff77955f8177f" +"checksum mio 0.6.19 (registry+https://github.com/rust-lang/crates.io-index)" = "83f51996a3ed004ef184e16818edc51fadffe8e7ca68be67f9dee67d84d0ff23" +"checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" 
+"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" +"checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" +"checksum nix 0.14.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6c722bee1037d430d0f8e687bbdbf222f27cc6e4e68d5caf630857bb2b6dbdce" +"checksum nix 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3b2e0b4f3320ed72aaedb9a5ac838690a8047c7b275da22711fddff4f8a14229" +"checksum nodrop 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9667ddcc6cc8a43afc9b7917599d7216aa09c463919ea32c59ed6cac8bc945" +"checksum num-derive 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "eafd0b45c5537c3ba526f79d3e75120036502bebacbb3f3220914067ce39dbf2" +"checksum num-integer 0.1.41 (registry+https://github.com/rust-lang/crates.io-index)" = "b85e541ef8255f6cf42bbfe4ef361305c6c135d10919ecc26126c4e5ae94bc09" +"checksum num-traits 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "6ba9a427cfca2be13aa6f6403b0b7e7368fe982bfa16fccc450ce74c46cd9b32" +"checksum num_cpus 1.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bcef43580c035376c0705c42792c294b66974abbfd2789b511784023f71f3273" +"checksum opaque-debug 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +"checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37" +"checksum owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "49a4b8ea2179e6a2e27411d3bca09ca6dd630821cf6894c6c7c8467a8ee7ef13" +"checksum parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)" = "149d8f5b97f3c1133e3cfcd8886449959e856b557ff281e292b733d7c69e005e" +"checksum parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f0802bff09003b291ba756dc7e79313e51cc31667e94afbe847def490424cde5" +"checksum parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337" +"checksum parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "4db1a8ccf734a7bce794cc19b3df06ed87ab2f3907036b693c68f56b4d4537fa" +"checksum parking_lot_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad7f7e6ebdc79edff6fdcb87a55b620174f7a989e3eb31b65231f4af57f00b8c" +"checksum parking_lot_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "94c8c7923936b28d546dfd14d4472eaf34c99b14e1c973a32b3e6d4eb04298c9" +"checksum paste 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "1f4a4a1c555c6505821f9d58b8779d0f630a6b7e4e1be24ba718610acf01fa79" +"checksum paste-impl 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "26e796e623b8b257215f27e6c80a5478856cae305f5b59810ff9acdaa34570e6" +"checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" +"checksum pest 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "933085deae3f32071f135d799d75667b63c8dc1f4537159756e3d4ceab41868c" +"checksum pest_derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "833d1ae558dc601e9a60366421196a8d94bc0ac980476d0b67e1d0988d72b2d0" +"checksum 
pest_generator 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "63120576c4efd69615b5537d3d052257328a4ca82876771d6944424ccfd9f646" +"checksum pest_meta 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f249ea6de7c7b7aba92b4ff4376a994c6dbd98fd2166c89d5c4947397ecb574d" +"checksum pin-utils 0.1.0-alpha.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" +"checksum ppv-lite86 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e3cbf9f658cdb5000fcf6f362b8ea2ba154b9f146a61c7a20d647034c6b6561b" +"checksum prettytable-rs 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0fd04b170004fa2daccf418a7f8253aaf033c27760b5f225889024cf66d7ac2e" +"checksum proc-macro-hack 0.5.8 (registry+https://github.com/rust-lang/crates.io-index)" = "982a35d1194084ba319d65c4a68d24ca28f5fdb5b8bc20899e4eef8641ea5178" +"checksum proc-macro-nested 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "369a6ed065f249a159e06c45752c780bda2fb53c995718f9e484d08daa9eb42e" +"checksum proc-macro2 0.4.30 (registry+https://github.com/rust-lang/crates.io-index)" = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" +"checksum quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9274b940887ce9addde99c4eee6b5c44cc494b182b97e73dc8ffdcb3397fd3f0" +"checksum quote 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" +"checksum rand 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" +"checksum rand 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c618c47cd3ebd209790115ab837de41425723956ad3ce2e6a7f09890947cacb9" +"checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" +"checksum rand 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d47eab0e83d9693d40f825f86948aa16eff6750ead4bdffc4ab95b8b3a7f052c" +"checksum rand_chacha 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" +"checksum rand_chacha 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853" +"checksum rand_core 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +"checksum rand_core 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" +"checksum rand_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "615e683324e75af5d43d8f7a39ffe3ee4a9dc42c5c701167a71dc59c3a493aca" +"checksum rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" +"checksum rand_hc 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +"checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" +"checksum rand_jitter 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" +"checksum rand_os 0.1.3 
(registry+https://github.com/rust-lang/crates.io-index)" = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" +"checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" +"checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" +"checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" +"checksum redox_users 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4ecedbca3bf205f8d8f5c2b44d83cd0690e39ee84b951ed649e9f1841132b66d" +"checksum regex 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88c3d9193984285d544df4a30c23a4e62ead42edf70a4452ceb76dac1ce05c26" +"checksum regex-automata 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "92b73c2a1770c255c240eaa4ee600df1704a38dc3feaa6e949e7fcd4f8dc09f9" +"checksum regex-syntax 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b143cceb2ca5e56d5671988ef8b15615733e7ee16cd348e064333b251b89343f" +"checksum ring 0.14.6 (registry+https://github.com/rust-lang/crates.io-index)" = "426bc186e3e95cac1e4a4be125a4aca7e84c2d616ffc02244eef36e2a60a093c" +"checksum rust-argon2 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "81ed8d04228b44a740c8d46ff872a28e50fff3d659f307ab4da2cc502e019ff3" +"checksum rustc-ap-arena 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4dc2e1e68b64268c543bfa6e63e3c0d9ea58074c71396f42f76931f35a9287f9" +"checksum rustc-ap-graphviz 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c108d647ce0dd46477b048eafff5a6273b5652e02d47424b0cd684147379c811" +"checksum rustc-ap-rustc_data_structures 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "656771744e0783cb8e4481e3b8b1f975687610aaf18833b898018111a0e0e582" +"checksum rustc-ap-rustc_errors 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e37064f6624bc799bfaa2968b61ee6880926dea2a8bba69f18aef6c8e69c9604" +"checksum rustc-ap-rustc_lexer 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ef5bc0a971823637ea23a857f0ef1467f44b1e05d71968821f83a0abe53e0fe3" +"checksum rustc-ap-rustc_macros 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b90037e3336fe8835f468db44d0848ae10d9cc8533ae89b55828883f905b7e80" +"checksum rustc-ap-rustc_target 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cadf9ca07315eab3a7a21f63872f9cc81e250fd6ede0419c24f8926ade73a45d" +"checksum rustc-ap-serialize 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "61673783f2089e01033ffa82d1988f55175402071b31253a358292e1624d4602" +"checksum rustc-ap-syntax 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "28f3dd1346d5b0269c07a4a78855e309a298ab569c9c1302d4d4f57f8eee4e84" +"checksum rustc-ap-syntax_pos 546.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "45e67b526dbda3a0c7dab91c8947d43685e7697f52686a4949da3c179cd7c979" +"checksum rustc-demangle 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)" = "a7f4dccf6f4891ebcc0c39f9b6eb1a83b9bf5d747cb439ec6fba4f3b977038af" +"checksum rustc-hash 1.0.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "7540fc8b0c49f096ee9c961cda096467dce8084bec6bdca2fc83895fd9b28cb8" +"checksum rustc-rayon 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0d2e07e19601f21c59aad953c2632172ba70cb27e685771514ea66e4062b3363" +"checksum rustc-rayon-core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "79d38ca7cbc22fa59f09d8534ea4b27f67b0facf0cbe274433aceea227a02543" +"checksum rustc-workspace-hack 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc71d2faa173b74b232dedc235e3ee1696581bb132fc116fa3626d6151a1a8fb" +"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +"checksum rustfmt-config_proc_macro 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "faae807d1bde4688d2046ebb7bcf3896ce5b766bd0f1120a526cec89c3331b9a" +"checksum rustfmt-nightly 1.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a74323fbbe92ac656bb94b77e236cbb6e34efb601d9b1d494e514746b02902be" +"checksum rustls 0.15.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f271e3552cd835fa28c541c34a7e8fdd8cdff09d77fe4eb8f6c42e87a11b096e" +"checksum ryu 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c92464b447c0ee8c4fb3824ecc8383b81717b9f1e74ba2e72540aef7b9f82997" +"checksum same-file 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "585e8ddcedc187886a30fa705c47985c3fa88d06624095856b36ca0b82ff4421" +"checksum scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" +"checksum scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27" +"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d" +"checksum sct 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2f5adf8fbd58e1b1b52699dc8bed2630faecb6d8c7bee77d009d6bbe4af569b9" +"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" +"checksum serde 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)" = "7fe5626ac617da2f2d9c48af5515a21d5a480dbd151e01bb1c355e26a3e68113" +"checksum serde_derive 1.0.98 (registry+https://github.com/rust-lang/crates.io-index)" = "01e69e1b8a631f245467ee275b8c757b818653c6d704cdbcaeb56b56767b529c" +"checksum serde_json 1.0.40 (registry+https://github.com/rust-lang/crates.io-index)" = "051c49229f282f7c6f3813f8286cc1e3323e8051823fce42c7ea80fe13521704" +"checksum serde_qs 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "35965fa1d2413717053d67c2df1f5c3e1763fbf77200ea7e767523707bd5a0af" +"checksum serde_qs 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b232c387059b62b17eb0487bf23de3ddf21b648ad2206fadc6ff3af9e2f3c07" +"checksum serde_yaml 0.8.9 (registry+https://github.com/rust-lang/crates.io-index)" = "38b08a9a90e5260fe01c6480ec7c811606df6d3a660415808c3c3fa8ed95b582" +"checksum sha-1 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "23962131a91661d643c98940b20fcaffe62d776a823247be80a48fcb8b6fce68" +"checksum slab 0.4.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" +"checksum slug 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "b3bc762e6a4b6c6fcaade73e77f9ebc6991b676f88bb2358bddb56560f073373" +"checksum smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "ab606a9c5e214920bb66c458cd7be8ef094f813f20fe77a54cc7dbfff220d4b7" +"checksum spin 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbdb51a221842709c2dd65b62ad4b78289fc3e706a02c17a26104528b6aa7837" +"checksum stable_deref_trait 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dba1a27d3efae4351c8051072d619e3ade2820635c3958d826bfea39d59b54c8" +"checksum string 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d24114bfcceb867ca7f71a0d3fe45d45619ec47a6fbfa98cb14e14250bfa5d6d" +"checksum strsim 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +"checksum structopt 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "16c2cdbf9cc375f15d1b4141bc48aeef444806655cd0e904207edc8d68d86ed7" +"checksum structopt-derive 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "53010261a84b37689f9ed7d395165029f9cc7abb9f56bbfe86bee2597ed25107" +"checksum syn 0.15.44 (registry+https://github.com/rust-lang/crates.io-index)" = "9ca4b3b69a77cbe1ffc9e198781b7acb0c7365a883670e8f1c1bc66fba79a5c5" +"checksum synstructure 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)" = "02353edf96d6e4dc81aea2d8490a7e9db177bf8acb0e951c24940bf866cb313f" +"checksum tera 0.11.20 (registry+https://github.com/rust-lang/crates.io-index)" = "4b505279e19d8f7d24b1a9dc58327c9c36174b1a2c7ebdeac70792d017cb64f3" +"checksum term 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "edd106a334b7657c10b7c540a0106114feadeb4dc314513e97df481d5d966f42" +"checksum term 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c0863a3345e70f61d613eab32ee046ccd1bcc5f9105fe402c61fcd0c13eeb8b5" +"checksum termcolor 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "96d6098003bde162e4277c70665bd87c326f5a0c3f3fbfb285787fa482d54e6e" +"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" +"checksum time 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)" = "db8dcfca086c1143c9270ac42a2bbd8a7ee477b78ac8e45b19abfb0cbede4b6f" +"checksum tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)" = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" +"checksum tokio-buf 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8fb220f46c53859a4b7ec083e41dec9778ff0b1851c0942b211edb89e0ccdc46" +"checksum tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5c501eceaf96f0e1793cf26beb63da3d11c738c4a943fdf3746d81d64684c39f" +"checksum tokio-current-thread 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "d16217cad7f1b840c5a97dfb3c43b0c871fef423a6e8d2118c604e843662a443" +"checksum tokio-executor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "0f27ee0e6db01c5f0b2973824547ce7e637b2ed79b891a9677b0de9bd532b6ac" +"checksum tokio-fs 0.1.6 
(registry+https://github.com/rust-lang/crates.io-index)" = "3fe6dc22b08d6993916647d108a1a7d15b9cd29c4f4496c62b92c45b5041b7af" +"checksum tokio-io 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "5090db468dad16e1a7a54c8c67280c5e4b544f3d3e018f0b913b400261f85926" +"checksum tokio-reactor 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "6af16bfac7e112bea8b0442542161bfc41cbfa4466b580bdda7d18cb88b911ce" +"checksum tokio-rustls 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1600e90b2602df28ff54ae842519b408fbb25378c3c5aee1b795593e9263dc80" +"checksum tokio-sync 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2162248ff317e2bc713b261f242b69dbb838b85248ed20bb21df56d60ea4cae7" +"checksum tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1d14b10654be682ac43efee27401d792507e30fd8d26389e1da3b185de2e4119" +"checksum tokio-threadpool 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)" = "90ca01319dea1e376a001e8dc192d42ebde6dd532532a5bad988ac37db365b19" +"checksum tokio-timer 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "f2106812d500ed25a4f38235b9cae8f78a09edf43203e16e59c3b769a342a60e" +"checksum tokio-udp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "66268575b80f4a4a710ef83d087fdfeeabdce9b74c797535fbac18a2cb906e92" +"checksum tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "037ffc3ba0e12a0ab4aca92e5234e0dedeb48fddf6ccd260f1f150a36a9f2445" +"checksum toml 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b8c96d7873fa7ef8bdeb3a9cda3ac48389b4154f32b9803b4bc26220b677b039" +"checksum try-lock 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" +"checksum typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "612d636f949607bdf9b123b4a6f6d966dedf3ff669f7f045890d3a4a73948169" +"checksum ucd-trie 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8f00ed7be0c1ff1e24f46c3d2af4859f7e863672ba3a6e92e7cff702bf9f06c2" +"checksum unic-char-property 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ce36d3f7ce754afdbccccf8ff0dd0134e50fb44aaae579f96218856e9e5dbd1e" +"checksum unic-char-range 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d9ab85fab42ad1b26cafc03bf891f69cb4d6e15f491030e89a0122197baa8ae8" +"checksum unic-common 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff8d4a7ade929ef7d971e16ced21a8cd56a63869aa6032dfb8cb083cf7d077bf" +"checksum unic-segment 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c9ca47cbb09fb5fcd066b5867d11dc528302fa465277882797d6a836e1ee6f9e" +"checksum unic-ucd-segment 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "48f1a08ce0409a9e391b88d1930118eec48af12742fc538bcec55f775865776e" +"checksum unic-ucd-version 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1f5e6c6c53c2d0ece4a5964bc55fcff8602153063cb4fab20958ff32998ff6" +"checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" +"checksum unicode-normalization 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "141339a08b982d942be2ca06ff8b076563cbe223d1befd5450716790d44e2426" +"checksum unicode-segmentation 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"1967f4cdfc355b37fd76d2a954fb2ed3871034eb4f26d60537d88795cfc332a9" +"checksum unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "882386231c45df4700b275c7ff55b6f3698780a650026380e72dabe76fa46526" +"checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" +"checksum unicode_categories 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" +"checksum untrusted 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "55cd1f4b4e96b46aeb8d4855db4a7a9bd96eeeb5c6a1ab54593328761642ce2f" +"checksum url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" +"checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" +"checksum version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" +"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +"checksum walkdir 2.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9658c94fa8b940eab2250bd5a457f9c48b748420d71293b165c8cdbe2f55f71e" +"checksum want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" +"checksum webpki 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4f7e1cd7900a3a6b65a3e8780c51a3e6b59c0e2c55c6dc69578c288d69f7d082" +"checksum webpki-roots 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c10fa4212003ba19a564f25cd8ab572c6791f99a03cc219c13ed35ccab00de0e" +"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" +"checksum winapi 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "f10e386af2b13e47c89e7236a7a14a086791a2b88ebad6df9bf42040195cf770" +"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" +"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +"checksum winapi-util 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7168bab6e1daee33b4557efd0e95d5ca70a03706d39fa5f3fe7a236f584b03c9" +"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +"checksum wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "561ed901ae465d6185fa7864d63fbd5720d0ef718366c9a4dc83cf6170d7e9ba" +"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" +"checksum yaml-rust 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "65923dd1784f44da1d2c3dbbc5e822045628c590ba72123e1c73d3c230c4434d" diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000000..8e44d91520 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,32 @@ +[workspace] +members = [ + "cli", + "future-aio", + "future-helper", + "api/internal-api", + 
"k8-client", + "k8-config", + "k8-diff", + "kf-protocol", + "kf-protocol/kf-protocol-dump", + "kf-protocol/kf-protocol-build", + "kf-socket", + "kf-service", + "k8-metadata", + "metadata", + "api/sc-api", + "sc-server", + "api/spu-api", + "spu-server", + "storage", + "test-helper", + "types", + "utils" +] + +# profile to make image sizer smaller +# comment out for now +#[profile.release] +#lto = true +#codegen-units = 1 +#incremental = false \ No newline at end of file diff --git a/DEVELOPER.md b/DEVELOPER.md new file mode 100644 index 0000000000..1724830cc9 --- /dev/null +++ b/DEVELOPER.md @@ -0,0 +1,113 @@ +# Fluvio for Developers + +Thank you for joining Fluvio community. The goal of this document is to provide everything you need to get started with developing Fluvio. + +## Assumptions + +Familiarity with +- [Rust](https://www.rust-lang.org) +- [Kubernetes](https://kubernetes.io) + +Developer guide and examples should work with the following platforms: +- macOS X +- Linux +Other platforms such as Windows can be made to work, but we haven't tried them yet. + +To test and run services, you need to get access to development Kubernetes cluster. Our guide uses Minikube as examples because it is easy to it get it started, but you can use other Kubernetes cluster as well. Please see [Kubernetes](https://kubernetes.io) for setting up a development cluster. + +# Rust futures and nightly + +Currently, Fluvio is using the nightly version of Rust because it is using unstable version of the Futures library. We expect to switch to the stable version of Rust in [1.39](https://github.com/rust-lang/rust/pull/63209) + + +# Fluvio components +Fluvio platform consists of the following components. + +## Streaming Controller (SC) +Streaming Controller implements control plane operations for data-in-motion. It is responsible for organizing and coordinating data streams between SPU's. It uses the declarative model to self-heal and recover much as possible during failures. + +## Streaming Processing Engine (SPU) +SPU's are the engine for data-in-motion. Each SPU can handle multiple data streams. SPU uses reactive and asynchronous architecture to ensure efficient handling of data. + +## CLI +Fluvio CLI provides +manages SPU +manages streams (topics and partitions) +produce and consume streams + + +# Building Fluvio + +## Set up Rust + +Please follow [setup](https://www.rust-lang.org/tools/install) instructions to install Rust and Cargo. + +## Checkout and build + +This will build Fluvio for your environment: + +``` +$ git clone https://github.com/infinyon/fluvio.git +$ cd fluvio +$ cargo build +$ cargo test +``` + +# Running SC and SPU in development mode + +It is recommended to use custom SPU instead of managed SPU which allow SPU to run locally in your local machine. + + + +## Setting up development env for Minikube + +Due to limitation of third party library, we need to apply DNS name for minikube cluster. + +This requires 2 steps. + +- Add host entry ```minikubeCA``` in your /etc/hosts file. +- Add Kube config context to use host-based configuration + +### Add minikubeCA entry + +First find IP address of the minikube +```minikube ip``` + +then paste output to ```/etc/hosts```. You probably need to perform as sudo + +The host name must be ```minikubeCA``` as shown below + +```192.168.64.9 minikubeCA``` + +### Set up custom context + +Here we set up new context use hostname for minikube. 
+
+```
+kubectl config set-cluster mycube --server=https://minikubeCA:8443 --certificate-authority=.minikube/ca.crt
+kubectl config set-context mycube --user=minikube --cluster=mycube
+kubectl config use-context mycube
+```
+
+## Registering Custom SPUs
+
+To run custom SPUs, we must first register them. To register 3 SPUs:
+```
+kubectl create -f k8-util/samples/crd/spu_5001.yaml
+kubectl create -f k8-util/samples/crd/spu_5002.yaml
+kubectl create -f k8-util/samples/crd/spu_5003.yaml
+```
+
+## Starting custom SPUs
+```
+./dev-tools/log/debug-spu-min 5001 9005 9006
+./dev-tools/log/debug-spu-min 5002 9007 9008
+./dev-tools/log/debug-spu-min 5003 9009 9010
+```
+
+
+
+
+
+
+
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000000..53e75e9553
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,169 @@
+TOOLCHAIN = "./rust-toolchain"
+RUSTV = $(shell cat ${TOOLCHAIN})
+RUST_DOCKER_IMAGE=fluvio/rust-tool:${RUSTV}
+CARGO_BUILD=build
+BIN_NAME=debug
+PUSH=push
+GITHUB_USER=infinyon
+GITHUB_REPO=fluvio
+GITHUB_TAG=0.1.0-alpha
+TARGET_LINUX=x86_64-unknown-linux-musl
+TARGET_DARWIN=x86_64-apple-darwin
+CLI_BUILD=fluvio_cli
+
+run-all-unit-test:
+	cargo test --all
+
+install_musl:
+	rustup target add ${TARGET_LINUX}
+
+clean_build:
+	rm -rf /tmp/cli-*
+
+# create binaries for CLI
+release_cli_darwin:
+	cargo build --release --bin fluvio --target ${TARGET_DARWIN}
+	mkdir -p /tmp/$(CLI_BUILD)_${TARGET_DARWIN}
+	cp target/${TARGET_DARWIN}/release/fluvio /tmp/$(CLI_BUILD)_${TARGET_DARWIN}
+	cd /tmp;tar -czvf cli-${TARGET_DARWIN}-release.tar.gz $(CLI_BUILD)_${TARGET_DARWIN};rm -rf $(CLI_BUILD)_${TARGET_DARWIN}
+
+release_cli_linux:
+	cargo build --release --bin fluvio --target ${TARGET_LINUX}
+	mkdir -p /tmp/$(CLI_BUILD)_${TARGET_LINUX}
+	cp target/${TARGET_LINUX}/release/fluvio /tmp/$(CLI_BUILD)_${TARGET_LINUX}
+	cd /tmp;tar -czvf cli-${TARGET_LINUX}-release.tar.gz $(CLI_BUILD)_${TARGET_LINUX};rm -rf $(CLI_BUILD)_${TARGET_LINUX}
+
+
+
+# create docker images for release
+release_image: CARGO_BUILD=build --release
+release_image: PUSH=push_release
+release_image: BIN_NAME=release
+
+debug_image: linux-spu-server spu_image linux-sc-server sc_image
+release_image: linux-spu-server spu_image linux-sc-server sc_image
+
+
+
+linux-sc-server:
+	cargo $(CARGO_BUILD) --bin sc-server  --target ${TARGET_LINUX}
+
+linux-spu-server:
+	cargo $(CARGO_BUILD) --bin spu-server  --target ${TARGET_LINUX}
+
+
+spu_image: install_musl linux-spu-server
+	make build BIN_NAME=$(BIN_NAME) $(PUSH) -C k8-util/docker/spu
+
+sc_image: install_musl linux-sc-server
+	make build BIN_NAME=$(BIN_NAME) $(PUSH) -C k8-util/docker/sc
+
+
+cargo_cache_dir:
+	mkdir -p .docker-cargo
+
+
+# run test in docker
+docker_linux_test: cargo_cache_dir
+	docker run --rm --volume ${PWD}:/src --workdir /src \
+		-e USER -e CARGO_HOME=/src/.docker-cargo \
+		-e CARGO_TARGET_DIR=/src/target-docker \
+		${RUST_DOCKER_IMAGE} cargo test
+
+
+# create releases
+# release CLI can be downloaded from https://github.com/aktau/github-release/releases
+create_release:
+
	github-release release \
+		--user ${GITHUB_USER} \
+		--repo ${GITHUB_REPO} \
+		--tag ${GITHUB_TAG} \
+		--name "${GITHUB_TAG}" \
+		--description "${GITHUB_TAG}"
+
+
+upload_release: release_cli_darwin release_cli_linux
+	github-release upload \
+		--user ${GITHUB_USER} \
+		--repo ${GITHUB_REPO} \
+		--tag ${GITHUB_TAG} \
+		--name "cli-${TARGET_DARWIN}-release.tar.gz" \
+		--file /tmp/cli-${TARGET_DARWIN}-release.tar.gz
+	github-release upload \
+		--user ${GITHUB_USER} \
+		--repo ${GITHUB_REPO} \
+		--tag ${GITHUB_TAG} \
+		--name "cli-${TARGET_LINUX}-release.tar.gz" \
+		--file /tmp/cli-${TARGET_LINUX}-release.tar.gz
+
+
+delete_release:
+	github-release delete \
+		--user ${GITHUB_USER} \
+		--repo ${GITHUB_REPO} \
+		--tag ${GITHUB_TAG}
+
+
+## Helper targets to compile specific crates
+
+
+build-sc-test:
+	cd sc-server;cargo test --no-run
+
+
+build-spu-test:
+	cd spu-server;cargo test --no-run
+
+build-storage-test:
+	cd storage;cargo test --no-run
+
+build-internal-test:
+	cd internal-api;cargo test --no-run
+
+
+test-spu:
+	cd spu-server;cargo test
+
+test-spu-offset:
+	cd spu-server;RUST_LOG=spu_server=trace cargo test flv_offset_fetch_test
+
+test-sc-connection:
+	cd sc-server;RUST_LOG=sc_server=trace cargo test connection_test
+
+test-sc-partition:
+	cd sc-server;RUST_LOG=sc_server=trace cargo test partition_test
+
+test-sc-controller:
+	cd sc-server; cargo test test_controller_basic
+
+test-sc:
+	cd sc-server;cargo test
+
+test-storage:
+	cd storage;cargo test
+
+test-internal-api:
+	cd api/internal-api;cargo test
+
+test-cli:
+	cd cli;cargo test
+
+test-helper:
+	cd future-helper;cargo test
+
+test-aio:
+	cd future-aio;cargo test
+
+test-kfsocket:
+	cd kf-socket;cargo test
+
+test-kfservice:
+	cd kf-service;cargo test
+
+test-k8client:
+	cd k8-client;cargo test
+
+test-k8config:
+	cd k8-config;cargo test
+
+.PHONY: test-helper test-aio test-kfsocket test-kfservice test-k8client test-k8config
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000..94fdca9e06
--- /dev/null
+++ b/README.md
@@ -0,0 +1,37 @@
+# Fluvio
+
+* Website: https://www.fluvio.io
+
+Welcome to the Fluvio project!
+
+Fluvio is a cloud-native platform for data in motion, built from the ground up to run on top of Kubernetes.
+It brings centralized control to connect, transform, and distribute real-time data across the organization.
+
+The repository contains all the code necessary to run the Fluvio platform: Services, APIs, and the CLI.
+
+## **Features**
+
+- Declarative Management - A unique approach to data management: you specify intent and Fluvio does the rest.
+- Cloud Native - Built for Kubernetes.
+- Real-time architecture - Fully asynchronous by design, suitable for low latency and high throughput environments.
+- Powerful CLI - User-friendly and easy to use Command Line Interface.
+- Written in [Rust](https://www.rust-lang.org). Rust is becoming the [language of choice](https://msrc-blog.microsoft.com/2019/07/22/why-rust-for-safe-systems-programming) for building safe and highly performant distributed systems. Goodbye, garbage collection penalties.
+- Kafka compatibility - Works with Kafka client and server APIs.
+
+
+## Release Status
+Fluvio is in Alpha and should be suitable for lab environments. APIs, Schema, CLI, and Services are continually evolving and subject to change before R1.
+
+
+## Documentation
+
+Full, comprehensive documentation is viewable on the Fluvio website:
+
+https://www.fluvio.io/docs
+
+
+
+## For Developers
+
+To learn about the Fluvio architecture and contribute to the Fluvio project, please visit the [Developer](DEVELOPER.md) section.
diff --git a/api/internal-api/Cargo.toml b/api/internal-api/Cargo.toml
new file mode 100644
index 0000000000..32c9be91f2
--- /dev/null
+++ b/api/internal-api/Cargo.toml
@@ -0,0 +1,11 @@
+[package]
+name = "internal-api"
+version = "0.1.0-alpha.1"
+authors = ["fluvio.io"]
+edition = "2018"
+
+[dependencies]
+log = "0.4.6"
+kf-protocol = { path = "../../kf-protocol"}
+metadata = { path = "../../metadata"}
+types = { path = "../../types"}
diff --git a/api/internal-api/rust-toolchain b/api/internal-api/rust-toolchain
new file mode 120000
index 0000000000..9327ba4034
--- /dev/null
+++ b/api/internal-api/rust-toolchain
@@ -0,0 +1 @@
+../rust-toolchain
\ No newline at end of file
diff --git a/api/internal-api/src/lib.rs b/api/internal-api/src/lib.rs
new file mode 100644
index 0000000000..b80b14992b
--- /dev/null
+++ b/api/internal-api/src/lib.rs
@@ -0,0 +1,20 @@
+mod sc_api;
+mod spu_api;
+mod requests;
+
+pub mod messages;
+
+pub use self::sc_api::InternalScKey;
+pub use self::sc_api::InternalScRequest;
+pub use self::spu_api::InternalSpuApi;
+pub use self::spu_api::InternalSpuRequest;
+
+pub use self::requests::update_spu::*;
+pub use self::requests::update_replica::*;
+pub use self::requests::register_spu::*;
+pub use self::requests::update_lrs::*;
+pub use self::requests::update_all::*;
+
+use kf_protocol::api::RequestMessage;
+
+pub type UpdateSpuRequestMessage = RequestMessage<UpdateSpuRequest>;
diff --git a/api/internal-api/src/messages/mod.rs b/api/internal-api/src/messages/mod.rs
new file mode 100644
index 0000000000..2156eea7ae
--- /dev/null
+++ b/api/internal-api/src/messages/mod.rs
@@ -0,0 +1,14 @@
+
+mod msg_type;
+mod replica_msg;
+
+pub use self::msg_type::MsgType;
+pub use self::msg_type::Message;
+
+
+pub use self::replica_msg::Replica;
+pub use self::replica_msg::ReplicaMsg;
+pub use self::replica_msg::ReplicaMsgs;
+
+use metadata::spu::SpuSpec;
+pub type SpuMsg = Message<SpuSpec>;
\ No newline at end of file
diff --git a/api/internal-api/src/messages/msg_type.rs b/api/internal-api/src/messages/msg_type.rs
new file mode 100644
index 0000000000..caea8c1749
--- /dev/null
+++ b/api/internal-api/src/messages/msg_type.rs
@@ -0,0 +1,75 @@
+//!
+//! # Message Type
+//!
+//! Message Type is used in Action-Centric messages to label the operation request.
+//!
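+//! A minimal usage sketch (the `spec` value here is a stand-in for any content
+//! type implementing `Encoder + Decoder + Debug`):
+//!
+//! ```ignore
+//! // Wrap a spec in an UPDATE message; `Message::from(spec)` is equivalent.
+//! let msg = Message::update(spec);
+//! assert_eq!(msg.header, MsgType::UPDATE);
+//! ```
+//!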
+use std::fmt::Debug;
+use std::fmt::Display;
+use std::fmt;
+
+use kf_protocol::derive::{Decode, Encode};
+use kf_protocol::{Decoder, Encoder};
+
+#[derive(Decode, Encode, Debug, PartialEq, Clone)]
+pub enum MsgType {
+    UPDATE,
+    DELETE,
+}
+
+impl ::std::default::Default for MsgType {
+    fn default() -> Self {
+        MsgType::UPDATE
+    }
+}
+
+
+
+
+#[derive(Decode, Encode, Debug, PartialEq, Clone, Default)]
+pub struct Message<C>
+where
+    C: Encoder + Decoder + Debug,
+{
+    pub header: MsgType,
+    pub content: C,
+}
+
+
+impl<C> fmt::Display for Message<C>
+    where C: Encoder + Decoder + Debug + Display,
+{
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{:#?} {}", self.header, self.content)
+    }
+}
+
+
+impl<C> Message<C>
+where
+    C: Encoder + Decoder + Debug,
+{
+    pub fn new(typ: MsgType, content: C) -> Self {
+        Message {
+            header: typ,
+            content,
+        }
+    }
+
+    pub fn delete(content: C) -> Self {
+        Self::new(MsgType::DELETE, content)
+    }
+
+    pub fn update(content: C) -> Self {
+        Self::new(MsgType::UPDATE, content)
+    }
+}
+
+impl<C> From<C> for Message<C>
+where
+    C: Encoder + Decoder + Debug + Default,
+{
+    fn from(content: C) -> Message<C> {
+        Message::update(content)
+    }
+}
\ No newline at end of file
diff --git a/api/internal-api/src/messages/replica_msg.rs b/api/internal-api/src/messages/replica_msg.rs
new file mode 100644
index 0000000000..c847e4fd33
--- /dev/null
+++ b/api/internal-api/src/messages/replica_msg.rs
@@ -0,0 +1,110 @@
+//!
+//! # Replica Messages
+//!
+//! Replicas are sent from the SC to all live SPUs that participate in this replica group.
+//! This message is sent for any changes in the live replica group.
+//!
+//! UPDATE/DEL operations are computed at the sender by comparing the KV notification with
+//! the internal metadata cache. The receiver translates UPDATE operations into ADD/DEL
+//! operations by comparing the message with its internal metadata.
+//!
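+//! A minimal sketch of building a replica message (the topic and SPU ids are
+//! made up, and a `ReplicaKey::new(topic, partition)` constructor is assumed):
+//!
+//! ```ignore
+//! // Partition 0 of "topic1", led by SPU 5001, replicated on SPUs 5001 and 5002.
+//! let replica = Replica::new(ReplicaKey::new("topic1", 0), 5001, vec![5001, 5002]);
+//! let msg = ReplicaMsg::new(MsgType::UPDATE, replica);
+//! ```
+//!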
diff --git a/api/internal-api/src/messages/replica_msg.rs b/api/internal-api/src/messages/replica_msg.rs
new file mode 100644
index 0000000000..c847e4fd33
--- /dev/null
+++ b/api/internal-api/src/messages/replica_msg.rs
@@ -0,0 +1,110 @@
+//!
+//! # Replica Messages
+//!
+//! Replicas are sent from the SC to all live SPUs that participate in a replica group.
+//! This message is sent for any changes in the live replica group.
+//!
+//! The UPDATE/DEL operation is computed at the sender by comparing the KV notification
+//! with the internal metadata cache. The receiver translates an UPDATE operation into
+//! an ADD/DEL operation by comparing the message with its internal metadata.
+//!
+use std::fmt;
+
+use kf_protocol::derive::{Decode, Encode};
+
+use types::SpuId;
+
+use metadata::partition::ReplicaKey;
+
+use super::MsgType;
+use super::Message;
+
+pub type ReplicaMsg = Message<Replica>;
+
+// -----------------------------------
+// Data Structures
+// -----------------------------------
+
+#[derive(Decode, Encode, Debug, PartialEq, Clone, Default)]
+pub struct ReplicaMsgs {
+    pub messages: Vec<ReplicaMsg>,
+}
+
+impl fmt::Display for ReplicaMsgs {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "[")?;
+        for replica in &self.messages {
+            write!(f, "{},", replica)?;
+        }
+        write!(f, "]")
+    }
+}
+
+// -----------------------------------
+// ReplicaMsgs
+// -----------------------------------
+
+impl ReplicaMsgs {
+    pub fn new(replica_msgs: Vec<ReplicaMsg>) -> Self {
+        ReplicaMsgs {
+            messages: replica_msgs,
+        }
+    }
+
+    pub fn push(&mut self, msg: ReplicaMsg) {
+        self.messages.push(msg);
+    }
+}
+
+#[derive(Decode, Encode, Debug, PartialEq, Clone, Default)]
+pub struct Replica {
+    pub id: ReplicaKey,
+    pub leader: SpuId,
+    pub replicas: Vec<SpuId>,
+}
+
+impl fmt::Display for Replica {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{} leader: {} replicas: [", self.id, self.leader)?;
+        for replica in &self.replicas {
+            write!(f, "{},", replica)?;
+        }
+        write!(f, "]")
+    }
+}
+
+// -----------------------------------
+// ReplicaMsg
+// -----------------------------------
+
+impl ReplicaMsg {
+    pub fn create_delete_msg(name: ReplicaKey, leader: SpuId) -> Self {
+        ReplicaMsg {
+            header: MsgType::DELETE,
+            content: Replica::new(name, leader, vec![]),
+        }
+    }
+}
+
+// -----------------------------------
+// Replica
+// -----------------------------------
+
+impl Replica {
+    pub fn new(id: ReplicaKey, leader: SpuId, replicas: Vec<SpuId>) -> Self {
+        Replica {
+            id,
+            leader,
+            replicas,
+        }
+    }
+}
diff --git a/api/internal-api/src/requests/mod.rs b/api/internal-api/src/requests/mod.rs
new file mode 100644
index 0000000000..71b76c50ea
--- /dev/null
+++ b/api/internal-api/src/requests/mod.rs
@@ -0,0 +1,5 @@
+pub mod update_all;
+pub mod update_spu;
+pub mod update_replica;
+pub mod register_spu;
+pub mod update_lrs;
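With `ReplicaMsgs` and `ReplicaMsg::create_delete_msg` in place, batching a teardown of several replicas is straightforward. A sketch using only the API shown above:

```rust
use internal_api::messages::{ReplicaMsg, ReplicaMsgs};
use metadata::partition::ReplicaKey;
use types::SpuId;

// One DELETE message per replica; ReplicaMsgs derives Default.
fn delete_all(replicas: Vec<(ReplicaKey, SpuId)>) -> ReplicaMsgs {
    let mut msgs = ReplicaMsgs::default();
    for (id, leader) in replicas {
        msgs.push(ReplicaMsg::create_delete_msg(id, leader));
    }
    msgs
}
```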
diff --git a/api/internal-api/src/requests/register_spu.rs b/api/internal-api/src/requests/register_spu.rs
new file mode 100644
index 0000000000..fd442c6999
--- /dev/null
+++ b/api/internal-api/src/requests/register_spu.rs
@@ -0,0 +1,90 @@
+//!
+//! # Register SPU
+//!
+//! The SPU sends a Register message to the SC to ask permission to join the cluster.
+//! The SC matches spu-id, token-name, and token-secret to ensure this SPU can be authorized.
+//!
+//! Authorization result:
+//! * FlvErrorCode::None - for success
+//! * FlvErrorCode::SpuNotAuthorized - for error
+//!
+//! In subsequent releases, Register SPU will carry additional credentials for mTLS.
+//!
+use kf_protocol::api::Request;
+use kf_protocol::api::FlvErrorCode;
+use kf_protocol::derive::Decode;
+use kf_protocol::derive::Encode;
+
+use types::SpuId;
+
+use crate::InternalScKey;
+
+// -----------------------------------
+// Data Structures
+// -----------------------------------
+
+#[derive(Decode, Encode, Debug, Default)]
+pub struct RegisterSpuRequest {
+    spu: SpuId,
+}
+
+impl Request for RegisterSpuRequest {
+    const API_KEY: u16 = InternalScKey::RegisterSpu as u16;
+    type Response = RegisterSpuResponse;
+}
+
+#[derive(Decode, Encode, Default, Debug)]
+pub struct RegisterSpuResponse {
+    error_code: FlvErrorCode,
+    error_message: Option<String>,
+}
+
+// -----------------------------------
+// RegisterSpuRequest
+// -----------------------------------
+
+impl RegisterSpuRequest {
+    pub fn new(spu: SpuId) -> Self {
+        Self { spu }
+    }
+
+    pub fn spu(&self) -> SpuId {
+        self.spu
+    }
+}
+
+// -----------------------------------
+// RegisterSpuResponse
+// -----------------------------------
+
+impl RegisterSpuResponse {
+    pub fn ok() -> Self {
+        RegisterSpuResponse {
+            error_code: FlvErrorCode::None,
+            error_message: None,
+        }
+    }
+
+    pub fn failed_registeration() -> Self {
+        RegisterSpuResponse {
+            error_code: FlvErrorCode::SpuRegisterationFailed,
+            error_message: None,
+        }
+    }
+
+    pub fn is_error(&self) -> bool {
+        self.error_code.is_error()
+    }
+
+    pub fn error_message(&self) -> String {
+        if let Some(err_msg) = &self.error_message {
+            err_msg.clone()
+        } else {
+            self.error_code.to_sentence()
+        }
+    }
+}
diff --git a/api/internal-api/src/requests/update_all.rs b/api/internal-api/src/requests/update_all.rs
new file mode 100644
index 0000000000..821c4f41ec
--- /dev/null
+++ b/api/internal-api/src/requests/update_all.rs
@@ -0,0 +1,105 @@
+use std::collections::BTreeMap;
+
+use kf_protocol::api::Request;
+use kf_protocol::derive::Decode;
+use kf_protocol::derive::Encode;
+use metadata::partition::ReplicaKey;
+use metadata::spu::SpuSpec;
+use types::SpuId;
+
+use crate::InternalSpuApi;
+use crate::messages::Replica;
+
+/// All specs. A listener can use this to sync its own metadata store.
+#[derive(Decode, Encode, Debug, Default)] +pub struct UpdateAllRequest { + pub spus: Vec, + pub replicas: Vec +} + +impl Request for UpdateAllRequest { + const API_KEY: u16 = InternalSpuApi::UpdateAll as u16; + type Response = UpdateAllResponse; +} + +impl UpdateAllRequest { + + pub fn new(spus: Vec, replicas: Vec) -> Self { + Self { + spus, + replicas + } + } + + /// Used when only SPU spec changes + pub fn new_with_spu(spus: Vec) -> Self { + Self::new(spus,vec![]) + } + + pub fn spus_ref(&self) -> &Vec { + &self.spus + } + + pub fn spus(self) -> Vec { + self.spus + } + + pub fn spus_to_map(&self) -> BTreeMap { + let mut res = BTreeMap::new(); + for spu in self.spus.iter() { + res.insert(spu.id.clone(), spu.clone()); + } + res + } + + pub fn replicas_to_map(&self) -> BTreeMap { + let mut res: BTreeMap = BTreeMap::new(); + for replica in self.replicas.iter() { + res.insert(replica.id.clone(), replica.clone()); + } + res + } + + pub fn push_spu(&mut self, msg: SpuSpec) { + self.spus.push(msg); + } + + pub fn add_spu(mut self, spu: S) -> Self + where + S: Into, + { + self.spus.push(spu.into()); + self + } + + + + pub fn mut_add_spu(&mut self, spu: S) + where + S: Into, + { + self.spus.push(spu.into()); + } + + + pub fn add_replica(mut self, replica: R) -> Self + where + R: Into, + { + self.replicas.push(replica.into()); + self + } + + pub fn add_replica_by_ref(&mut self, replica: R) + where + R: Into, + { + self.replicas.push(replica.into()); + } +} + + + +#[derive(Decode, Encode, Default, Debug)] +pub struct UpdateAllResponse {} diff --git a/api/internal-api/src/requests/update_lrs.rs b/api/internal-api/src/requests/update_lrs.rs new file mode 100644 index 0000000000..ce38422737 --- /dev/null +++ b/api/internal-api/src/requests/update_lrs.rs @@ -0,0 +1,48 @@ +use std::fmt; + +use kf_protocol::api::Request; +use kf_protocol::derive::Decode; +use kf_protocol::derive::Encode; + + +use crate::InternalScKey; +use metadata::partition::ReplicaKey; +use metadata::partition::ReplicaStatus; + +/// Live Replica Status +/// First lrs is leader by convention but should not be relied upon +#[derive(Decode, Encode, Debug, Default, PartialEq, Clone)] +pub struct UpdateLrsRequest { + pub id: ReplicaKey, + pub leader: ReplicaStatus, + pub replicas: Vec +} + + +impl fmt::Display for UpdateLrsRequest { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f,"LrsUpdate {}",self.id) + } +} + + + +impl UpdateLrsRequest { + pub fn new(id: ReplicaKey,leader: ReplicaStatus, replicas: Vec) -> Self { + Self { + id, + leader, + replicas + } + } +} + +impl Request for UpdateLrsRequest { + const API_KEY: u16 = InternalScKey::UpdateLrs as u16; + type Response = UpdateLrsResponse; +} + +#[derive(Decode, Encode, Default, Debug)] +pub struct UpdateLrsResponse {} + + diff --git a/api/internal-api/src/requests/update_replica.rs b/api/internal-api/src/requests/update_replica.rs new file mode 100644 index 0000000000..60140f5a52 --- /dev/null +++ b/api/internal-api/src/requests/update_replica.rs @@ -0,0 +1,36 @@ +use kf_protocol::derive::Decode; +use kf_protocol::derive::Encode; +use kf_protocol::api::Request; + +use crate::messages::ReplicaMsgs; +use crate::InternalSpuApi; + +/// Changes in the Replica Specs +#[derive(Decode, Encode, Debug, Default)] +pub struct UpdateReplicaRequest { + replicas: ReplicaMsgs, +} + +impl Request for UpdateReplicaRequest{ + const API_KEY: u16 = InternalSpuApi::UpdateReplica as u16; + type Response = UpdateReplicaResponse; +} + +#[derive(Decode, Encode, Default, Debug)] +pub struct 
UpdateReplicaResponse {} + +impl UpdateReplicaRequest { + pub fn encode_request(replica_msgs: ReplicaMsgs) -> Self { + UpdateReplicaRequest { + replicas: replica_msgs, + } + } + + pub fn decode_request(&self) -> &ReplicaMsgs { + &self.replicas + } + + pub fn replicas(self) -> ReplicaMsgs { + self.replicas + } +} diff --git a/api/internal-api/src/requests/update_replica_leaders.rs b/api/internal-api/src/requests/update_replica_leaders.rs new file mode 100644 index 0000000000..9f0c02586b --- /dev/null +++ b/api/internal-api/src/requests/update_replica_leaders.rs @@ -0,0 +1,39 @@ +use kf_protocol::derive::Decode; +use kf_protocol::derive::Encode; +use kf_protocol::Decoder; +use kf_protocol::Encoder; +use kf_protocol::api::Request; + +use crate::messages::LeaderMsgs; +use crate::InternalKfApiKey; + + +#[derive(Decode, Encode, Debug, Default)] +pub struct UpdateReplicaLeaderRequest { + leaders: LeaderMsgs, +} + +impl Request for UpdateReplicaLeaderRequest{ + const API_KEY: u16 = InternalKfApiKey::UpdateReplicaLeaders as u16; + type Response = UpdateReplicaLeaderResponse; +} + +#[derive(Decode, Encode, Default, Debug)] +pub struct UpdateReplicaLeaderResponse {} + +impl UpdateReplicaLeaderRequest { + + pub fn leaders(self) -> LeaderMsgs { + self.leaders + } + + pub fn encode_request(leader_msgs: LeaderMsgs) -> Self { + UpdateReplicaLeaderRequest { + leaders: leader_msgs, + } + } + + pub fn decode_request(&self) -> &LeaderMsgs { + &self.leaders + } +} diff --git a/api/internal-api/src/requests/update_spu.rs b/api/internal-api/src/requests/update_spu.rs new file mode 100644 index 0000000000..4226f1d804 --- /dev/null +++ b/api/internal-api/src/requests/update_spu.rs @@ -0,0 +1,63 @@ +use std::collections::BTreeMap; + +use kf_protocol::api::Request; +use kf_protocol::derive::Decode; +use kf_protocol::derive::Encode; +use types::SpuId; +use metadata::spu::SpuSpec; + +use crate::InternalSpuApi; +use crate::messages::SpuMsg; + + +/// Changes to Spu specs +#[derive(Decode, Encode, Debug, Default)] +pub struct UpdateSpuRequest { + pub spus: Vec, +} + +impl Request for UpdateSpuRequest { + const API_KEY: u16 = InternalSpuApi::UpdateSpu as u16; + type Response = UpdateSpuResponse; +} + + +impl UpdateSpuRequest { + + pub fn new(spus: Vec) -> Self { + Self { + spus + } + } + + pub fn spus_ref(&self) -> &Vec { + &self.spus + } + + pub fn spus(self) -> Vec { + self.spus + } + + pub fn spus_to_map(&self) -> BTreeMap { + let mut res = BTreeMap::new(); + for spu in self.spus.iter() { + res.insert(spu.content.id.clone(), spu.content.clone()); + } + res + } + + + pub fn add(mut self, spu: S) -> Self + where + S: Into, + { + self.spus.push(spu.into()); + self + } + +} + + + +#[derive(Decode, Encode, Default, Debug)] +pub struct UpdateSpuResponse {} \ No newline at end of file diff --git a/api/internal-api/src/sc_api.rs b/api/internal-api/src/sc_api.rs new file mode 100644 index 0000000000..42a0133828 --- /dev/null +++ b/api/internal-api/src/sc_api.rs @@ -0,0 +1,67 @@ +use std::io::Error as IoError; +use std::convert::TryInto; + +use kf_protocol::api::api_decode; +use kf_protocol::api::KfRequestMessage; +use kf_protocol::api::RequestHeader; +use kf_protocol::api::RequestMessage; +use kf_protocol::bytes::Buf; +use kf_protocol::derive::Encode; + +use kf_protocol::derive::Decode; + +use super::RegisterSpuRequest; +use super::UpdateLrsRequest; + + +#[derive(PartialEq, Debug, Encode, Decode, Clone, Copy)] +#[repr(u16)] +pub enum InternalScKey { + RegisterSpu = 2000, + UpdateLrs = 2001 +} + + +impl Default for InternalScKey 
{ + fn default() -> InternalScKey { + InternalScKey::RegisterSpu + } +} + + + + +/// Request made to Spu from Sc +#[derive(Debug,Encode)] +pub enum InternalScRequest { + RegisterSpuRequest(RequestMessage), + UpdateLrsRequest(RequestMessage), +} + +// Added to satisfy Encode/Decode traits +impl Default for InternalScRequest { + fn default() -> InternalScRequest { + InternalScRequest::RegisterSpuRequest(RequestMessage::default()) + } +} + + + +impl KfRequestMessage for InternalScRequest { + type ApiKey = InternalScKey; + + fn decode_with_header( + src: &mut T, + header: RequestHeader, + ) -> Result + where + Self: Default + Sized, + Self::ApiKey: Sized, + T: Buf, + { + match header.api_key().try_into()? { + InternalScKey::RegisterSpu => api_decode!(InternalScRequest, RegisterSpuRequest, src, header), + InternalScKey::UpdateLrs => api_decode!(InternalScRequest,UpdateLrsRequest, src, header) + } + } +} diff --git a/api/internal-api/src/spu_api.rs b/api/internal-api/src/spu_api.rs new file mode 100644 index 0000000000..a54af5ccf8 --- /dev/null +++ b/api/internal-api/src/spu_api.rs @@ -0,0 +1,74 @@ +use std::io::Error as IoError; +use std::convert::TryInto; + +use kf_protocol::api::api_decode; +use kf_protocol::api::KfRequestMessage; +use kf_protocol::api::RequestHeader; +use kf_protocol::api::RequestMessage; +use kf_protocol::bytes::Buf; +use kf_protocol::derive::Encode; +use kf_protocol::derive::Decode; + +use super::UpdateSpuRequest; +use super::UpdateReplicaRequest; +use super::UpdateAllRequest; + + +#[derive(PartialEq, Debug, Encode, Decode, Clone, Copy)] +#[repr(u16)] +pub enum InternalSpuApi { + UpdateAll = 1000, + UpdateSpu = 1001, + UpdateReplica = 1003, +} + + +impl Default for InternalSpuApi { + fn default() -> InternalSpuApi { + InternalSpuApi::UpdateSpu + } +} + + + +#[derive(Debug,Encode)] +pub enum InternalSpuRequest { + UpdateAllRequest(RequestMessage), + UpdateSpuRequest(RequestMessage), + UpdateReplicaRequest(RequestMessage), +} + +// Added to satisfy Encode/Decode traits +impl Default for InternalSpuRequest { + fn default() -> InternalSpuRequest { + InternalSpuRequest::UpdateSpuRequest(RequestMessage::default()) + } +} + +impl InternalSpuRequest { + pub fn new_update_spu_req(msg: UpdateSpuRequest) -> InternalSpuRequest { + InternalSpuRequest::UpdateSpuRequest(RequestMessage::new_request(msg)) + } + +} + + +impl KfRequestMessage for InternalSpuRequest { + type ApiKey = InternalSpuApi; + + fn decode_with_header( + src: &mut T, + header: RequestHeader, + ) -> Result + where + Self: Default + Sized, + Self::ApiKey: Sized, + T: Buf, + { + match header.api_key().try_into()? 
{
+            InternalSpuApi::UpdateAll => api_decode!(InternalSpuRequest, UpdateAllRequest, src, header),
+            InternalSpuApi::UpdateSpu => api_decode!(InternalSpuRequest, UpdateSpuRequest, src, header),
+            InternalSpuApi::UpdateReplica => api_decode!(InternalSpuRequest, UpdateReplicaRequest, src, header),
+        }
+    }
+}
diff --git a/api/sc-api/Cargo.toml b/api/sc-api/Cargo.toml
new file mode 100644
index 0000000000..f2fb63c356
--- /dev/null
+++ b/api/sc-api/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "sc-api"
+version = "0.1.0-alpha.1"
+authors = ["fluvio.io"]
+edition = "2018"
+
+[dependencies]
+log = "0.4.6"
+kf-protocol = { path = "../../kf-protocol"}
+metadata = { path = "../../metadata"}
+utils = { path= "../../utils"}
+types = { path= "../../types"}
+serde = { version ="1.0.82", features = ['derive'] }
+k8-metadata = { path = "../../k8-metadata"}
\ No newline at end of file
diff --git a/api/sc-api/src/api_key.rs b/api/sc-api/src/api_key.rs
new file mode 100644
index 0000000000..ba0acb5081
--- /dev/null
+++ b/api/sc-api/src/api_key.rs
@@ -0,0 +1,40 @@
+//!
+//! # SC Api Keys
+//!
+//! Stores Api Keys supported by the SC.
+//!
+
+use kf_protocol::derive::Encode;
+use kf_protocol::derive::Decode;
+
+#[derive(Encode, Decode, PartialEq, Debug, Clone, Copy)]
+#[repr(u16)]
+pub enum ScApiKey {
+    // Mixed
+    ApiVersion = 18,
+
+    // Kafka
+    KfMetadata = 3,
+
+    // Topics
+    FlvCreateTopics = 2001,
+    FlvDeleteTopics = 2002,
+    FlvFetchTopics = 2003,
+    FlvTopicComposition = 2004,
+
+    // Custom SPUs
+    FlvCreateCustomSpus = 2005,
+    FlvDeleteCustomSpus = 2006,
+    FlvFetchSpus = 2007,
+
+    // SPU Groups
+    FlvCreateSpuGroups = 2008,
+    FlvDeleteSpuGroups = 2009,
+    FlvFetchSpuGroups = 2010,
+}
+
+impl Default for ScApiKey {
+    fn default() -> ScApiKey {
+        ScApiKey::ApiVersion
+    }
+}
diff --git a/api/sc-api/src/api_versions.rs b/api/sc-api/src/api_versions.rs
new file mode 100644
index 0000000000..c6e8639f94
--- /dev/null
+++ b/api/sc-api/src/api_versions.rs
@@ -0,0 +1,47 @@
+//!
+//! # API Versions
+//!
+//! Public API to retrieve a list of APIs and their version numbers from the SC.
+//! The SC supports Kafka as well as Fluvio-specific APIs.
+//!
+
+use kf_protocol::api::Request;
+use kf_protocol::derive::{Decode, Encode};
+use kf_protocol::api::FlvErrorCode;
+
+use crate::ScApiKey;
+
+pub type ApiVersions = Vec<ApiVersionKey>;
+
+// -----------------------------------
+// ApiVersionsRequest
+// -----------------------------------
+
+#[derive(Decode, Encode, Default, Debug)]
+pub struct ApiVersionsRequest {}
+
+// -----------------------------------
+// ApiVersionsResponse
+// -----------------------------------
+
+#[derive(Decode, Encode, Default, Debug)]
+pub struct ApiVersionsResponse {
+    pub error_code: FlvErrorCode,
+    pub api_keys: Vec<ApiVersionKey>,
+}
+
+#[derive(Decode, Encode, Default, Debug)]
+pub struct ApiVersionKey {
+    pub api_key: i16,
+    pub min_version: i16,
+    pub max_version: i16,
+}
+
+// -----------------------------------
+// Implementation - ApiVersionsRequest
+// -----------------------------------
+
+impl Request for ApiVersionsRequest {
+    const API_KEY: u16 = ScApiKey::ApiVersion as u16;
+    type Response = ApiVersionsResponse;
+}
diff --git a/api/sc-api/src/common/flv_auth_tokens.rs b/api/sc-api/src/common/flv_auth_tokens.rs
new file mode 100644
index 0000000000..82bceff3cf
--- /dev/null
+++ b/api/sc-api/src/common/flv_auth_tokens.rs
@@ -0,0 +1,29 @@
+//!
+//! # AuthTokens
+//!
+//! Fields used by multiple Auth Token APIs
+//!
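As an aside on api_versions.rs above: the `ApiVersionKey` triples are all a client needs for simple version negotiation. A sketch (not part of this diff) that picks the highest mutually supported version for one API key:

```rust
use sc_api::versions::ApiVersionsResponse;

// Returns the highest version both sides support, or None if the ranges
// do not overlap or the key is absent from the response.
fn max_supported(resp: &ApiVersionsResponse, api_key: i16, client_max: i16) -> Option<i16> {
    resp.api_keys.iter().find(|k| k.api_key == api_key).and_then(|k| {
        let v = client_max.min(k.max_version);
        if v >= k.min_version {
            Some(v)
        } else {
            None
        }
    })
}
```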
+ +use kf_protocol::derive::{Encode, Decode}; + +// ----------------------------------- +// FlvTokenType +// ----------------------------------- + +/// Fluvio SPU type: Custom, Managed, or Any +#[derive(Decode, Encode, Debug, Clone, PartialEq)] +pub enum FlvTokenType { + Any, + Custom, + Managed, +} + +// ----------------------------------- +// Defaults +// ----------------------------------- + +impl ::std::default::Default for FlvTokenType { + fn default() -> Self { + FlvTokenType::Any + } +} diff --git a/api/sc-api/src/common/flv_response_message.rs b/api/sc-api/src/common/flv_response_message.rs new file mode 100644 index 0000000000..31b784bec2 --- /dev/null +++ b/api/sc-api/src/common/flv_response_message.rs @@ -0,0 +1,37 @@ +//! +//! # Response Message +//! +//! Response sent to client. Sends entity name, error code and error message. +//! +use kf_protocol::derive::Decode; +use kf_protocol::derive::Encode; +use kf_protocol::api::FlvErrorCode; + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvResponseMessage { + pub name: String, + pub error_code: FlvErrorCode, + pub error_message: Option, +} + +impl FlvResponseMessage { + pub fn new_ok(name: String) -> Self { + FlvResponseMessage { + name: name, + error_code: FlvErrorCode::None, + error_message: None, + } + } + + pub fn new(name: String, code: FlvErrorCode, msg: Option) -> Self { + FlvResponseMessage { + name: name, + error_code: code, + error_message: msg, + } + } + + pub fn is_error(&self) -> bool { + self.error_code != FlvErrorCode::None + } +} diff --git a/api/sc-api/src/common/flv_spus.rs b/api/sc-api/src/common/flv_spus.rs new file mode 100644 index 0000000000..125aed32bf --- /dev/null +++ b/api/sc-api/src/common/flv_spus.rs @@ -0,0 +1,437 @@ +//! +//! # Custom Spus +//! +//! Fields used by multiple Custom Spu APIs +//! 
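`FlvResponseMessage` above is the per-entity result envelope shared by the create/delete APIs that follow. A caller usually only cares about the failures; a sketch, assuming the sc-api crate name from this diff:

```rust
use sc_api::FlvResponseMessage;

// Collect (entity name, reason) for every failed result in a batch response.
fn failures(results: &[FlvResponseMessage]) -> Vec<(String, String)> {
    results
        .iter()
        .filter(|r| r.is_error())
        .map(|r| {
            let reason = r
                .error_message
                .clone()
                .unwrap_or_else(|| format!("{:?}", r.error_code));
            (r.name.clone(), reason)
        })
        .collect()
}
```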
+use std::io::{Error, ErrorKind}; + +use kf_protocol::Version; +use kf_protocol::{Decoder, Encoder}; +use kf_protocol::bytes::{Buf, BufMut}; +use kf_protocol::derive::{Encode, Decode}; + +// ----------------------------------- +// Data Structures - FlvEndPointMetadata +// ----------------------------------- + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvEndPointMetadata { + pub port: u16, + pub host: String, +} + +// ----------------------------------- +// Data Structures - FlvRequestSpuType +// ----------------------------------- + +#[derive(Debug)] +pub enum FlvRequestSpuType { + All, + Custom, +} + +// ----------------------------------- +// Implementation - FlvRequestSpuType +// ----------------------------------- +impl Default for FlvRequestSpuType { + fn default() -> FlvRequestSpuType { + FlvRequestSpuType::All + } +} + +impl Encoder for FlvRequestSpuType { + // compute size + fn write_size(&self, version: Version) -> usize { + (0 as u8).write_size(version) + } + + // encode match + fn encode(&self, dest: &mut T, version: Version) -> Result<(), Error> + where + T: BufMut, + { + // ensure buffer is large enough + if dest.remaining_mut() < self.write_size(version) { + return Err(Error::new( + ErrorKind::UnexpectedEof, + format!( + "not enough capacity for spu request type {}", + self.write_size(version) + ), + )); + } + + match self { + FlvRequestSpuType::All => { + let typ: u8 = 0; + typ.encode(dest, version)?; + } + FlvRequestSpuType::Custom => { + let typ: u8 = 1; + typ.encode(dest, version)?; + } + } + + Ok(()) + } +} + +impl Decoder for FlvRequestSpuType { + fn decode(&mut self, src: &mut T, version: Version) -> Result<(), Error> + where + T: Buf, + { + let mut value: u8 = 0; + value.decode(src, version)?; + match value { + 0 => *self = FlvRequestSpuType::All, + 1 => *self = FlvRequestSpuType::Custom, + _ => { + return Err(Error::new( + ErrorKind::UnexpectedEof, + format!("invalid value for spu request type: {}", value), + )) + } + } + + Ok(()) + } +} + +// ----------------------------------- +// Data Structures - FlvSpuType +// ----------------------------------- + +#[derive(Debug)] +pub enum FlvSpuType { + Custom, + Managed, +} + +// ----------------------------------- +// Implementation - FlvSpuType +// ----------------------------------- +impl Default for FlvSpuType { + fn default() -> FlvSpuType { + FlvSpuType::Custom + } +} + +impl Encoder for FlvSpuType { + // compute size + fn write_size(&self, version: Version) -> usize { + (0 as u8).write_size(version) + } + + // encode match + fn encode(&self, dest: &mut T, version: Version) -> Result<(), Error> + where + T: BufMut, + { + // ensure buffer is large enough + if dest.remaining_mut() < self.write_size(version) { + return Err(Error::new( + ErrorKind::UnexpectedEof, + format!( + "not enough capacity for spu type {}", + self.write_size(version) + ), + )); + } + + match self { + FlvSpuType::Custom => { + let typ: u8 = 0; + typ.encode(dest, version)?; + } + FlvSpuType::Managed => { + let typ: u8 = 1; + typ.encode(dest, version)?; + } + } + + Ok(()) + } +} + +impl Decoder for FlvSpuType { + fn decode(&mut self, src: &mut T, version: Version) -> Result<(), Error> + where + T: Buf, + { + let mut value: u8 = 0; + value.decode(src, version)?; + match value { + 0 => *self = FlvSpuType::Custom, + 1 => *self = FlvSpuType::Managed, + _ => { + return Err(Error::new( + ErrorKind::UnexpectedEof, + format!("invalid value for spu type: {}", value), + )) + } + } + + Ok(()) + } +} + +// ----------------------------------- +// Data 
Structures - FlvSpuResolution +// ----------------------------------- + +#[derive(Debug)] +pub enum FlvSpuResolution { + Online, + Offline, + Init, +} + +// ----------------------------------- +// Implementation - FlvSpuResolution +// ----------------------------------- +impl Default for FlvSpuResolution { + fn default() -> FlvSpuResolution { + FlvSpuResolution::Init + } +} + +impl Encoder for FlvSpuResolution { + // compute size + fn write_size(&self, version: Version) -> usize { + (0 as u8).write_size(version) + } + + // encode match + fn encode(&self, dest: &mut T, version: Version) -> Result<(), Error> + where + T: BufMut, + { + // ensure buffer is large enough + if dest.remaining_mut() < self.write_size(version) { + return Err(Error::new( + ErrorKind::UnexpectedEof, + format!( + "not enough capacity for spu resolution {}", + self.write_size(version) + ), + )); + } + + match self { + FlvSpuResolution::Online => { + let typ: u8 = 0; + typ.encode(dest, version)?; + } + FlvSpuResolution::Offline => { + let typ: u8 = 1; + typ.encode(dest, version)?; + } + FlvSpuResolution::Init => { + let typ: u8 = 2; + typ.encode(dest, version)?; + } + } + + Ok(()) + } +} + +impl Decoder for FlvSpuResolution { + fn decode(&mut self, src: &mut T, version: Version) -> Result<(), Error> + where + T: Buf, + { + let mut value: u8 = 0; + value.decode(src, version)?; + match value { + 0 => *self = FlvSpuResolution::Online, + 1 => *self = FlvSpuResolution::Offline, + 2 => *self = FlvSpuResolution::Init, + _ => { + return Err(Error::new( + ErrorKind::UnexpectedEof, + format!("invalid value for spu resolution: {}", value), + )) + } + } + + Ok(()) + } +} + +// ----------------------------------- +// Data Structures - FlvSpuGroupResolution +// ----------------------------------- + +#[derive(Debug)] +pub enum FlvSpuGroupResolution { + Init, + Invalid, + Reserved, +} + +// ----------------------------------- +// Implementation - FlvSpuGroupResolution +// ----------------------------------- +impl Default for FlvSpuGroupResolution { + fn default() -> FlvSpuGroupResolution { + FlvSpuGroupResolution::Init + } +} + +impl Encoder for FlvSpuGroupResolution { + // compute size + fn write_size(&self, version: Version) -> usize { + (0 as u8).write_size(version) + } + + // encode match + fn encode(&self, dest: &mut T, version: Version) -> Result<(), Error> + where + T: BufMut, + { + // ensure buffer is large enough + if dest.remaining_mut() < self.write_size(version) { + return Err(Error::new( + ErrorKind::UnexpectedEof, + format!( + "not enough capacity for group spu resolution {}", + self.write_size(version) + ), + )); + } + + match self { + FlvSpuGroupResolution::Init => { + let typ: u8 = 0; + typ.encode(dest, version)?; + } + FlvSpuGroupResolution::Invalid => { + let typ: u8 = 1; + typ.encode(dest, version)?; + } + FlvSpuGroupResolution::Reserved => { + let typ: u8 = 2; + typ.encode(dest, version)?; + } + } + + Ok(()) + } +} + +impl Decoder for FlvSpuGroupResolution { + fn decode(&mut self, src: &mut T, version: Version) -> Result<(), Error> + where + T: Buf, + { + let mut value: u8 = 0; + value.decode(src, version)?; + match value { + 0 => *self = FlvSpuGroupResolution::Init, + 1 => *self = FlvSpuGroupResolution::Invalid, + 2 => *self = FlvSpuGroupResolution::Reserved, + _ => { + return Err(Error::new( + ErrorKind::UnexpectedEof, + format!("invalid value for group spu resolution: {}", value), + )) + } + } + + Ok(()) + } +} + +// ----------------------------------- +// Data Structures - CustomSpu +// 
----------------------------------- + +#[derive(Debug)] +pub enum FlvCustomSpu { + Name(String), + Id(i32), +} + +// ----------------------------------- +// Implementation - CustomSpu +// ----------------------------------- +impl Default for FlvCustomSpu { + fn default() -> FlvCustomSpu { + FlvCustomSpu::Name("".to_string()) + } +} + +impl Encoder for FlvCustomSpu { + // compute size + fn write_size(&self, version: Version) -> usize { + let type_size = (0 as u8).write_size(version); + match self { + FlvCustomSpu::Name(name) => type_size + name.write_size(version), + FlvCustomSpu::Id(id) => type_size + id.write_size(version), + } + } + + // encode match + fn encode(&self, dest: &mut T, version: Version) -> Result<(), Error> + where + T: BufMut, + { + // ensure buffer is large enough + if dest.remaining_mut() < self.write_size(version) { + return Err(Error::new( + ErrorKind::UnexpectedEof, + format!( + "not enough capacity for custom spu len of {}", + self.write_size(version) + ), + )); + } + + match self { + FlvCustomSpu::Name(name) => { + let typ: u8 = 0; + typ.encode(dest, version)?; + name.encode(dest, version)?; + } + FlvCustomSpu::Id(id) => { + let typ: u8 = 1; + typ.encode(dest, version)?; + id.encode(dest, version)?; + } + } + + Ok(()) + } +} + +impl Decoder for FlvCustomSpu { + fn decode(&mut self, src: &mut T, version: Version) -> Result<(), Error> + where + T: Buf, + { + let mut value: u8 = 0; + value.decode(src, version)?; + match value { + 0 => { + let mut name: String = String::default(); + name.decode(src, version)?; + *self = FlvCustomSpu::Name(name) + } + 1 => { + let mut id: i32 = 0; + id.decode(src, version)?; + *self = FlvCustomSpu::Id(id) + } + _ => { + return Err(Error::new( + ErrorKind::UnexpectedEof, + format!("invalid value for Custom Spu: {}", value), + )) + } + } + + Ok(()) + } +} diff --git a/api/sc-api/src/common/mod.rs b/api/sc-api/src/common/mod.rs new file mode 100644 index 0000000000..b768fd491f --- /dev/null +++ b/api/sc-api/src/common/mod.rs @@ -0,0 +1,3 @@ +pub mod flv_auth_tokens; +pub mod flv_spus; +pub mod flv_response_message; diff --git a/api/sc-api/src/flv_create_custom_spus.rs b/api/sc-api/src/flv_create_custom_spus.rs new file mode 100644 index 0000000000..2a1e21986d --- /dev/null +++ b/api/sc-api/src/flv_create_custom_spus.rs @@ -0,0 +1,61 @@ +//! +//! # Create Custom SPU +//! +//! Public API to request the SC to create one or more custom spus +//! +//! +use kf_protocol::api::Request; +use kf_protocol::derive::{Decode, Encode}; + +use crate::FlvResponseMessage; +use crate::ScApiKey; + +use super::spu::FlvEndPointMetadata; + +// ----------------------------------- +// FlvCreateCustomSpusRequest +// ----------------------------------- + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvCreateCustomSpusRequest { + /// A list of one or more custom spus to be created. 
+ pub custom_spus: Vec, +} + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvCreateCustomSpuRequest { + /// The id of the custom spu (globally unique id) + pub id: i32, + + /// The name of the custom spu + pub name: String, + + /// Server host and port number of the public server + pub public_server: FlvEndPointMetadata, + + /// Server host and port number of the private server + pub private_server: FlvEndPointMetadata, + + /// Rack name (optional) + pub rack: Option, +} + +// ----------------------------------- +// FlvCreateCustomSpusResponse +// ----------------------------------- + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvCreateCustomSpusResponse { + /// The custom spu creation result messages. + pub results: Vec, +} + +// ----------------------------------- +// Implementation - FlvCreateCustomSpusRequest +// ----------------------------------- + +impl Request for FlvCreateCustomSpusRequest { + const API_KEY: u16 = ScApiKey::FlvCreateCustomSpus as u16; + const DEFAULT_API_VERSION: i16 = 1; + type Response = FlvCreateCustomSpusResponse; +} diff --git a/api/sc-api/src/flv_create_spu_groups.rs b/api/sc-api/src/flv_create_spu_groups.rs new file mode 100644 index 0000000000..055f66e3c1 --- /dev/null +++ b/api/sc-api/src/flv_create_spu_groups.rs @@ -0,0 +1,94 @@ +//! +//! # Create SPU Groups +//! +//! Public API to request the SC to create managed spu groups +//! +//! +use kf_protocol::api::Request; +use kf_protocol::derive::{Decode, Encode}; + +use crate::FlvResponseMessage; +use crate::ScApiKey; + +// ----------------------------------- +// FlvCreateSpuGroupsRequest +// ----------------------------------- + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvCreateSpuGroupsRequest { + /// A list of one or more spu groups to be created. + pub spu_groups: Vec, +} + +// quick way to convert a single group into groups requests +impl From for FlvCreateSpuGroupsRequest { + fn from(group: FlvCreateSpuGroupRequest) -> Self { + let mut groups = Self::default(); + groups.spu_groups.push(group); + groups + } +} + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvCreateSpuGroupRequest { + /// The name of the managed spu group + pub name: String, + + /// The number of replicas for the spu group + pub replicas: u16, + + /// The base spu id that the spu group uses to increment the spu ids + /// Note: Spu id is a globally unique resource and it cannot be shared + pub min_id: Option, + + /// Configuration elements to be applied to each SPUs in the group + pub config: FlvGroupConfig, + + /// The rack to be used for all SPUs in the group. Racks are used by + /// replication assignment algorithm + pub rack: Option, +} + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvGroupConfig { + pub storage: Option, + pub replication: Option, + pub env: Vec +} + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvStorageConfig { + pub log_dir: Option, + pub size: Option, +} + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvReplicationConfig { + pub in_sync_replica_min: Option, +} + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvEnvVar { + pub name: String, + pub value: String, +} + +// ----------------------------------- +// FlvCreateSpuGroupsResponse +// ----------------------------------- + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvCreateSpuGroupsResponse { + /// The spu group creation result messages. 
+ pub results: Vec, +} + +// ----------------------------------- +// Implementation - FlvCreateSpuGroupsRequest +// ----------------------------------- + +impl Request for FlvCreateSpuGroupsRequest { + const API_KEY: u16 = ScApiKey::FlvCreateSpuGroups as u16; + const DEFAULT_API_VERSION: i16 = 1; + type Response = FlvCreateSpuGroupsResponse; +} diff --git a/api/sc-api/src/flv_create_topics.rs b/api/sc-api/src/flv_create_topics.rs new file mode 100644 index 0000000000..3313f037d1 --- /dev/null +++ b/api/sc-api/src/flv_create_topics.rs @@ -0,0 +1,57 @@ +//! +//! # Create Topics +//! +//! Public API to request the SC to create one or more topics. +//! +//! + +use kf_protocol::api::Request; +use kf_protocol::derive::{Decode, Encode}; + +use metadata::topic::TopicSpec as TopicConfigMetadata; + +use crate::FlvResponseMessage; +use crate::ScApiKey; + +// ----------------------------------- +// FlvCreateTopicsRequest +// ----------------------------------- + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvCreateTopicsRequest { + /// A list of one or more topics to be created. + pub topics: Vec, + + /// Validate-only flag to prevent topic generation. Method is particularly useful + /// to validate custom replicas. + pub validate_only: bool, +} + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvCreateTopicRequest { + /// The name of the topic. + pub name: String, + + /// The Topic Metadata + pub topic: TopicConfigMetadata, +} + +// ----------------------------------- +// FlvCreateTopicsResponse +// ----------------------------------- + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvCreateTopicsResponse { + /// The topic creation result messages. + pub results: Vec, +} + +// ----------------------------------- +// Implementation - FlvCreateTopicsRequest +// ----------------------------------- + +impl Request for FlvCreateTopicsRequest { + const API_KEY: u16 = ScApiKey::FlvCreateTopics as u16; + const DEFAULT_API_VERSION: i16 = 1; + type Response = FlvCreateTopicsResponse; +} diff --git a/api/sc-api/src/flv_delete_custom_spus.rs b/api/sc-api/src/flv_delete_custom_spus.rs new file mode 100644 index 0000000000..47874894a9 --- /dev/null +++ b/api/sc-api/src/flv_delete_custom_spus.rs @@ -0,0 +1,42 @@ +//! +//! # Delete Custon SPUs +//! +//! Public API to request the SC to delete one or more custom spus. +//! +//! +use kf_protocol::api::Request; +use kf_protocol::derive::{Decode, Encode}; + +use crate::FlvResponseMessage; +use crate::ScApiKey; +use crate::common::flv_spus::FlvCustomSpu; + +// ----------------------------------- +// FlvDeleteCustomSpusRequest +// ----------------------------------- + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvDeleteCustomSpusRequest { + /// Each spu name or id to be deleted. 
+ pub custom_spus: Vec, +} + +// ----------------------------------- +// FlvDeleteTopicsResponse +// ----------------------------------- + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvDeleteCustomSpusResponse { + /// A response message for each delete request + pub results: Vec, +} + +// ----------------------------------- +// Implementation - FlvDeleteTopicsRequest +// ----------------------------------- + +impl Request for FlvDeleteCustomSpusRequest { + const API_KEY: u16 = ScApiKey::FlvDeleteCustomSpus as u16; + const DEFAULT_API_VERSION: i16 = 1; + type Response = FlvDeleteCustomSpusResponse; +} diff --git a/api/sc-api/src/flv_delete_spu_groups.rs b/api/sc-api/src/flv_delete_spu_groups.rs new file mode 100644 index 0000000000..55a1bf5478 --- /dev/null +++ b/api/sc-api/src/flv_delete_spu_groups.rs @@ -0,0 +1,42 @@ +//! +//! # Delete Spu Groups +//! +//! Public API to request the SC to delete one or more managed spu groups. +//! +//! + +use kf_protocol::api::Request; +use kf_protocol::derive::{Decode, Encode}; + +use crate::FlvResponseMessage; +use crate::ScApiKey; + +// ----------------------------------- +// FlvDeleteSpuGroupsRequest +// ----------------------------------- + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvDeleteSpuGroupsRequest { + /// Each spu group in the delete request. + pub spu_groups: Vec, +} + +// ----------------------------------- +// FlvDeleteSpuGroupsResponse +// ----------------------------------- + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvDeleteSpuGroupsResponse { + /// A response message for each delete spu group request + pub results: Vec, +} + +// ----------------------------------- +// Implementation - FlvDeleteSpuGroupsRequest +// ----------------------------------- + +impl Request for FlvDeleteSpuGroupsRequest { + const API_KEY: u16 = ScApiKey::FlvDeleteSpuGroups as u16; + const DEFAULT_API_VERSION: i16 = 1; + type Response = FlvDeleteSpuGroupsResponse; +} diff --git a/api/sc-api/src/flv_delete_topics.rs b/api/sc-api/src/flv_delete_topics.rs new file mode 100644 index 0000000000..75eacb436a --- /dev/null +++ b/api/sc-api/src/flv_delete_topics.rs @@ -0,0 +1,42 @@ +//! +//! # Delete Topics +//! +//! Public API to request the SC to delete one or more topics. +//! +//! + +use kf_protocol::api::Request; +use kf_protocol::derive::{Decode, Encode}; + +use crate::FlvResponseMessage; +use crate::ScApiKey; + +// ----------------------------------- +// FlvDeleteTopicsRequest +// ----------------------------------- + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvDeleteTopicsRequest { + /// Each topic in the request. + pub topics: Vec, +} + +// ----------------------------------- +// FlvDeleteTopicsResponse +// ----------------------------------- + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvDeleteTopicsResponse { + /// A response message for each topic request + pub results: Vec, +} + +// ----------------------------------- +// Implementation - FlvDeleteTopicsRequest +// ----------------------------------- + +impl Request for FlvDeleteTopicsRequest { + const API_KEY: u16 = ScApiKey::FlvDeleteTopics as u16; + const DEFAULT_API_VERSION: i16 = 1; + type Response = FlvDeleteTopicsResponse; +} diff --git a/api/sc-api/src/flv_fetch_spu_groups.rs b/api/sc-api/src/flv_fetch_spu_groups.rs new file mode 100644 index 0000000000..6b634a6e94 --- /dev/null +++ b/api/sc-api/src/flv_fetch_spu_groups.rs @@ -0,0 +1,138 @@ +//! +//! # Fetch SPU Groups +//! +//! Public API to fetch SPU Group metadata from the SC +//! 
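The delete requests above are plain name lists, so building one is a one-liner. A sketch, assuming `topics` is a `Vec<String>` of topic names as the field doc suggests:

```rust
use sc_api::topic::FlvDeleteTopicsRequest;

// Request deletion of several topics by name.
fn delete_topics(names: &[&str]) -> FlvDeleteTopicsRequest {
    FlvDeleteTopicsRequest {
        topics: names.iter().map(|s| s.to_string()).collect(),
    }
}
```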
+use kf_protocol::api::Request; +use kf_protocol::derive::Decode; +use kf_protocol::derive::Encode; +use k8_metadata::spg::SpuGroupSpec; +use k8_metadata::spg::SpuGroupStatus; +use k8_metadata::spg::SpuGroupStatusResolution; +use k8_metadata::spg::SpuTemplate; +use k8_metadata::core::metadata::TemplateSpec; +use k8_metadata::core::metadata::K8Obj; +use k8_metadata::spg::StorageConfig; + +use crate::ScApiKey; +use crate::FlvResponseMessage; + +use super::spu::FlvSpuGroupResolution; + +/// Fetch SPU Groups by type +#[derive(Decode, Encode, Default, Debug)] +pub struct FlvFetchSpuGroupsRequest {} + + +impl Request for FlvFetchSpuGroupsRequest { + const API_KEY: u16 = ScApiKey::FlvFetchSpuGroups as u16; + const DEFAULT_API_VERSION: i16 = 1; + type Response = FlvFetchSpuGroupsResponse; +} + + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvFetchSpuGroupsResponse { + pub error: FlvResponseMessage, + /// Each spu in the response. + pub spu_groups: Vec, +} + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvFetchSpuGroup { + + pub name: String, + + /// The number of replicas for the spu group + pub replicas: u16, + + // The base spu id for the spu group + pub min_id: i32, + + /// Rack label, optional parameter used by replica assignment algorithm. + pub rack: Option, + + /// storage size + pub size: String, + + /// Status resolution + pub resolution: FlvSpuGroupResolution, + + /// Reason for Status resolution (if applies) + pub reason: Option, +} + +impl Into<(String,SpuGroupSpec,SpuGroupStatus)> for FlvFetchSpuGroup { + + fn into(self) -> (String,SpuGroupSpec,SpuGroupStatus) { + + ( + self.name, + SpuGroupSpec { + replicas: self.replicas, + min_id: Some(self.min_id), + template: TemplateSpec { + spec: SpuTemplate { + rack: self.rack, + storage: Some(StorageConfig { + size: Some(self.size), + ..Default::default() + }), + ..Default::default() + }, + ..Default::default() + } + + }, + SpuGroupStatus { + resolution: self.resolution.into(), + ..Default::default() + } + ) + } +} + + +impl From> for FlvFetchSpuGroup { + + fn from(item: K8Obj) -> Self { + + let (name,spec,status) = (item.metadata.name,item.spec,item.status.unwrap_or_default()); + let min_id = spec.min_id(); + let (replicas,template) = (spec.replicas,spec.template.spec); + let (rack,storage) = (template.rack,template.storage.unwrap_or_default()); + Self { + name, + replicas, + min_id, + rack, + size: storage.size(), + resolution: status.resolution.into(), + reason: None, + } + } + +} + +impl From for FlvSpuGroupResolution { + + fn from(res: SpuGroupStatusResolution) -> Self { + + match res { + SpuGroupStatusResolution::Init => FlvSpuGroupResolution::Init, + SpuGroupStatusResolution::Invalid => FlvSpuGroupResolution::Invalid, + SpuGroupStatusResolution::Reserved => FlvSpuGroupResolution::Reserved, + } + } +} + + +impl Into for FlvSpuGroupResolution { + fn into(self) -> SpuGroupStatusResolution { + match self { + Self::Init => SpuGroupStatusResolution::Init, + Self::Invalid => SpuGroupStatusResolution::Invalid, + Self::Reserved => SpuGroupStatusResolution::Reserved, + } + } +} diff --git a/api/sc-api/src/flv_fetch_spus.rs b/api/sc-api/src/flv_fetch_spus.rs new file mode 100644 index 0000000000..78d34c1445 --- /dev/null +++ b/api/sc-api/src/flv_fetch_spus.rs @@ -0,0 +1,80 @@ +//! +//! # Fetch SPUs +//! +//! Public API to fetch SPU metadata from the SC +//! 
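The paired `From`/`Into` conversions above make `FlvFetchSpuGroup` a thin view over the K8 object, so round-tripping reduces to `.into()`. A sketch:

```rust
use k8_metadata::spg::{SpuGroupSpec, SpuGroupStatus};
use sc_api::spu::FlvFetchSpuGroup;

// Split a fetched group back into its K8 (name, spec, status) triple.
fn to_k8_parts(group: FlvFetchSpuGroup) -> (String, SpuGroupSpec, SpuGroupStatus) {
    group.into()
}
```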
+use kf_protocol::api::Request; +use kf_protocol::api::FlvErrorCode; +use kf_protocol::derive::Decode; +use kf_protocol::derive::Encode; + +use crate::ScApiKey; +use crate::common::flv_spus::FlvRequestSpuType; +use crate::common::flv_spus::FlvSpuType; +use crate::common::flv_spus::FlvSpuResolution; + +use super::spu::FlvEndPointMetadata; + +// ----------------------------------- +// FlvFetchSpusRequest +// ----------------------------------- + +/// Fetch SPUs by type +#[derive(Decode, Encode, Default, Debug)] +pub struct FlvFetchSpusRequest { + /// SPU type All or Custom + pub req_spu_type: FlvRequestSpuType, +} + +// ----------------------------------- +// FlvFetchSpusResponse +// ----------------------------------- + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvFetchSpusResponse { + /// Each spu in the response. + pub spus: Vec, +} + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvFetchSpuResponse { + /// The error code, None for no errors + pub error_code: FlvErrorCode, + + /// The spu name + pub name: String, + + /// Spu parameters, None if error + pub spu: Option, +} + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvFetchSpu { + /// Spu globally unique id. + pub id: i32, + + /// Spu type: true for managed, false for custom. + pub spu_type: FlvSpuType, + + /// Public endpoint server interface. + pub public_ep: FlvEndPointMetadata, + + /// Private endpoint server interface. + pub private_ep: FlvEndPointMetadata, + + /// Rack label, optional parameter used by replica assignment algorithm. + pub rack: Option, + + /// Status resolution + pub resolution: FlvSpuResolution, +} + +// ----------------------------------- +// Implementation - FlvFetchSpusRequest +// ----------------------------------- + +impl Request for FlvFetchSpusRequest { + const API_KEY: u16 = ScApiKey::FlvFetchSpus as u16; + const DEFAULT_API_VERSION: i16 = 1; + type Response = FlvFetchSpusResponse; +} diff --git a/api/sc-api/src/flv_fetch_topics.rs b/api/sc-api/src/flv_fetch_topics.rs new file mode 100644 index 0000000000..56f23d1d18 --- /dev/null +++ b/api/sc-api/src/flv_fetch_topics.rs @@ -0,0 +1,123 @@ +//! +//! # Fetch Topics +//! +//! Public API to retrieve Topics from the SC. +//! +use kf_protocol::api::Request; +use kf_protocol::api::FlvErrorCode; +use kf_protocol::derive::Decode; +use kf_protocol::derive::Encode; + +use metadata::topic::{TopicSpec, TopicStatus}; + +use crate::ScApiKey; + +// ----------------------------------- +// FlvFetchTopicsRequest +// ----------------------------------- + +#[derive(Decode, Encode, Default, Debug)] +pub struct FlvFetchTopicsRequest { + /// A list of one or more topics to be retireved. + /// None retrieves all topics. + pub names: Option>, +} + +// ----------------------------------- +// FlvFetchTopicsResponse +// ----------------------------------- + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvFetchTopicsResponse { + /// The list of topics that have been retrieved. + pub topics: Vec, +} + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvFetchTopicResponse { + /// The error code, None for no errors + pub error_code: FlvErrorCode, + + /// The name of the topic. 
+ pub name: String, + + /// Topic parameters, None if error + pub topic: Option, +} + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvFetchTopic { + /// Topic spec + pub spec: TopicSpec, + + /// Topic status + pub status: TopicStatus, + + /// Replica assignment for each partition + pub partition_replicas: Option>, +} + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvPartitionReplica { + /// Partition id + pub id: i32, + + /// Replica leader + pub leader: i32, + + /// Replica assignment + pub replicas: Vec, + + /// Only live replicas in replica assignment + pub live_replicas: Vec, +} + +// ----------------------------------- +// Implementation - FlvFetchTopicsRequest +// ----------------------------------- + +impl Request for FlvFetchTopicsRequest { + const API_KEY: u16 = ScApiKey::FlvFetchTopics as u16; + type Response = FlvFetchTopicsResponse; +} + +// ----------------------------------- +// Implementation - FlvFetchTopicResponse +// ----------------------------------- +impl FlvFetchTopicResponse { + /// Constructor for topics found + pub fn new( + name: String, + spec: TopicSpec, + status: TopicStatus, + partition_replicas: Option>, + ) -> Self { + FlvFetchTopicResponse { + name: name, + error_code: FlvErrorCode::None, + topic: Some(FlvFetchTopic { + spec, + status, + partition_replicas, + }), + } + } + + /// Constructor for topics that are not found + pub fn new_not_found(name: String) -> Self { + FlvFetchTopicResponse { + name: name, + error_code: FlvErrorCode::TopicNotFound, + topic: None, + } + } + + /// Update topic partitions. + /// Requirements: + /// * Must be called with valid topic, otherwise, update will fail silently + pub fn update_partitions(&mut self, partition_replicas: Option>) { + if self.topic.is_some() { + self.topic.as_mut().unwrap().partition_replicas = partition_replicas; + } + } +} diff --git a/api/sc-api/src/flv_topic_composition.rs b/api/sc-api/src/flv_topic_composition.rs new file mode 100644 index 0000000000..071994cf89 --- /dev/null +++ b/api/sc-api/src/flv_topic_composition.rs @@ -0,0 +1,89 @@ +//! +//! # Topic Composition +//! +//! API that allows CLI to fetch topic composition: Live Replicas and SPUs +//! +use kf_protocol::api::Request; +use kf_protocol::derive::Decode; +use kf_protocol::derive::Encode; +use kf_protocol::api::FlvErrorCode; + +use crate::ScApiKey; + +// ----------------------------------- +// FlvTopicCompositionRequest +// ----------------------------------- + +/// Use id to fetch one entry, None to fetch all +#[derive(Decode, Encode, Default, Debug)] +pub struct FlvTopicCompositionRequest { + pub topic_names: Vec, +} + +// ----------------------------------- +// FlvTopicCompositionResponse +// ----------------------------------- + +#[derive(Encode, Decode, Default, Debug)] +pub struct FlvTopicCompositionResponse { + /// The topics requested + pub topics: Vec, + + /// The SPUs associated with replica assignment of the topics + pub spus: Vec, +} + +#[derive(Encode, Decode, Default, Debug)] +pub struct FetchTopicReponse { + /// The error code, None for no errors + pub error_code: FlvErrorCode, + + /// The topic name + pub name: String, + + /// The partitions associated with the topic + pub partitions: Vec, +} + +#[derive(Encode, Decode, Default, Debug)] +pub struct FetchPartitionResponse { + /// The error code, None for no errors + pub error_code: FlvErrorCode, + + /// The partition index. + pub partition_idx: i32, + + /// The id of the leader SPU. + pub leader_id: i32, + + /// The set of all spus that host this partition. 
+ pub replicas: Vec, + + /// The set of all live replica spus that host this partition. + pub live_replicas: Vec, +} + +#[derive(Encode, Decode, Default, Debug)] +pub struct FetchSpuReponse { + /// The error code, None for no errors + pub error_code: FlvErrorCode, + + /// The spu ID. + pub spu_id: i32, + + /// The spu public hostname. + pub host: String, + + /// The spu public port. + pub port: u16, +} + +// ----------------------------------- +// Implementation +// ----------------------------------- + +impl Request for FlvTopicCompositionRequest { + const API_KEY: u16 = ScApiKey::FlvTopicComposition as u16; + const DEFAULT_API_VERSION: i16 = 1; + type Response = FlvTopicCompositionResponse; +} diff --git a/api/sc-api/src/lib.rs b/api/sc-api/src/lib.rs new file mode 100644 index 0000000000..906d88541a --- /dev/null +++ b/api/sc-api/src/lib.rs @@ -0,0 +1,54 @@ +mod api_key; +mod flv_create_topics; +mod flv_delete_topics; +mod flv_create_custom_spus; +mod flv_delete_custom_spus; +mod flv_fetch_spus; +mod flv_create_spu_groups; +mod flv_delete_spu_groups; +mod flv_fetch_spu_groups; +mod flv_fetch_topics; +mod flv_topic_composition; +mod api_versions; +mod public_api; +mod common; + +pub use api_key::ScApiKey; +pub use public_api::PublicRequest; + +pub use crate::common::flv_response_message::FlvResponseMessage; + +pub mod apis { + pub use crate::api_key::*; +} + +pub mod versions { + pub use crate::api_versions::*; +} + +pub mod errors { + pub use kf_protocol::api::FlvErrorCode; +} + +pub mod spu { + pub use crate::flv_create_custom_spus::*; + pub use crate::flv_delete_custom_spus::*; + pub use crate::flv_fetch_spus::*; + + pub use crate::flv_create_spu_groups::*; + pub use crate::flv_delete_spu_groups::*; + pub use crate::flv_fetch_spu_groups::*; + + pub use crate::common::flv_spus::*; +} + +pub mod topic { + pub use crate::flv_create_topics::*; + pub use crate::flv_delete_topics::*; + pub use crate::flv_fetch_topics::*; + pub use crate::flv_topic_composition::*; + + pub use metadata::topic::TopicSpec as FlvTopicSpecMetadata; + pub use metadata::topic::PartitionMap as FlvTopicPartitionMap; + pub use metadata::topic::TopicResolution as FlvTopicResolution; +} diff --git a/api/sc-api/src/public_api.rs b/api/sc-api/src/public_api.rs new file mode 100644 index 0000000000..6992c56d40 --- /dev/null +++ b/api/sc-api/src/public_api.rs @@ -0,0 +1,118 @@ +//! +//! # API Requests +//! +//! Maps SC Api Requests with their associated Responses. +//! 
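The lib.rs facade above groups the request types by domain (`topic`, `spu`, `versions`, `errors`) rather than by file, so downstream imports go through those modules. A sketch, assuming this diff's crate names:

```rust
use sc_api::spu::FlvFetchSpusRequest;
use sc_api::topic::FlvCreateTopicsRequest;

// Empty requests; both derive Default (validate_only = false, spu type = All).
fn defaults() -> (FlvCreateTopicsRequest, FlvFetchSpusRequest) {
    (FlvCreateTopicsRequest::default(), FlvFetchSpusRequest::default())
}
```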
+ +use std::convert::TryInto; +use std::io::Error as IoError; + +use log::trace; + +use kf_protocol::bytes::Buf; + +use kf_protocol::api::KfRequestMessage; +use kf_protocol::api::RequestHeader; +use kf_protocol::api::RequestMessage; + +use kf_protocol::api::api_decode; +use kf_protocol::derive::Encode; + +use kf_protocol::message::metadata::KfMetadataRequest; + +use crate::versions::ApiVersionsRequest; +use crate::spu::FlvCreateCustomSpusRequest; +use crate::spu::FlvDeleteCustomSpusRequest; +use crate::spu::FlvFetchSpusRequest; +use crate::spu::FlvCreateSpuGroupsRequest; +use crate::spu::FlvFetchSpuGroupsRequest; +use crate::spu::FlvDeleteSpuGroupsRequest; +use crate::topic::FlvCreateTopicsRequest; +use crate::topic::FlvDeleteTopicsRequest; +use crate::topic::FlvFetchTopicsRequest; +use crate::topic::FlvTopicCompositionRequest; + +use super::ScApiKey; + +#[derive(Debug, Encode)] +pub enum PublicRequest { + // Mixed + ApiVersionsRequest(RequestMessage), + + // Kafka + KfMetadataRequest(RequestMessage), + + // Fluvio - Topics + FlvCreateTopicsRequest(RequestMessage), + FlvDeleteTopicsRequest(RequestMessage), + FlvFetchTopicsRequest(RequestMessage), + FlvTopicCompositionRequest(RequestMessage), + + // Fluvio - Spus + FlvCreateCustomSpusRequest(RequestMessage), + FlvDeleteCustomSpusRequest(RequestMessage), + FlvFetchSpusRequest(RequestMessage), + + FlvCreateSpuGroupsRequest(RequestMessage), + FlvDeleteSpuGroupsRequest(RequestMessage), + FlvFetchSpuGroupsRequest(RequestMessage), +} + +impl Default for PublicRequest { + fn default() -> PublicRequest { + PublicRequest::ApiVersionsRequest(RequestMessage::::default()) + } +} + +impl KfRequestMessage for PublicRequest { + type ApiKey = ScApiKey; + + fn decode_with_header(src: &mut T, header: RequestHeader) -> Result + where + Self: Default + Sized, + Self::ApiKey: Sized, + T: Buf, + { + trace!("decoding header: {:#?}", header); + match header.api_key().try_into()? 
{ + // Mixed + ScApiKey::ApiVersion => api_decode!(PublicRequest, ApiVersionsRequest, src, header), + + //Kafka + ScApiKey::KfMetadata => api_decode!(PublicRequest, KfMetadataRequest, src, header), + + // Fluvio - Topics + ScApiKey::FlvCreateTopics => { + api_decode!(PublicRequest, FlvCreateTopicsRequest, src, header) + } + ScApiKey::FlvDeleteTopics => { + api_decode!(PublicRequest, FlvDeleteTopicsRequest, src, header) + } + ScApiKey::FlvFetchTopics => { + api_decode!(PublicRequest, FlvFetchTopicsRequest, src, header) + } + ScApiKey::FlvTopicComposition => { + api_decode!(PublicRequest, FlvTopicCompositionRequest, src, header) + } + + // Fluvio - Custom Spus / Spu Groups + ScApiKey::FlvCreateCustomSpus => { + api_decode!(PublicRequest, FlvCreateCustomSpusRequest, src, header) + } + ScApiKey::FlvDeleteCustomSpus => { + api_decode!(PublicRequest, FlvDeleteCustomSpusRequest, src, header) + } + ScApiKey::FlvFetchSpus => api_decode!(PublicRequest, FlvFetchSpusRequest, src, header), + + ScApiKey::FlvCreateSpuGroups => { + api_decode!(PublicRequest, FlvCreateSpuGroupsRequest, src, header) + } + ScApiKey::FlvDeleteSpuGroups => { + api_decode!(PublicRequest, FlvDeleteSpuGroupsRequest, src, header) + } + ScApiKey::FlvFetchSpuGroups => { + api_decode!(PublicRequest, FlvFetchSpuGroupsRequest, src, header) + } + } + } +} diff --git a/api/spu-api/Cargo.toml b/api/spu-api/Cargo.toml new file mode 100644 index 0000000000..ef0a000e2e --- /dev/null +++ b/api/spu-api/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "spu-api" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] +edition = "2018" + +[dependencies] +log = "0.4.6" +kf-protocol = { path = "../../kf-protocol"} +kf-socket = {path = "../../kf-socket"} +utils = { path= "../../utils"} +serde = { version ="1.0.82", features = ['derive'] } \ No newline at end of file diff --git a/api/spu-api/src/api_key.rs b/api/spu-api/src/api_key.rs new file mode 100644 index 0000000000..f927c70947 --- /dev/null +++ b/api/spu-api/src/api_key.rs @@ -0,0 +1,24 @@ + +use kf_protocol::derive::Encode; +use kf_protocol::derive::Decode; + +#[derive(PartialEq, Debug, Encode, Decode, Clone, Copy)] +#[repr(u16)] +pub enum SpuApiKey { + // Mixed + ApiVersion = 18, + + // Kafka + KfProduce = 0, + KfFetch = 1, + + // Fluvio + FlvFetchLocalSpu = 1001, + FlvFetchOffsets = 1002 +} + +impl Default for SpuApiKey { + fn default() -> SpuApiKey { + SpuApiKey::ApiVersion + } +} diff --git a/api/spu-api/src/api_versions.rs b/api/spu-api/src/api_versions.rs new file mode 100644 index 0000000000..9942e3aaa4 --- /dev/null +++ b/api/spu-api/src/api_versions.rs @@ -0,0 +1,47 @@ +//! +//! # API Versions +//! +//! Public API to retrive a list of APIs and their version numbers from the SPU. +//! SPU supports Kafka as well as Fluvio specific APIs. +//! 
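`SpuApiKey` above deliberately shares its wire key space with Kafka (`KfProduce = 0`, `KfFetch = 1`, `ApiVersion = 18`) while Fluvio-specific keys start at 1001. A routing sketch; the helper is illustrative, not in this diff:

```rust
use spu_api::SpuApiKey;

// True for the keys that are part of the Kafka wire protocol proper.
fn is_kafka_compat(key: SpuApiKey) -> bool {
    matches!(
        key,
        SpuApiKey::KfProduce | SpuApiKey::KfFetch | SpuApiKey::ApiVersion
    )
}
```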
diff --git a/api/spu-api/src/api_versions.rs b/api/spu-api/src/api_versions.rs
new file mode 100644
index 0000000000..9942e3aaa4
--- /dev/null
+++ b/api/spu-api/src/api_versions.rs
@@ -0,0 +1,47 @@
+//!
+//! # API Versions
+//!
+//! Public API to retrieve a list of APIs and their version numbers from the SPU.
+//! The SPU supports Kafka as well as Fluvio-specific APIs.
+//!
+
+use kf_protocol::api::Request;
+use kf_protocol::derive::{Decode, Encode};
+use kf_protocol::api::FlvErrorCode;
+
+use crate::SpuApiKey;
+
+pub type ApiVersions = Vec<ApiVersionKey>;
+
+// -----------------------------------
+// ApiVersionsRequest
+// -----------------------------------
+
+#[derive(Decode, Encode, Default, Debug)]
+pub struct ApiVersionsRequest {}
+
+// -----------------------------------
+// ApiVersionsResponse
+// -----------------------------------
+
+#[derive(Decode, Encode, Default, Debug)]
+pub struct ApiVersionsResponse {
+    pub error_code: FlvErrorCode,
+    pub api_keys: Vec<ApiVersionKey>,
+}
+
+#[derive(Decode, Encode, Default, Debug)]
+pub struct ApiVersionKey {
+    pub api_key: i16,
+    pub min_version: i16,
+    pub max_version: i16,
+}
+
+// -----------------------------------
+// Implementation - ApiVersionsRequest
+// -----------------------------------
+
+impl Request for ApiVersionsRequest {
+    const API_KEY: u16 = SpuApiKey::ApiVersion as u16;
+    type Response = ApiVersionsResponse;
+}
diff --git a/api/spu-api/src/flv_fetch_local_spu.rs b/api/spu-api/src/flv_fetch_local_spu.rs
new file mode 100644
index 0000000000..eb0e3c5029
--- /dev/null
+++ b/api/spu-api/src/flv_fetch_local_spu.rs
@@ -0,0 +1,65 @@
+//!
+//! # Fetch Local SPU
+//!
+//! Public API to fetch local SPU metadata from the SPU
+//!
+use kf_protocol::api::Request;
+use kf_protocol::derive::Decode;
+use kf_protocol::derive::Encode;
+
+use crate::SpuApiKey;
+use crate::errors::FlvErrorCode;
+
+// -----------------------------------
+// FlvFetchLocalSpuRequest
+// -----------------------------------
+
+#[derive(Decode, Encode, Default, Debug)]
+pub struct FlvFetchLocalSpuRequest {}
+
+// -----------------------------------
+// FlvFetchLocalSpuResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Default, Debug)]
+pub struct FlvFetchLocalSpuResponse {
+    /// Spu lookup error code, None for no error
+    pub error_code: FlvErrorCode,
+
+    /// Spu name. A unique key in Key/Value stores such as Kubernetes.
+    pub name: String,
+
+    /// Spu id. Managed Spu ids start from 0. Custom SPU ids start from 5000.
+    pub id: i32,
+
+    /// Spu type: true for managed, false for custom.
+    pub managed: bool,
+
+    /// Public endpoint server interface.
+    pub public_ep: EndPointMetadata,
+
+    /// Private endpoint server interface.
+    pub private_ep: EndPointMetadata,
+
+    /// Rack label, optional parameter used by replica assignment algorithm.
+    pub rack: Option<String>,
+}
+
+#[derive(Encode, Decode, Default, Debug)]
+pub struct EndPointMetadata {
+    /// Port of the endpoint
+    pub port: u16,
+
+    /// Host name of the endpoint
+    pub host: String,
+}
+
+// -----------------------------------
+// Implementation - FlvFetchLocalSpuRequest
+// -----------------------------------
+
+impl Request for FlvFetchLocalSpuRequest {
+    const API_KEY: u16 = SpuApiKey::FlvFetchLocalSpu as u16;
+    const DEFAULT_API_VERSION: i16 = 0;
+    type Response = FlvFetchLocalSpuResponse;
+}
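Editor's sketch (not part of the patch): how a client might drive `FlvFetchLocalSpuRequest` end to end, assuming the `KfSocket` connect/send API that `cli/src/common/connection.rs` uses later in this patch. Error handling is collapsed to strings for brevity.

```rust
use std::net::SocketAddr;

use kf_protocol::api::RequestMessage;
use kf_socket::KfSocket;
use spu_api::spus::FlvFetchLocalSpuRequest;

// Connect to an SPU and print its identity; the address is a placeholder.
async fn fetch_local_spu(addr: &SocketAddr) -> Result<(), String> {
    let mut socket = KfSocket::connect(addr)
        .await
        .map_err(|err| format!("{}", err))?;
    let req_msg = RequestMessage::new_request(FlvFetchLocalSpuRequest {});
    let response = socket
        .send(&req_msg)
        .await
        .map_err(|err| format!("{}", err))?;
    let spu = response.response;
    println!("spu '{}' (id {}), managed: {}", spu.name, spu.id, spu.managed);
    Ok(())
}
```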
diff --git a/api/spu-api/src/flv_fetch_offset.rs b/api/spu-api/src/flv_fetch_offset.rs
new file mode 100644
index 0000000000..f29f0d226e
--- /dev/null
+++ b/api/spu-api/src/flv_fetch_offset.rs
@@ -0,0 +1,80 @@
+//!
+//! # Fetch Topic Offsets
+//!
+//! API that allows the CLI to fetch topic offsets.
+//!
+use kf_protocol::api::Request;
+use kf_protocol::derive::Decode;
+use kf_protocol::derive::Encode;
+
+use crate::SpuApiKey;
+use crate::errors::FlvErrorCode;
+
+// -----------------------------------
+// FlvFetchOffsetsRequest
+// -----------------------------------
+
+#[derive(Decode, Encode, Default, Debug)]
+pub struct FlvFetchOffsetsRequest {
+    /// Each topic in the request.
+    pub topics: Vec<FetchOffsetTopic>,
+}
+
+#[derive(Decode, Encode, Default, Debug)]
+pub struct FetchOffsetTopic {
+    /// The topic name.
+    pub name: String,
+
+    /// Each partition in the request.
+    pub partitions: Vec<FetchOffsetPartition>,
+}
+
+#[derive(Decode, Encode, Default, Debug)]
+pub struct FetchOffsetPartition {
+    /// The partition index.
+    pub partition_index: i32,
+}
+
+// -----------------------------------
+// FlvFetchOffsetsResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Default, Debug)]
+pub struct FlvFetchOffsetsResponse {
+    /// Each topic offset in the response.
+    pub topics: Vec<FetchOffsetTopicResponse>,
+}
+
+#[derive(Encode, Decode, Default, Debug)]
+pub struct FetchOffsetTopicResponse {
+    /// The topic name
+    pub name: String,
+
+    /// Each partition in the response.
+    pub partitions: Vec<FetchOffsetPartitionResponse>,
+}
+
+#[derive(Encode, Decode, Default, Debug)]
+pub struct FetchOffsetPartitionResponse {
+    /// The partition error code, None for no error
+    pub error_code: FlvErrorCode,
+
+    /// The partition index.
+    pub partition_index: i32,
+
+    /// First readable offset.
+    pub start_offset: i64,
+
+    /// Last readable offset
+    pub last_stable_offset: i64,
+}
+
+// -----------------------------------
+// Implementation - FlvFetchOffsetsRequest
+// -----------------------------------
+
+impl Request for FlvFetchOffsetsRequest {
+    const API_KEY: u16 = SpuApiKey::FlvFetchOffsets as u16;
+    const DEFAULT_API_VERSION: i16 = 0;
+    type Response = FlvFetchOffsetsResponse;
+}
diff --git a/api/spu-api/src/lib.rs b/api/spu-api/src/lib.rs
new file mode 100644
index 0000000000..fdbdb0a16f
--- /dev/null
+++ b/api/spu-api/src/lib.rs
@@ -0,0 +1,24 @@
+mod api_key;
+mod public_api;
+mod api_versions;
+mod flv_fetch_offset;
+mod flv_fetch_local_spu;
+
+pub use self::api_key::SpuApiKey;
+pub use self::public_api::PublicRequest;
+
+pub mod versions {
+    pub use crate::api_versions::*;
+}
+
+pub mod errors {
+    pub use kf_protocol::api::FlvErrorCode;
+}
+
+pub mod spus {
+    pub use crate::flv_fetch_local_spu::*;
+}
+
+pub mod offsets {
+    pub use crate::flv_fetch_offset::*;
+}
\ No newline at end of file
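The `lib.rs` re-exports group the flat modules into a small public namespace. For illustration (editor's sketch), a caller builds a fetch-offsets request through those paths using only the structs defined above:

```rust
use spu_api::offsets::{FetchOffsetPartition, FetchOffsetTopic, FlvFetchOffsetsRequest};

// Build a request for a single topic/partition pair.
fn offsets_request_for(topic: &str, partition: i32) -> FlvFetchOffsetsRequest {
    FlvFetchOffsetsRequest {
        topics: vec![FetchOffsetTopic {
            name: topic.to_owned(),
            partitions: vec![FetchOffsetPartition {
                partition_index: partition,
            }],
        }],
    }
}
```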
diff --git a/api/spu-api/src/public_api.rs b/api/spu-api/src/public_api.rs
new file mode 100644
index 0000000000..1dc9469910
--- /dev/null
+++ b/api/spu-api/src/public_api.rs
@@ -0,0 +1,77 @@
+// ApiRequest and Response that include all requests and responses
+// used for generic dump and client
+
+use log::trace;
+use std::convert::TryInto;
+use std::io::Error as IoError;
+
+use kf_protocol::bytes::Buf;
+use kf_protocol::Decoder;
+use kf_protocol::derive::Encode;
+
+use kf_protocol::api::KfRequestMessage;
+
+use kf_protocol::api::api_decode;
+use kf_protocol::message::produce::DefaultKfProduceRequest;
+use kf_protocol::api::RequestHeader;
+use kf_protocol::api::RequestMessage;
+use kf_socket::KfFileFetchRequest;
+
+use crate::SpuApiKey;
+use crate::spus::FlvFetchLocalSpuRequest;
+use crate::offsets::FlvFetchOffsetsRequest;
+use crate::versions::ApiVersionsRequest;
+
+#[derive(Debug, Encode)]
+pub enum PublicRequest {
+    // Mixed
+    ApiVersionsRequest(RequestMessage<ApiVersionsRequest>),
+
+    // Kafka
+    KfProduceRequest(RequestMessage<DefaultKfProduceRequest>),
+    KfFileFetchRequest(RequestMessage<KfFileFetchRequest>),
+
+    // Fluvio
+    FlvFetchLocalSpuRequest(RequestMessage<FlvFetchLocalSpuRequest>),
+    FlvFetchOffsetsRequest(RequestMessage<FlvFetchOffsetsRequest>),
+}
+
+impl Default for PublicRequest {
+    fn default() -> PublicRequest {
+        PublicRequest::ApiVersionsRequest(RequestMessage::<ApiVersionsRequest>::default())
+    }
+}
+
+impl KfRequestMessage for PublicRequest {
+    type ApiKey = SpuApiKey;
+
+    fn decode_with_header<T>(src: &mut T, header: RequestHeader) -> Result<Self, IoError>
+    where
+        Self: Default + Sized,
+        Self::ApiKey: Sized,
+        T: Buf,
+    {
+        trace!("decoding with header: {:#?}", header);
+        match header.api_key().try_into()? {
+            // Mixed
+            SpuApiKey::ApiVersion => api_decode!(PublicRequest, ApiVersionsRequest, src, header),
+
+            // Kafka
+            SpuApiKey::KfProduce => {
+                let request = DefaultKfProduceRequest::decode_from(src, header.api_version())?;
+                Ok(PublicRequest::KfProduceRequest(RequestMessage::new(
+                    header, request,
+                )))
+            }
+            SpuApiKey::KfFetch => api_decode!(PublicRequest, KfFileFetchRequest, src, header),
+
+            // Fluvio
+            SpuApiKey::FlvFetchLocalSpu => {
+                api_decode!(PublicRequest, FlvFetchLocalSpuRequest, src, header)
+            }
+            SpuApiKey::FlvFetchOffsets => {
+                api_decode!(PublicRequest, FlvFetchOffsetsRequest, src, header)
+            }
+        }
+    }
+}
diff --git a/cli/Cargo.toml b/cli/Cargo.toml
new file mode 100644
index 0000000000..1954d2cac7
--- /dev/null
+++ b/cli/Cargo.toml
@@ -0,0 +1,35 @@
+[package]
+name = "fluvio"
+version = "0.1.0-alpha.1"
+edition = "2018"
+authors = ["fluvio.io"]
+
+
+[[bin]]
+name = "fluvio"
+path = "src/bin/main.rs"
+doc = false
+
+
+[dependencies]
+log = "0.4.6"
+dirs = "1.0.2"
+bytes = "0.4.12"
+structopt = { version = "0.2.14", default-features = false }
+toml = "0.5.0"
+prettytable-rs = "0.8.0"
+rand = "0.6.0"
+serde = { version = "1.0.82", features = ['derive'] }
+serde_json = "1.0.39"
+serde_yaml = "0.8.8"
+ctrlc = "3.1.3"
+regex = "1.1.6"
+futures-preview = { version = "0.3.0-alpha.17", features = ['nightly','async-await'] }
+future-helper = { path = "../future-helper" }
+kf-protocol = { path = "../kf-protocol"}
+kf-socket = { path = "../kf-socket" }
+sc-api = { path = "../api/sc-api"}
+spu-api = { path = "../api/spu-api"}
+utils = { path= "../utils"}
+types = { path ="../types"}
+k8-metadata = { path = "../k8-metadata"}
diff --git a/cli/README.md b/cli/README.md
new file mode 100644
index 0000000000..d94cc70acc
--- /dev/null
+++ b/cli/README.md
@@ -0,0 +1,94 @@
+# Command Line Interface
+
+Fluvio Command Line Interface (aka CLI) is the primary communication mechanism for a Fluvio cluster. A cluster ships three binaries:
+* Command Line Interface (fluvio)
+* Streaming Controller Server (sc-server)
+* Streaming Processing Unit (spu-server)
+
+#### Kafka
+Fluvio CLI is also compatible with Kafka 2.x. Fluvio makes it easy for system administrators to provision Kafka and Fluvio environments through a common, user-friendly command line interface.
+
+
+#### Profiles
+Fluvio CLI uses profile files to store the most common parameters, which makes it convenient to communicate with an SC, an SPU, or a Kafka environment.
+
+
+## Produce CLI
+
+The produce CLI can ingest one or more log records in a single session. Topic and partition are mandatory parameters; all others are optional.
+
+```
+Write log records to a topic/partition
+
+USAGE:
+    fluvio produce [OPTIONS] --partition <integer> --topic <string>
+
+FLAGS:
+    -h, --help    Prints help information
+
+OPTIONS:
+    -t, --topic <string>                 Topic name
+    -p, --partition <integer>            Partition id
+    -l, --record-per-line <filename>     Each line is a Record
+    -r, --record-file <filename>...      Entire file is a Record (multiple)
+    -c, --sc <host:port>                 Address of Streaming Controller
+    -s, --spu <host:port>                Address of Streaming Processing Unit
+    -k, --kf <host:port>                 Address of Kafka Controller
+    -e, --profile <profile>              Profile name
+```
+
+A produce command for a topic/partition is sent to the SC to look up the SPU that hosts the partition leader. Log records are then sent directly to that SPU.
+
+The produce command can also send log messages to Kafka. Choose any broker address and the system will identify the broker that hosts the leader and forward the log records accordingly.
+
+Log records sent directly to an SPU are accepted if the SPU is the leader for the topic/partition, and rejected otherwise.
+
+
+#### Interactive CLI
+
+A produce command called with a topic/partition opens an interactive session, where each line is interpreted as a record:
+
+```
+> fluvio produce -t topic-1 -p 0
+line 1
+> Ok!
+line 2
+> Ok!
+<Ctrl>-C or <Ctrl>-D to exit (interactive session)
+```
+
+#### File Records
+
+Fluvio produce can ingest log records from files as follows:
+* Record per File
+* Record per Line
+
+Record encapsulation is important for binary or JSON objects, where the object must be stored in its entirety to be interpreted correctly by the consumer.
+
+
+#### Record per File
+
+Use the ***record-file*** parameter to ingest an entire file as a single record. One or more files may be sent in a single command.
+
+```
+> fluvio produce -t topic-1 -p 0 -r json-file1.txt -r json-file2.txt
+{"name": "john doe"}
+> Ok!
+{"name": "jane doe"}
+> Ok!
+```
+
+#### Record per Line
+
+Use the ***record-per-line*** parameter to ingest one record per line until the end of the file is reached. Note: use a text-based file so that line endings are properly interpreted.
+
+```
+> fluvio produce -t topic-1 -p 0 -l my-file.txt
+Lorem Ipsum is simply dummy text
+> Ok!
+Lorem Ipsum has been the industry's standard since 1500's.
+> Ok!
+It has survived over five centuries
+> Ok!
+```
+
diff --git a/cli/src/advanced/generate.rs b/cli/src/advanced/generate.rs
new file mode 100644
index 0000000000..7378afff3b
--- /dev/null
+++ b/cli/src/advanced/generate.rs
@@ -0,0 +1,69 @@
+//!
+//! # Generate Template
+//!
+//! Generate a template for a Request API
+//!
+use structopt::StructOpt;
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+
+use kf_protocol::message::api_versions::KfApiVersionsRequest;
+use kf_protocol::message::offset::KfListOffsetRequest;
+use kf_protocol::message::group::KfListGroupsRequest;
+use kf_protocol::message::group::KfJoinGroupRequest;
+use kf_protocol::message::group::KfSyncGroupRequest;
+use kf_protocol::message::group::KfLeaveGroupRequest;
+use kf_protocol::message::group::KfDescribeGroupsRequest;
+use kf_protocol::message::group::KfDeleteGroupsRequest;
+use kf_protocol::message::group::KfFindCoordinatorRequest;
+use kf_protocol::message::group::KfHeartbeatRequest;
+use kf_protocol::message::metadata::KfMetadataRequest;
+use kf_protocol::message::isr::KfLeaderAndIsrRequest;
+use kf_protocol::message::offset::KfOffsetFetchRequest;
+
+use crate::advanced::RequestApi;
+use crate::error::CliError;
+
+#[derive(Debug, StructOpt)]
+pub struct GenerateTemplateOpt {
+    /// Request API
+    #[structopt(
+        short = "r",
+        long = "request",
+        value_name = "<api>",
+        raw(possible_values = "&RequestApi::variants()", case_insensitive = "true")
+    )]
+    request: RequestApi,
+}
+
+// -----------------------------------
+// CLI Processing
+// -----------------------------------
+
+/// Parse request API and generate template
+pub fn process_generate_template(opt: GenerateTemplateOpt) -> Result<(), CliError> {
+    let json = match opt.request {
+        RequestApi::ApiVersions => serde_json::to_string_pretty(&KfApiVersionsRequest::default()),
+        RequestApi::ListOffset => serde_json::to_string_pretty(&KfListOffsetRequest::default()),
+        RequestApi::Metadata => serde_json::to_string_pretty(&KfMetadataRequest::default()),
+        RequestApi::LeaderAndIsr => serde_json::to_string_pretty(&KfLeaderAndIsrRequest::default()),
+        RequestApi::FindCoordinator => {
+            serde_json::to_string_pretty(&KfFindCoordinatorRequest::default())
+        }
+        RequestApi::JoinGroup => serde_json::to_string_pretty(&KfJoinGroupRequest::default()),
+        RequestApi::SyncGroup => serde_json::to_string_pretty(&KfSyncGroupRequest::default()),
+        RequestApi::LeaveGroup => serde_json::to_string_pretty(&KfLeaveGroupRequest::default()),
+        RequestApi::DescribeGroups => {
+            serde_json::to_string_pretty(&KfDescribeGroupsRequest::default())
+        }
+        RequestApi::ListGroups => serde_json::to_string_pretty(&KfListGroupsRequest::default()),
+        RequestApi::DeleteGroups => serde_json::to_string_pretty(&KfDeleteGroupsRequest::default()),
+        RequestApi::Heartbeat => serde_json::to_string_pretty(&KfHeartbeatRequest::default()),
+        RequestApi::OffsetFetch => serde_json::to_string_pretty(&KfOffsetFetchRequest::default()),
+    };
+
+    let result = json.map_err(|err| IoError::new(ErrorKind::InvalidData, format!("{}", err)))?;
+    println!("{}", result);
+
+    Ok(())
+}
diff --git a/cli/src/advanced/mod.rs b/cli/src/advanced/mod.rs
new file mode 100644
index 0000000000..7c177ed7ce
--- /dev/null
+++ b/cli/src/advanced/mod.rs
@@ -0,0 +1,42 @@
+mod generate;
+mod run;
+mod request_api;
+
+use structopt::StructOpt;
+
+use generate::GenerateTemplateOpt;
+use run::RunRequestOpt;
+use request_api::RequestApi;
+
+pub use generate::process_generate_template;
+pub use run::process_run_request;
+pub use request_api::send_request_to_server;
+pub use request_api::parse_request_from_file;
+
+use crate::error::CliError;
+
+#[derive(Debug, StructOpt)]
+pub enum AdvancedOpt {
+    #[structopt(name = "generate", author = "", template = "{about}
+
+{usage}
+
+{all-args}
+", about = "Generate a request template")]
+    Generate(GenerateTemplateOpt),
+
+    #[structopt(name = "run", author = "", template = "{about}
+
+{usage}
+
+{all-args}
+", about = "Send request to server")]
+    Run(RunRequestOpt),
+}
+
+pub fn process_advanced(opt: AdvancedOpt) -> Result<(), CliError> {
+    match opt {
+        AdvancedOpt::Generate(generate_opt) => process_generate_template(generate_opt),
+        AdvancedOpt::Run(run_opt) => process_run_request(run_opt),
+    }
+}
diff --git a/cli/src/advanced/request_api.rs b/cli/src/advanced/request_api.rs
new file mode 100644
index 0000000000..c1ed40d0ab
--- /dev/null
+++ b/cli/src/advanced/request_api.rs
@@ -0,0 +1,75 @@
+//!
+//! # Request API
+//!
+//! Defines supported APIs and provides request functionality
+//!
+use std::net::SocketAddr;
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+use std::fs::read_to_string;
+use std::path::Path;
+
+use structopt::clap::arg_enum;
+use serde_json::to_string_pretty;
+use serde::Serialize;
+use serde::de::DeserializeOwned;
+
+use kf_protocol::api::Request;
+use future_helper::run_block_on;
+
+use crate::error::CliError;
+use crate::common::connect_and_send_request;
+
+// -----------------------------------
+// Request Api
+// -----------------------------------
+
+arg_enum! {
+    #[derive(Debug, Clone, PartialEq)]
+    #[allow(non_camel_case_types)]
+    pub enum RequestApi {
+        ApiVersions,
+        ListOffset,
+        Metadata,
+        LeaderAndIsr,
+        FindCoordinator,
+        JoinGroup,
+        SyncGroup,
+        LeaveGroup,
+        ListGroups,
+        DescribeGroups,
+        DeleteGroups,
+        Heartbeat,
+        OffsetFetch,
+    }
+}
+
+// -----------------------------------
+// Implementation - Generics
+// -----------------------------------
+
+/// Parse from file and return Request object
+pub fn parse_request_from_file<P, R>(file_path: P) -> Result<R, CliError>
+where
+    P: AsRef<Path>,
+    R: DeserializeOwned,
+{
+    let file_str: String = read_to_string(file_path)?;
+    let request: R = serde_json::from_str(&file_str)
+        .map_err(|err| IoError::new(ErrorKind::InvalidData, format!("{}", err)))?;
+    Ok(request)
+}
+
+// Connect to the target server and process request
+pub fn send_request_to_server<R>(server_addr: SocketAddr, request: R) -> Result<(), CliError>
+where
+    R: Request + Send + Sync + 'static,
+    R::Response: Send + Sync + Serialize,
+{
+    // Send request without a version number, max_version will be used
+    let response = run_block_on(connect_and_send_request(server_addr, request, None))?;
+    let result = to_string_pretty(&response)
+        .map_err(|err| IoError::new(ErrorKind::InvalidData, format!("{}", err)))?;
+    println!("{}", result);
+    Ok(())
+}
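Together, these two generic helpers form the file-driven request path used by `run.rs` below. An editor's sketch of a direct caller (the file name and address are placeholders):

```rust
use std::net::SocketAddr;

use kf_protocol::message::metadata::KfMetadataRequest;

use crate::advanced::{parse_request_from_file, send_request_to_server};
use crate::error::CliError;

// Deserialize a Metadata request from JSON and replay it against a server.
fn replay_metadata(addr: SocketAddr) -> Result<(), CliError> {
    let req: KfMetadataRequest = parse_request_from_file("metadata.json")?;
    send_request_to_server(addr, req)
}
```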
diff --git a/cli/src/advanced/run.rs b/cli/src/advanced/run.rs
new file mode 100644
index 0000000000..f97921a345
--- /dev/null
+++ b/cli/src/advanced/run.rs
@@ -0,0 +1,157 @@
+//!
+//! # Run API request
+//!
+//! Send Request to server
+//!
+
+use structopt::StructOpt;
+use std::path::PathBuf;
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+
+use kf_protocol::message::offset::KfListOffsetRequest;
+use kf_protocol::message::api_versions::KfApiVersionsRequest;
+use kf_protocol::message::group::KfListGroupsRequest;
+use kf_protocol::message::group::KfJoinGroupRequest;
+use kf_protocol::message::group::KfSyncGroupRequest;
+use kf_protocol::message::group::KfLeaveGroupRequest;
+use kf_protocol::message::group::KfDescribeGroupsRequest;
+use kf_protocol::message::group::KfDeleteGroupsRequest;
+use kf_protocol::message::group::KfFindCoordinatorRequest;
+use kf_protocol::message::group::KfHeartbeatRequest;
+use kf_protocol::message::metadata::KfMetadataRequest;
+use kf_protocol::message::isr::KfLeaderAndIsrRequest;
+use kf_protocol::message::offset::KfOffsetFetchRequest;
+
+use crate::error::CliError;
+
+use crate::profile::{ProfileConfig, TargetServer};
+use crate::advanced::RequestApi;
+use crate::advanced::send_request_to_server;
+use crate::advanced::parse_request_from_file;
+
+// -----------------------------------
+// CLI Options
+// -----------------------------------
+
+#[derive(Debug, StructOpt)]
+pub struct RunRequestOpt {
+    /// Request API
+    #[structopt(
+        short = "r",
+        long = "request",
+        value_name = "<api>",
+        raw(possible_values = "&RequestApi::variants()", case_insensitive = "true")
+    )]
+    request: RequestApi,
+
+    /// Address of Kafka Controller
+    #[structopt(short = "k", long = "kf", value_name = "host:port")]
+    kf: Option<String>,
+
+    /// Request details file
+    #[structopt(
+        short = "j",
+        long = "json-file",
+        value_name = "file.json",
+        parse(from_os_str)
+    )]
+    details_file: PathBuf,
+
+    /// Profile name
+    #[structopt(short = "P", long = "profile")]
+    pub profile: Option<String>,
+}
+
+// -----------------------------------
+// CLI Processing
+// -----------------------------------
+
+/// Parse CLI, build server address, run request & display result
+pub fn process_run_request(opt: RunRequestOpt) -> Result<(), CliError> {
+    let profile_config = ProfileConfig::new(&None, &opt.kf, &opt.profile)?;
+    let target_server = profile_config.target_server()?;
+    let server_addr = match target_server {
+        TargetServer::Kf(server_addr) => server_addr,
+        TargetServer::Sc(server_addr) => server_addr,
+        _ => {
+            return Err(CliError::IoError(IoError::new(
+                ErrorKind::Other,
+                format!("invalid Kafka server {:?}", target_server),
+            )))
+        }
+    };
+
+    // process file based on request type
+    match opt.request {
+        RequestApi::ApiVersions => {
+            let req =
+                parse_request_from_file::<_, KfApiVersionsRequest>(opt.details_file.clone())?;
+            send_request_to_server(server_addr, req)
+        }
+        RequestApi::ListOffset => {
+            let req =
+                parse_request_from_file::<_, KfListOffsetRequest>(opt.details_file.clone())?;
+            send_request_to_server(server_addr, req)
+        }
+        RequestApi::Metadata => {
+            let req =
+                parse_request_from_file::<_, KfMetadataRequest>(opt.details_file.clone())?;
+            send_request_to_server(server_addr, req)
+        }
+        RequestApi::LeaderAndIsr => {
+            let req = parse_request_from_file::<_, KfLeaderAndIsrRequest>(
+                opt.details_file.clone(),
+            )?;
+            send_request_to_server(server_addr, req)
+        }
+        RequestApi::FindCoordinator => {
+            let req = parse_request_from_file::<_, KfFindCoordinatorRequest>(
+                opt.details_file.clone(),
+            )?;
+            send_request_to_server(server_addr, req)
+        }
+        RequestApi::JoinGroup => {
+            let req =
+                parse_request_from_file::<_, KfJoinGroupRequest>(opt.details_file.clone())?;
+            send_request_to_server(server_addr, req)
+        }
+        RequestApi::SyncGroup => {
+            let req =
+                parse_request_from_file::<_, KfSyncGroupRequest>(opt.details_file.clone())?;
+            send_request_to_server(server_addr, req)
+        }
+        RequestApi::LeaveGroup => {
+            let req =
+                parse_request_from_file::<_, KfLeaveGroupRequest>(opt.details_file.clone())?;
+            send_request_to_server(server_addr, req)
+        }
+        RequestApi::DescribeGroups => {
+            let req = parse_request_from_file::<_, KfDescribeGroupsRequest>(
+                opt.details_file.clone(),
+            )?;
+            send_request_to_server(server_addr, req)
+        }
+        RequestApi::ListGroups => {
+            let req =
+                parse_request_from_file::<_, KfListGroupsRequest>(opt.details_file.clone())?;
+            send_request_to_server(server_addr, req)
+        }
+        RequestApi::DeleteGroups => {
+            let req = parse_request_from_file::<_, KfDeleteGroupsRequest>(
+                opt.details_file.clone(),
+            )?;
+            send_request_to_server(server_addr, req)
+        }
+        RequestApi::Heartbeat => {
+            let req =
+                parse_request_from_file::<_, KfHeartbeatRequest>(opt.details_file.clone())?;
+            send_request_to_server(server_addr, req)
+        }
+        RequestApi::OffsetFetch => {
+            let req =
+                parse_request_from_file::<_, KfOffsetFetchRequest>(opt.details_file.clone())?;
+            send_request_to_server(server_addr, req)
+        }
+    }
+}
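In practice the two subcommands compose (editor's illustration; the broker address is a placeholder):

```
> fluvio advanced generate -r metadata > metadata.json
> # edit metadata.json to describe the topics of interest
> fluvio advanced run -r metadata -j metadata.json -k localhost:9092
```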
diff --git a/cli/src/auth_token/create/cli.rs b/cli/src/auth_token/create/cli.rs
new file mode 100644
index 0000000000..48dd319ea1
--- /dev/null
+++ b/cli/src/auth_token/create/cli.rs
@@ -0,0 +1,165 @@
+//!
+//! # Create Auth Tokens
+//!
+//! CLI tree to generate Create Auth Tokens
+//!
+
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+
+use regex::Regex;
+use structopt::clap::arg_enum;
+use structopt::StructOpt;
+use types::{TokenName, TokenSecret, SpuId};
+use utils::generators::generate_secret;
+use types::print_ok_msg;
+
+use crate::error::CliError;
+use crate::profile::{ProfileConfig, TargetServer};
+
+use super::process_sc_create_auth_token;
+
+// -----------------------------------
+// CLI Options
+// -----------------------------------
+
+#[derive(Debug, StructOpt)]
+pub struct CreateAuthTokenOpt {
+    /// Token name
+    #[structopt(short = "n", long = "token-name", value_name = "string")]
+    token_name: String,
+
+    /// Token secret of 16 characters in length
+    #[structopt(
+        short = "s",
+        long = "secret",
+        value_name = "alpha-numeric",
+        required_unless = "generate_secret",
+        parse(try_from_str = "parse_token_secret")
+    )]
+    token_secret: Option<String>,
+
+    /// Generate a random secret
+    #[structopt(short = "g", long = "generate-secret", conflicts_with = "secret")]
+    generate_secret: bool,
+
+    /// First SPU id in the match range (inclusive)
+    #[structopt(short = "m", long = "min-spu", value_name = "integer")]
+    min_spu: i32,
+
+    /// Last SPU id in the match range (inclusive)
+    #[structopt(short = "x", long = "max-spu", value_name = "integer")]
+    max_spu: i32,
+
+    /// Token type
+    #[structopt(
+        short = "t",
+        long = "token-type",
+        value_name = "token-type",
+        raw(
+            possible_values = "&CliTokenType::variants()",
+            case_insensitive = "true"
+        )
+    )]
+    token_type: Option<CliTokenType>,
+
+    /// Address of Streaming Controller
+    #[structopt(short = "c", long = "sc", value_name = "host:port")]
+    sc: Option<String>,
+
+    /// Profile name
+    #[structopt(short = "P", long = "profile")]
+    profile: Option<String>,
+}
+
+arg_enum! {
+    #[derive(Debug, Clone, PartialEq)]
+    #[allow(non_camel_case_types)]
+    pub enum CliTokenType {
+        Custom,
+        Managed,
+        Any
+    }
+}
+
+/// Parse token secret (should match regex `^[a-z0-9]{16}$`)
+fn parse_token_secret(s: &str) -> Result<String, IoError> {
+    let token_secret = s.to_lowercase();
+
+    let re = Regex::new(r"^[a-z0-9]{16}$").unwrap();
+    if re.is_match(&token_secret) {
+        Ok(token_secret)
+    } else {
+        Err(IoError::new(
+            ErrorKind::InvalidData,
+            format!("secret '{}' must be 16 lowercase alpha-numeric characters", s),
+        ))
+    }
+}
+
+// -----------------------------------
+// Parsed Config
+// -----------------------------------
+
+#[derive(Debug)]
+pub struct CreateAuthTokenConfig {
+    pub token_name: TokenName,
+    pub token_secret: TokenSecret,
+    pub token_type: CliTokenType,
+    pub min_spu: SpuId,
+    pub max_spu: SpuId,
+}
+
+// -----------------------------------
+// CLI Processing
+// -----------------------------------
+
+/// Process create auth-token cli request
+pub fn process_create_auth_token(opt: CreateAuthTokenOpt) -> Result<(), CliError> {
+    let (target_server, create_auth_token_cfg) = parse_opt(opt)?;
+    let token_secret = create_auth_token_cfg.token_secret.clone();
+
+    match target_server {
+        TargetServer::Sc(server_addr) => {
+            process_sc_create_auth_token(server_addr, create_auth_token_cfg)?;
+            print_auth_token_secret(token_secret);
+            Ok(())
+        }
+        TargetServer::Kf(_) => Err(CliError::IoError(IoError::new(
+            ErrorKind::Other,
+            "Kafka does not support create auth-tokens",
+        ))),
+        TargetServer::Spu(_) => Err(CliError::IoError(IoError::new(
+            ErrorKind::Other,
+            "SPU does not implement create auth-tokens",
+        ))),
+    }
+}
+
+/// Validate cli options. Generate target-server and create auth-token.
+fn parse_opt(opt: CreateAuthTokenOpt) -> Result<(TargetServer, CreateAuthTokenConfig), CliError> {
+    // profile specific configurations (target server)
+    let profile_config = ProfileConfig::new(&opt.sc, &None, &opt.profile)?;
+    let target_server = profile_config.target_server()?;
+
+    // assign auth-token
+    let mut token_secret = opt.token_secret.unwrap_or("".to_owned()).to_lowercase();
+    if opt.generate_secret {
+        token_secret = generate_secret();
+    }
+
+    // create auth-token config
+    let create_auth_token_cfg = CreateAuthTokenConfig {
+        token_name: opt.token_name,
+        token_secret: token_secret,
+        token_type: opt.token_type.unwrap_or(CliTokenType::Any),
+        min_spu: opt.min_spu,
+        max_spu: opt.max_spu,
+    };
+
+    // return server separately from config
+    Ok((target_server, create_auth_token_cfg))
+}
+
+/// Print token secret
+fn print_auth_token_secret(token_secret: String) {
+    println!("Secret: {}", token_secret);
+    print_ok_msg!("Important", "secret will not be displayed again!");
+}
diff --git a/cli/src/auth_token/create/mod.rs b/cli/src/auth_token/create/mod.rs
new file mode 100644
index 0000000000..704f7fbad5
--- /dev/null
+++ b/cli/src/auth_token/create/mod.rs
@@ -0,0 +1,7 @@
+mod cli;
+mod proc_sc;
+
+pub use cli::CreateAuthTokenOpt;
+pub use cli::process_create_auth_token;
+
+pub use proc_sc::process_sc_create_auth_token;
diff --git a/cli/src/auth_token/create/proc_sc.rs b/cli/src/auth_token/create/proc_sc.rs
new file mode 100644
index 0000000000..7cf94dc098
--- /dev/null
+++ b/cli/src/auth_token/create/proc_sc.rs
@@ -0,0 +1,111 @@
+//!
+//! # Fluvio SC - Processing
+//!
+//! Sends Create Auth Token request to Fluvio Streaming Controller
+//!
+
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+use std::net::SocketAddr;
+
+use log::trace;
+
+use future_helper::run_block_on;
+
+use sc_api::apis::ScApiKey;
+use sc_api::auth_tokens::{FlvCreateAuthTokensRequest, FlvCreateAuthTokensResponse};
+use sc_api::auth_tokens::{CreateAuthTokenRequest, AuthTokenRequest, FlvTokenType};
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::sc_get_api_versions;
+use crate::common::sc_lookup_version;
+use crate::common::handle_sc_response;
+
+use super::cli::{CreateAuthTokenConfig, CliTokenType};
+
+// -----------------------------------
+// Fluvio SC - Process Request
+// -----------------------------------
+
+// Connect to Fluvio Streaming Controller and process Create Auth Token Request
+pub fn process_sc_create_auth_token(
+    server_addr: SocketAddr,
+    auth_token_cfg: CreateAuthTokenConfig,
+) -> Result<(), CliError> {
+    let name = auth_token_cfg.token_name.clone();
+
+    // Run command and collect results
+    match run_block_on(send_request_to_server(server_addr, auth_token_cfg)) {
+        Err(err) => Err(CliError::IoError(IoError::new(
+            ErrorKind::Other,
+            format!("send create auth-token '{}': {}", name, err),
+        ))),
+        Ok(response) => {
+            if response.results.len() > 0 {
+                let auth_token_resp = &response.results[0];
+                let response = handle_sc_response(
+                    &name,
+                    "auth-token",
+                    "created",
+                    "",
+                    &auth_token_resp.error_code,
+                    &auth_token_resp.error_message,
+                )?;
+                println!("{}", response);
+
+                Ok(())
+            } else {
+                Err(CliError::IoError(IoError::new(
+                    ErrorKind::Other,
+                    format!("cannot create auth-token '{}': communication error", name),
+                )))
+            }
+        }
+    }
+}
+
+/// Connect to server, get version, and send create request
+async fn send_request_to_server<'a>(
+    server_addr: SocketAddr,
+    auth_token_cfg: CreateAuthTokenConfig,
+) -> Result<FlvCreateAuthTokensResponse, CliError> {
+    let mut conn = Connection::new(&server_addr).await?;
+    let request = encode_request(&auth_token_cfg);
+    let versions = sc_get_api_versions(&mut conn).await?;
+    let version = sc_lookup_version(ScApiKey::FlvCreateAuthTokens, &versions);
+
+    trace!("create auth-token req '{}': {:#?}", server_addr, request);
+
+    let response = conn.send_request(request, version).await?;
+
+    trace!("create auth-token res '{}': {:#?}", server_addr, response);
+
+    Ok(response)
+}
+
+/// encode CreateAuthTokenRequest in Fluvio format
+fn encode_request(auth_token_cfg: &CreateAuthTokenConfig) -> FlvCreateAuthTokensRequest {
+    // map token type
+    let token_type = match auth_token_cfg.token_type {
+        CliTokenType::Any => FlvTokenType::Any,
+        CliTokenType::Custom => FlvTokenType::Custom,
+        CliTokenType::Managed => FlvTokenType::Managed,
+    };
+
+    // generate Create AuthToken request
+    let create_auth_token = CreateAuthTokenRequest {
+        name: auth_token_cfg.token_name.clone(),
+        auth_token: AuthTokenRequest {
+            token_secret: auth_token_cfg.token_secret.clone(),
+            token_type: token_type,
+            min_spu: auth_token_cfg.min_spu,
+            max_spu: auth_token_cfg.max_spu,
+        },
+    };
+
+    // generate request with 1 auth-token
+    FlvCreateAuthTokensRequest {
+        auth_tokens: vec![create_auth_token],
+    }
+}
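Taken together with the CLI options above, a create invocation looks roughly like this (editor's illustration; the address, ids, and generated secret are placeholders, and the exact output wording comes from `handle_sc_response` and `print_ok_msg!`):

```
> fluvio auth-token create -n token-9 -g -m 0 -x 9 -t managed -c localhost:9003
auth-token 'token-9' created
Secret: k60pxymuq14zsje8
Important: secret will not be displayed again!
```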
diff --git a/cli/src/auth_token/delete/cli.rs b/cli/src/auth_token/delete/cli.rs
new file mode 100644
index 0000000000..81530ef39c
--- /dev/null
+++ b/cli/src/auth_token/delete/cli.rs
@@ -0,0 +1,78 @@
+//!
+//! # Delete Auth Tokens
+//!
+//! CLI tree to generate Delete Auth Tokens
+//!
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+
+use structopt::StructOpt;
+
+use crate::error::CliError;
+use crate::profile::{ProfileConfig, TargetServer};
+
+use super::process_sc_delete_auth_token;
+
+// -----------------------------------
+// CLI Options
+// -----------------------------------
+
+#[derive(Debug, StructOpt)]
+pub struct DeleteAuthTokenOpt {
+    /// Token name
+    #[structopt(short = "n", long = "token-name", value_name = "string")]
+    token_name: String,
+
+    /// Address of Streaming Controller
+    #[structopt(short = "c", long = "sc", value_name = "host:port")]
+    sc: Option<String>,
+
+    /// Profile name
+    #[structopt(short = "P", long = "profile")]
+    profile: Option<String>,
+}
+
+// -----------------------------------
+// Parsed Config
+// -----------------------------------
+
+#[derive(Debug)]
+pub struct DeleteAuthTokenConfig {
+    pub auth_token_name: String,
+}
+
+// -----------------------------------
+// CLI Processing
+// -----------------------------------
+
+/// Process delete auth-token cli request
+pub fn process_delete_auth_token(opt: DeleteAuthTokenOpt) -> Result<(), CliError> {
+    let (target_server, delete_auth_token_cfg) = parse_opt(opt)?;
+
+    match target_server {
+        TargetServer::Sc(server_addr) => {
+            process_sc_delete_auth_token(server_addr, delete_auth_token_cfg)
+        }
+        TargetServer::Kf(_) => Err(CliError::IoError(IoError::new(
+            ErrorKind::Other,
+            "Kafka does not support delete auth-tokens",
+        ))),
+        TargetServer::Spu(_) => Err(CliError::IoError(IoError::new(
+            ErrorKind::Other,
+            "SPU does not implement delete auth-tokens",
+        ))),
+    }
+}
+
+/// Validate cli options. Generate target-server and delete auth-token.
+fn parse_opt(opt: DeleteAuthTokenOpt) -> Result<(TargetServer, DeleteAuthTokenConfig), CliError> {
+    // profile specific configurations (target server)
+    let profile_config = ProfileConfig::new(&opt.sc, &None, &opt.profile)?;
+    let target_server = profile_config.target_server()?;
+    let delete_auth_token_cfg = DeleteAuthTokenConfig {
+        auth_token_name: opt.token_name,
+    };
+
+    // return server separately from config
+    Ok((target_server, delete_auth_token_cfg))
+}
diff --git a/cli/src/auth_token/delete/mod.rs b/cli/src/auth_token/delete/mod.rs
new file mode 100644
index 0000000000..e1aa293b49
--- /dev/null
+++ b/cli/src/auth_token/delete/mod.rs
@@ -0,0 +1,8 @@
+mod cli;
+mod proc_sc;
+
+pub use cli::DeleteAuthTokenOpt;
+pub use cli::process_delete_auth_token;
+
+pub use proc_sc::process_sc_delete_auth_token;
+
diff --git a/cli/src/auth_token/delete/proc_sc.rs b/cli/src/auth_token/delete/proc_sc.rs
new file mode 100644
index 0000000000..d6a6805c05
--- /dev/null
+++ b/cli/src/auth_token/delete/proc_sc.rs
@@ -0,0 +1,91 @@
+//!
+//! # Fluvio SC - Delete Auth Token Processing
+//!
+//! Sends Delete Auth Token request to Fluvio Streaming Controller
+//!
+
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+use std::net::SocketAddr;
+
+use log::trace;
+
+use future_helper::run_block_on;
+
+use sc_api::apis::ScApiKey;
+use sc_api::auth_tokens::{FlvDeleteAuthTokensRequest, FlvDeleteAuthTokensResponse};
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::sc_get_api_versions;
+use crate::common::sc_lookup_version;
+use crate::common::handle_sc_response;
+
+use super::cli::DeleteAuthTokenConfig;
+
+// -----------------------------------
+// Fluvio SC - Process Request
+// -----------------------------------
+
+// Connect to Fluvio Streaming Controller and process Delete AuthToken Request
+pub fn process_sc_delete_auth_token<'a>(
+    server_addr: SocketAddr,
+    cfg: DeleteAuthTokenConfig,
+) -> Result<(), CliError> {
+    let auth_token_name = cfg.auth_token_name.clone();
+
+    // Run command and collect results
+    match run_block_on(send_request_to_server(server_addr, cfg)) {
+        Err(err) => Err(CliError::IoError(IoError::new(
+            ErrorKind::Other,
+            format!("sending delete auth-token '{}': {}", auth_token_name, err),
+        ))),
+        Ok(response) => {
+            if response.results.len() > 0 {
+                let auth_token_resp = &response.results[0];
+                let response = handle_sc_response(
+                    &auth_token_resp.name,
+                    "auth-token",
+                    "deleted",
+                    "",
+                    &auth_token_resp.error_code,
+                    &auth_token_resp.error_message,
+                )?;
+                println!("{}", response);
+
+                Ok(())
+            } else {
+                Err(CliError::IoError(IoError::new(
+                    ErrorKind::Other,
+                    format!("delete auth-token '{}': empty response", auth_token_name),
+                )))
+            }
+        }
+    }
+}
+
+/// Connect to server, get version, and send delete request
+async fn send_request_to_server(
+    server_addr: SocketAddr,
+    cfg: DeleteAuthTokenConfig,
+) -> Result<FlvDeleteAuthTokensResponse, CliError> {
+    let mut conn = Connection::new(&server_addr).await?;
+    let request = encode_request(&cfg);
+    let versions = sc_get_api_versions(&mut conn).await?;
+    let version = sc_lookup_version(ScApiKey::FlvDeleteAuthTokens, &versions);
+
+    trace!("delete auth-token req '{}': {:#?}", server_addr, request);
+
+    let response = conn.send_request(request, version).await?;
+
+    trace!("delete auth-token res '{}': {:#?}", server_addr, response);
+
+    Ok(response)
+}
+
+/// encode DeleteAuthToken in Fluvio format
+fn encode_request(cfg: &DeleteAuthTokenConfig) -> FlvDeleteAuthTokensRequest {
+    FlvDeleteAuthTokensRequest {
+        auth_tokens: vec![cfg.auth_token_name.clone()],
+    }
+}
diff --git a/cli/src/auth_token/list/cli.rs b/cli/src/auth_token/list/cli.rs
new file mode 100644
index 0000000000..13cd380c7f
--- /dev/null
+++ b/cli/src/auth_token/list/cli.rs
@@ -0,0 +1,94 @@
+//!
+//! # List Auth Tokens CLI
+//!
+//! CLI tree to fetch and list one or more Auth Tokens
+//!
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+
+use structopt::StructOpt;
+
+use crate::error::CliError;
+use crate::common::OutputType;
+use crate::profile::{ProfileConfig, TargetServer};
+
+use super::process_sc_list_auth_tokens;
+
+// -----------------------------------
+// CLI Options
+// -----------------------------------
+
+#[derive(Debug, StructOpt)]
+pub struct ListAuthTokensOpt {
+    /// Token names
+    #[structopt(short = "n", long = "token-name", value_name = "string")]
+    token_names: Vec<String>,
+
+    /// Address of Streaming Controller
+    #[structopt(short = "c", long = "sc", value_name = "host:port")]
+    sc: Option<String>,
+
+    /// Profile name
+    #[structopt(short = "P", long = "profile")]
+    pub profile: Option<String>,
+
+    /// Output
+    #[structopt(
+        short = "O",
+        long = "output",
+        value_name = "type",
+        raw(possible_values = "&OutputType::variants()", case_insensitive = "true")
+    )]
+    output: Option<OutputType>,
+}
+
+// -----------------------------------
+// Parsed Config
+// -----------------------------------
+
+#[derive(Debug)]
+pub struct ListAuthTokensConfig {
+    pub auth_tokens: Option<Vec<String>>,
+    pub output: OutputType,
+}
+
+// -----------------------------------
+// CLI Processing
+// -----------------------------------
+
+/// Parse CLI, build SC address, query server and display result.
+pub fn process_list_auth_tokens(opt: ListAuthTokensOpt) -> Result<(), CliError> {
+    let (target_server, list_tokens_cfg) = parse_opt(opt)?;
+
+    match target_server {
+        TargetServer::Sc(server_addr) => process_sc_list_auth_tokens(server_addr, list_tokens_cfg),
+        TargetServer::Kf(_) => Err(CliError::IoError(IoError::new(
+            ErrorKind::Other,
+            "Kafka does not support list auth-tokens",
+        ))),
+        TargetServer::Spu(_) => Err(CliError::IoError(IoError::new(
+            ErrorKind::Other,
+            "SPU does not implement list auth-tokens",
+        ))),
+    }
+}
+
+/// Validate cli options, parse profile, generate target server and create result object.
+fn parse_opt(opt: ListAuthTokensOpt) -> Result<(TargetServer, ListAuthTokensConfig), CliError> {
+    let profile_config = ProfileConfig::new(&opt.sc, &None, &opt.profile)?;
+    let target_server = profile_config.target_server()?;
+    let auth_tokens = if opt.token_names.len() > 0 {
+        Some(opt.token_names.clone())
+    } else {
+        None
+    };
+
+    // transfer config parameters
+    let config = ListAuthTokensConfig {
+        auth_tokens: auth_tokens,
+        output: opt.output.unwrap_or(OutputType::default()),
+    };
+
+    // return server separately from config result
+    Ok((target_server, config))
+}
diff --git a/cli/src/auth_token/list/mod.rs b/cli/src/auth_token/list/mod.rs
new file mode 100644
index 0000000000..c6ff0830c7
--- /dev/null
+++ b/cli/src/auth_token/list/mod.rs
@@ -0,0 +1,7 @@
+mod cli;
+mod proc_sc;
+
+pub use cli::ListAuthTokensOpt;
+pub use cli::process_list_auth_tokens;
+
+pub use proc_sc::process_sc_list_auth_tokens;
diff --git a/cli/src/auth_token/list/proc_sc.rs b/cli/src/auth_token/list/proc_sc.rs
new file mode 100644
index 0000000000..8de0b028c9
--- /dev/null
+++ b/cli/src/auth_token/list/proc_sc.rs
@@ -0,0 +1,120 @@
+//!
+//! # Fluvio Sc -- List Auth Tokens Processing
+//!
+//! Retrieve all Auth Tokens and print to screen
+//!
+
+use std::net::SocketAddr;
+
+use prettytable::Row;
+use prettytable::row;
+use prettytable::cell;
+
+use crate::error::CliError;
+use crate::common::OutputType;
+use crate::common::{EncoderOutputHandler, TableOutputHandler};
+
+use crate::auth_token::query_metadata::ScAuthTokenMetadata;
+use crate::auth_token::query_metadata::TokenType;
+use crate::auth_token::query_metadata::TokenResolution;
+use crate::auth_token::query_metadata::query_sc_list_auth_tokens;
+
+use super::cli::ListAuthTokensConfig;
+
+// -----------------------------------
+// ListAuthTokens Data Structure
+// -----------------------------------
+
+#[derive(Debug)]
+struct ListAuthTokens {
+    auth_tokens: Vec<ScAuthTokenMetadata>,
+}
+
+// -----------------------------------
+// Process Request
+// -----------------------------------
+
+/// Query Fluvio SC server for Auth Tokens and output to screen
+pub fn process_sc_list_auth_tokens(
+    server_addr: SocketAddr,
+    list_tokens_cfg: ListAuthTokensConfig,
+) -> Result<(), CliError> {
+    let names = list_tokens_cfg.auth_tokens.clone();
+    let auth_tokens = query_sc_list_auth_tokens(server_addr, names)?;
+    let list_auth_tokens = ListAuthTokens { auth_tokens };
+
+    format_response_output(&list_auth_tokens, &list_tokens_cfg.output)
+}
+
+/// Format response based on output type
+fn format_response_output(
+    list_auth_tokens: &ListAuthTokens,
+    output_type: &OutputType,
+) -> Result<(), CliError> {
+    // expecting array with one or more elements
+    if list_auth_tokens.auth_tokens.len() > 0 {
+        if output_type.is_table() {
+            list_auth_tokens.display_errors();
+            list_auth_tokens.display_table(false);
+        } else {
+            list_auth_tokens.display_encoding(output_type)?;
+        }
+    } else {
+        println!("No auth-tokens found");
+    }
+    Ok(())
+}
+
+// -----------------------------------
+// Format Output
+// -----------------------------------
+
+impl TableOutputHandler for ListAuthTokens {
+    /// table header implementation
+    fn header(&self) -> Row {
+        row!["TOKEN-NAME", "STATUS", "SPU-MIN", "SPU-MAX", "TYPE", "REASON"]
+    }
+
+    /// return errors in string format
+    fn errors(&self) -> Vec<String> {
+        let mut errors = vec![];
+        for token_result in &self.auth_tokens {
+            if let Some(error) = &token_result.error {
+                errors.push(format!(
+                    "AuthToken '{}': {}",
+                    token_result.name,
+                    error.to_sentence()
+                ));
+            }
+        }
+        errors
+    }
+
+    /// table content implementation
+    fn content(&self) -> Vec<Row> {
+        let mut rows: Vec<Row> = vec![];
+        for token_result in &self.auth_tokens {
+            if let Some(auth_token) = &token_result.auth_token {
+                rows.push(row![
+                    l -> token_result.name,
+                    l -> TokenResolution::resolution_label(&auth_token.resolution),
+                    c -> auth_token.min_spu,
+                    c -> auth_token.max_spu,
+                    l -> TokenType::type_label(&auth_token.token_type),
+                    l -> auth_token.reason,
+                ]);
+            }
+        }
+        rows
+    }
+}
+
+impl EncoderOutputHandler for ListAuthTokens {
+    /// serializable data type
+    type DataType = Vec<ScAuthTokenMetadata>;
+
+    /// serializable data to be encoded
+    fn data(&self) -> &Vec<ScAuthTokenMetadata> {
+        &self.auth_tokens
+    }
+}
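For orientation (editor's illustration, not captured output), the table columns defined above render a listing along these lines:

```
TOKEN-NAME  STATUS  SPU-MIN  SPU-MAX  TYPE     REASON
token-9     ok         0        9     managed
```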
diff --git a/cli/src/auth_token/mod.rs b/cli/src/auth_token/mod.rs
new file mode 100644
index 0000000000..5477b5218d
--- /dev/null
+++ b/cli/src/auth_token/mod.rs
@@ -0,0 +1,34 @@
+mod create;
+mod list;
+mod delete;
+mod query_metadata;
+
+use structopt::StructOpt;
+
+use create::CreateAuthTokenOpt;
+use create::process_create_auth_token;
+use list::ListAuthTokensOpt;
+use list::process_list_auth_tokens;
+
+use delete::DeleteAuthTokenOpt;
+use delete::process_delete_auth_token;
+
+use super::CliError;
+
+#[derive(Debug, StructOpt)]
+pub enum AuthTokenOpt {
+    #[structopt(name = "create", author = "", about = "Create auth token")]
+    Create(CreateAuthTokenOpt),
+    #[structopt(name = "list", author = "", about = "List auth tokens")]
+    List(ListAuthTokensOpt),
+    #[structopt(name = "delete", author = "", about = "Delete an auth token")]
+    Delete(DeleteAuthTokenOpt),
+}
+
+pub(crate) fn process_auth_tokens(auth_token_opt: AuthTokenOpt) -> Result<(), CliError> {
+    match auth_token_opt {
+        AuthTokenOpt::Create(create_token_opt) => process_create_auth_token(create_token_opt),
+        AuthTokenOpt::List(list_token_opt) => process_list_auth_tokens(list_token_opt),
+        AuthTokenOpt::Delete(delete_token_opt) => process_delete_auth_token(delete_token_opt),
+    }
+}
diff --git a/cli/src/auth_token/query_metadata/mod.rs b/cli/src/auth_token/query_metadata/mod.rs
new file mode 100644
index 0000000000..8a28c89117
--- /dev/null
+++ b/cli/src/auth_token/query_metadata/mod.rs
@@ -0,0 +1,6 @@
+mod sc_auth_token_metadata;
+
+pub use sc_auth_token_metadata::ScAuthTokenMetadata;
+pub use sc_auth_token_metadata::TokenType;
+pub use sc_auth_token_metadata::TokenResolution;
+pub use sc_auth_token_metadata::query_sc_list_auth_tokens;
diff --git a/cli/src/auth_token/query_metadata/sc_auth_token_metadata.rs b/cli/src/auth_token/query_metadata/sc_auth_token_metadata.rs
new file mode 100644
index 0000000000..759556cfc1
--- /dev/null
+++ b/cli/src/auth_token/query_metadata/sc_auth_token_metadata.rs
@@ -0,0 +1,201 @@
+//!
+//! # Fluvio SC - Query Auth Tokens
+//!
+//! Communicates with Fluvio Streaming Controller to retrieve Auth Tokens and convert
+//! them to ScAuthTokenMetadata
+//!
+
+use std::net::SocketAddr;
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+
+use serde::Serialize;
+use log::trace;
+use types::{TokenName, SpuId};
+
+use future_helper::run_block_on;
+
+use sc_api::apis::ScApiKey;
+use sc_api::auth_tokens::{FlvFetchAuthTokensRequest, FlvFetchAuthTokensResponse};
+use sc_api::auth_tokens::FetchAuthTokenResponse;
+use sc_api::auth_tokens::FetchAuthToken;
+use sc_api::auth_tokens::FlvTokenType;
+use sc_api::auth_tokens::FlvTokenResolution;
+use sc_api::errors::FlvErrorCode;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::sc_get_api_versions;
+use crate::common::sc_lookup_version;
+
+// -----------------------------------
+// ScAuthTokenMetadata (Serializable)
+// -----------------------------------
+
+#[derive(Serialize, Debug)]
+pub struct ScAuthTokenMetadata {
+    pub name: TokenName,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub auth_token: Option<AuthToken>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub error: Option<FlvErrorCode>,
+}
+
+#[derive(Serialize, Debug)]
+pub struct AuthToken {
+    pub token_type: TokenType,
+    pub min_spu: SpuId,
+    pub max_spu: SpuId,
+    pub resolution: TokenResolution,
+    pub reason: String,
+}
+
+#[derive(Serialize, Debug)]
+pub enum TokenType {
+    Any,
+    Custom,
+    Managed,
+}
+
+#[derive(Serialize, Debug)]
+pub enum TokenResolution {
+    Ok,
+    Init,
+    Invalid,
+}
+
+// -----------------------------------
+// Implementation
+// -----------------------------------
+
+impl ScAuthTokenMetadata {
+    pub fn new(fetched_auth_token: &FetchAuthTokenResponse) -> Self {
+        let auth_token = if let Some(token) = &fetched_auth_token.auth_token {
+            Some(AuthToken::new(token))
+        } else {
+            None
+        };
+
+        // if error is present, convert it
+        let error_code = if fetched_auth_token.error_code.is_error() {
+            Some(fetched_auth_token.error_code)
+        } else {
+            None
+        };
+
+        // auth-token metadata with all parameters converted
+        ScAuthTokenMetadata {
+            name: fetched_auth_token.name.clone(),
+            auth_token: auth_token,
+            error: error_code,
+        }
+    }
+}
+
+impl AuthToken {
+    pub fn new(fetched_token: &FetchAuthToken) -> Self {
+        let token_type = TokenType::new(&fetched_token.token_type);
+        let resolution = TokenResolution::new(&fetched_token.resolution);
+
+        AuthToken {
+            token_type: token_type,
+            min_spu: fetched_token.min_spu,
+            max_spu: fetched_token.max_spu,
+            resolution: resolution,
+            reason: fetched_token.reason.clone(),
+        }
+    }
+}
+
+impl TokenType {
+    pub fn new(fetch_token_type: &FlvTokenType) -> Self {
+        match fetch_token_type {
+            FlvTokenType::Any => TokenType::Any,
+            FlvTokenType::Managed => TokenType::Managed,
+            FlvTokenType::Custom => TokenType::Custom,
+        }
+    }
+
+    pub fn type_label(token_type: &TokenType) -> &'static str {
+        match token_type {
+            TokenType::Any => "any",
+            TokenType::Managed => "managed",
+            TokenType::Custom => "custom",
+        }
+    }
+}
+
+impl TokenResolution {
+    pub fn new(fetch_token_resolution: &FlvTokenResolution) -> Self {
+        match fetch_token_resolution {
+            FlvTokenResolution::Ok => TokenResolution::Ok,
+            FlvTokenResolution::Init => TokenResolution::Init,
+            FlvTokenResolution::Invalid => TokenResolution::Invalid,
+        }
+    }
+
+    pub fn resolution_label(resolution: &TokenResolution) -> &'static str {
+        match resolution {
+            TokenResolution::Ok => "ok",
+            TokenResolution::Init => "initializing",
+            TokenResolution::Invalid => "invalid",
+        }
+    }
+}
+
+// -----------------------------------
+// Query Server & Convert to Metadata
+// -----------------------------------
+
+/// Query Fluvio SC server for Auth Tokens and output to screen
+pub fn query_sc_list_auth_tokens(
+    server_addr: SocketAddr,
+    names: Option<Vec<String>>,
+) -> Result<Vec<ScAuthTokenMetadata>, CliError> {
+    match run_block_on(sc_fetch_auth_tokens(server_addr, names)) {
+        Err(err) => Err(CliError::IoError(IoError::new(
+            ErrorKind::Other,
+            format!("cannot retrieve auth tokens: {}", err),
+        ))),
+        Ok(fetch_token_res) => {
+            let mut auth_tokens: Vec<ScAuthTokenMetadata> = vec![];
+            for auth_token_res in &fetch_token_res.auth_tokens {
+                auth_tokens.push(ScAuthTokenMetadata::new(auth_token_res));
+            }
+            Ok(auth_tokens)
+        }
+    }
+}
+
+// Query SC Auth Tokens
+async fn sc_fetch_auth_tokens<'a>(
+    server_addr: SocketAddr,
+    names: Option<Vec<String>>,
+) -> Result<FlvFetchAuthTokensResponse, CliError> {
+    // look-up version
+    let mut conn = Connection::new(&server_addr).await?;
+    let versions = sc_get_api_versions(&mut conn).await?;
+    let version = sc_lookup_version(ScApiKey::FlvFetchAuthTokens, &versions);
+
+    // generate request
+    let mut request = FlvFetchAuthTokensRequest::default();
+    request.names = names;
+
+    trace!(
+        "fetch auth-tokens req '{}': {:#?}",
+        conn.server_addr(),
+        request
+    );
+
+    let response = conn.send_request(request, version).await?;
+
+    trace!(
+        "fetch auth-tokens '{}': {:#?}",
+        conn.server_addr(),
+        response
+    );
+
+    Ok(response)
+}
\ No newline at end of file
diff --git a/cli/src/bin/main.rs b/cli/src/bin/main.rs
new file mode 100644
index 0000000000..39ce41452d
--- /dev/null
+++ b/cli/src/bin/main.rs
@@ -0,0 +1,12 @@
+
+use fluvio::run_cli;
+
+use types::print_cli_err;
+
+fn main() {
+    utils::init_logger();
+
+    if let Err(err) = run_cli() {
+        print_cli_err!(err);
+    }
+}
diff --git a/cli/src/common/connection.rs b/cli/src/common/connection.rs
new file mode 100644
index 0000000000..e6b1e808d5
--- /dev/null
+++ b/cli/src/common/connection.rs
@@ -0,0 +1,86 @@
+//!
+//! # Send Request to Kafka server
+//!
+use std::net::SocketAddr;
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+
+use log::{trace, debug};
+use utils::generators::rand_correlation_id;
+
+use kf_socket::KfSocket;
+use kf_protocol::api::RequestMessage;
+use kf_protocol::api::Request;
+
+use crate::error::CliError;
+
+// -----------------------------------
+// Structure
+// -----------------------------------
+
+#[derive(Debug)]
+pub struct Connection {
+    server_addr: SocketAddr,
+    socket: KfSocket,
+}
+
+// -----------------------------------
+// Implementation
+// -----------------------------------
+
+impl Connection {
+    /// Create a new connection
+    pub async fn new(server_addr: &SocketAddr) -> Result<Self, CliError> {
+        let socket = KfSocket::connect(&server_addr)
+            .await
+            .map_err(|err| IoError::new(ErrorKind::ConnectionRefused, format!("{}", err)))?;
+
+        debug!("connected to: {}", server_addr);
+        Ok(Connection {
+            socket,
+            server_addr: server_addr.clone(),
+        })
+    }
+
+    /// Send request and return response (or error)
+    pub async fn send_request<R>(
+        &mut self,
+        request: R,
+        version: Option<i16>,
+    ) -> Result<R::Response, CliError>
+    where
+        R: Request,
+    {
+        trace!("send API '{}' req to srv '{}'", R::API_KEY, self.server_addr);
+
+        let mut req_msg: RequestMessage<R> = RequestMessage::new_request(request);
+        req_msg
+            .get_mut_header()
+            .set_client_id("fluvio")
+            .set_correlation_id(rand_correlation_id());
+        if let Some(ver) = version {
+            req_msg.get_mut_header().set_api_version(ver);
+        }
+        // send request & save response
+        match self.socket.send(&req_msg).await {
+            Err(err) => Err(CliError::IoError(IoError::new(
+                ErrorKind::Other,
+                format!(
+                    "send '{}' to srv '{}': {}",
+                    R::API_KEY,
+                    self.server_addr,
+                    err
+                ),
+            ))),
+            Ok(response) => {
+                trace!("rsvd '{}' res from srv '{}' ", R::API_KEY, self.server_addr);
+                Ok(response.response)
+            }
+        }
+    }
+
+    /// Accessor for server address
+    pub fn server_addr(&self) -> &SocketAddr {
+        &self.server_addr
+    }
+}
diff --git a/cli/src/common/consume_hdlr.rs b/cli/src/common/consume_hdlr.rs
new file mode 100644
index 0000000000..87594ab24d
--- /dev/null
+++ b/cli/src/common/consume_hdlr.rs
@@ -0,0 +1,31 @@
+//!
+//! # Consume Handler
+//!
+//! Output Type defines the types of consume output allowed.
+//!
+
+// -----------------------------------
+// Consumer Output Types
+// -----------------------------------
+
+use structopt::clap::arg_enum;
+
+// Uses clap::arg_enum to choose possible variants
+arg_enum! {
+    #[derive(Debug, Clone, PartialEq)]
+    #[allow(non_camel_case_types)]
+    pub enum ConsumeOutputType {
+        dynamic,
+        text,
+        binary,
+        json,
+        raw,
+    }
+}
+
+/// Consume output type defaults to text formatting
+impl ::std::default::Default for ConsumeOutputType {
+    fn default() -> Self {
+        ConsumeOutputType::dynamic
+    }
+}
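Editor's sketch of driving the `Connection` wrapper defined above; the target address is a placeholder and errors are propagated as `CliError`:

```rust
use std::net::SocketAddr;

use future_helper::run_block_on;
use kf_protocol::message::api_versions::KfApiVersionsRequest;

use crate::common::Connection;
use crate::error::CliError;

// Query a server for its supported API versions and print them.
fn print_api_versions(addr: SocketAddr) -> Result<(), CliError> {
    run_block_on(async move {
        let mut conn = Connection::new(&addr).await?;
        // `None` lets the connection use the request's default (max) version
        let response = conn
            .send_request(KfApiVersionsRequest::default(), None)
            .await?;
        println!("{:#?}", response.api_keys);
        Ok::<(), CliError>(())
    })
}
```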
diff --git a/cli/src/common/describe_hdlr.rs b/cli/src/common/describe_hdlr.rs
new file mode 100644
index 0000000000..ab46daadd7
--- /dev/null
+++ b/cli/src/common/describe_hdlr.rs
@@ -0,0 +1,106 @@
+//!
+//! # Describe Template
+//!
+//! Describe object boilerplate behind a trait.
+//!
+
+use serde::Serialize;
+
+use crate::error::CliError;
+use crate::common::{KeyValOutputHandler, TableOutputHandler};
+
+// -----------------------------------
+// Data Structures
+// -----------------------------------
+
+#[derive(Serialize, Debug)]
+pub struct DescribeObjects<T> {
+    pub label: &'static str,
+    pub label_plural: &'static str,
+
+    pub describe_objects: Vec<T>,
+}
+
+// -----------------------------------
+// Describe Object Trait
+// -----------------------------------
+
+pub trait DescribeObjectHandler {
+    fn is_ok(&self) -> bool;
+    fn is_error(&self) -> bool;
+
+    fn validate(&self) -> Result<(), CliError>;
+}
+
+// -----------------------------------
+// Describe Objects Implementation
+// -----------------------------------
+
+impl<T> DescribeObjects<T>
+where
+    T: DescribeObjectHandler + KeyValOutputHandler + TableOutputHandler,
+{
+    /// Process server based on output type
+    pub fn print_table(&self) -> Result<(), CliError> {
+        match self.objects_cnt() {
+            1 => {
+                let object = &self.describe_objects[0];
+                // provide detailed validation
+                object.validate()?;
+                object.display_keyvals();
+                object.display_table(true);
+            }
+
+            _ => {
+                // header
+                println!("{}", self.header_summary());
+
+                // object
+                for object in &self.describe_objects {
+                    if object.is_ok() {
+                        println!("");
+                        println!("{} DETAILS", self.label.to_ascii_uppercase());
+                        println!("-------------");
+                        object.display_keyvals();
+                        object.display_table(true);
+                    }
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Provide a header summary
+    fn header_summary(&self) -> String {
+        let all_cnt = self.describe_objects.len();
+        let mut ok_cnt = 0;
+        let mut err_cnt = 0;
+        for object in &self.describe_objects {
+            if object.is_ok() {
+                ok_cnt += 1;
+            } else if object.is_error() {
+                err_cnt += 1;
+            }
+        }
+
+        let invalid_cnt = all_cnt - (ok_cnt + err_cnt);
+        if invalid_cnt > 0 {
+            format!(
+                "Retrieved {} out of {} {} ({} are invalid)",
+                ok_cnt, all_cnt, self.label_plural, invalid_cnt
+            )
+        } else {
+            format!(
+                "Retrieved {} out of {} {}",
+                ok_cnt, all_cnt, self.label_plural
+            )
+        }
+    }
+
+    /// retrieves objects count
+    fn objects_cnt(&self) -> usize {
+        self.describe_objects.len()
+    }
+}
+
diff --git a/cli/src/common/endpoint.rs b/cli/src/common/endpoint.rs
new file mode 100644
index 0000000000..d0c736e12f
--- /dev/null
+++ b/cli/src/common/endpoint.rs
@@ -0,0 +1,29 @@
+//!
+//! # CLI Endpoint
+//!
+//! Endpoint data structure
+//!
+use std::fmt;
+
+use serde::Serialize;
+
+#[derive(Serialize, Debug)]
+pub struct Endpoint {
+    pub host: String,
+    pub port: u16,
+}
+
+impl Endpoint {
+    pub fn new(host: &String, port: &u16) -> Self {
+        Endpoint {
+            host: host.clone(),
+            port: *port,
+        }
+    }
+}
+
+impl fmt::Display for Endpoint {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "{}:{}", self.host, self.port)
+    }
+}
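A quick editor's illustration of the `Endpoint` display format (the host value is a placeholder):

```rust
// Display renders Endpoint as host:port.
let ep = Endpoint::new(&"spu-5001.fluvio.local".to_owned(), &9005);
assert_eq!(ep.to_string(), "spu-5001.fluvio.local:9005");
```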
diff --git a/cli/src/common/hex_dump.rs b/cli/src/common/hex_dump.rs
new file mode 100644
index 0000000000..4e1a7d2c2c
--- /dev/null
+++ b/cli/src/common/hex_dump.rs
@@ -0,0 +1,119 @@
+//!
+//! # Hex Dump API
+//!
+//! Converts a vector of bytes to hex dump string format
+//!
+
+/// Takes a u8 array of bytes and converts to hex dump
+pub fn bytes_to_hex_dump(record: &Vec<u8>) -> String {
+    let cols = 16;
+    let record_cnt = record.len();
+    let mut result = String::new();
+    let mut collector = String::new();
+
+    for row_idx in 0..record_cnt {
+        // column index
+        if row_idx % cols == 0 {
+            result.push_str(&format!("{:08x}", row_idx));
+        }
+
+        // spacing half way
+        if row_idx % (cols / 2) == 0 {
+            result.push_str(" ");
+        }
+
+        // convert and add character to collector
+        collector.push_str(&byte_to_string(&record[row_idx]));
+
+        // push binary
+        result.push_str(&format!(" {:02x}", record[row_idx]));
+
+        // push characters
+        if (row_idx + 1) % cols == 0 {
+            result.push_str(&format!(" |{}|\n", collector));
+            collector = String::new();
+        }
+    }
+
+    // if collector is not empty, fill in the gap and add characters
+    if collector.len() > 0 {
+        let last_char_idx = record_cnt % cols;
+        if last_char_idx <= cols / 2 {
+            result.push_str(&" ".to_owned());
+        }
+        for _ in last_char_idx..cols {
+            collector.push_str(&" ".to_owned());
+            result.push_str(&" ".to_owned());
+        }
+
+        result.push_str(&format!(" |{}|\n", collector));
+    }
+
+    result
+}
+
+/// Converts a byte to string character
+fn byte_to_string(byte: &u8) -> String {
+    if 0x20 <= *byte && *byte < 0x7f {
+        format!("{}", *byte as char)
+    } else if *byte == 0xa {
+        ".".to_owned()
+    } else {
+        " ".to_owned()
+    }
+}
+
+/// Return separator for hex dump
+pub fn hex_dump_separator() -> String {
+    "------------------------------------------------------------------------------\n".to_owned()
+}
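A quick caller's sketch of the two helpers above (bytes are illustrative):

    let bytes: Vec<u8> = vec![0x7b, 0x0a, 0x7d]; // '{', newline, '}'
    print!("{}", hex_dump_separator());
    print!("{}", bytes_to_hex_dump(&bytes));
    print!("{}", hex_dump_separator());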
"p | +"#; + + let result = bytes_to_hex_dump(&records); + assert_eq!(result, expected); + } +} diff --git a/cli/src/common/kf/leader_for_topic.rs b/cli/src/common/kf/leader_for_topic.rs new file mode 100644 index 0000000000..7d92b4fb72 --- /dev/null +++ b/cli/src/common/kf/leader_for_topic.rs @@ -0,0 +1,57 @@ +//! +//! # Kafka - Leader for Topic +//! +//! Given Kafka Controller address, it sends metadata request to retrieve all brokers/partitions. +//! It is reponsibility of the caller to compute the Leader. +//! +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::net::SocketAddr; + +use log::trace; +use types::socket_helpers::host_port_to_socket_addr; + +use kf_protocol::message::KfApiVersions; + +use crate::error::CliError; +use crate::common::Connection; +use crate::common::query_kf_metadata; + +/// Find address of the Broker leader for a topic/partition +pub async fn find_broker_leader_for_topic_partition<'a>( + conn: &'a mut Connection, + topic: String, + partition: i32, + versions: &'a KfApiVersions +) -> Result { + let kf_metadata = query_kf_metadata(conn, Some(vec![topic.clone()]), versions).await?; + + let brokers = &kf_metadata.brokers; + let topics = &kf_metadata.topics; + for response_topic in topics { + if response_topic.name == topic { + for response_partition in &response_topic.partitions { + if response_partition.partition_index == partition { + let leader_id = response_partition.leader_id; + + // traverse brokers and find leader + for broker in brokers { + if broker.node_id == leader_id { + trace!("broker {}/{} is leader", broker.host, broker.port); + return host_port_to_socket_addr(&broker.host, broker.port as u16) + .map_err(|err| err.into()); + } + } + } + } + } + } + + Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!( + "topic '{}/{}': unknown topic or partition", + topic, partition + ), + ))) +} diff --git a/cli/src/common/kf/mod.rs b/cli/src/common/kf/mod.rs new file mode 100644 index 0000000000..3a06927e63 --- /dev/null +++ b/cli/src/common/kf/mod.rs @@ -0,0 +1,14 @@ +mod leader_for_topic; +mod response_hdlr; +mod query_metadata; +mod query_api_versions; + +pub use leader_for_topic::find_broker_leader_for_topic_partition; + +pub use response_hdlr::handle_kf_response; + +pub use query_metadata::query_kf_metadata; + +pub use query_api_versions::kf_get_api_versions; +pub use query_api_versions::kf_lookup_version; + diff --git a/cli/src/common/kf/query_api_versions.rs b/cli/src/common/kf/query_api_versions.rs new file mode 100644 index 0000000000..aff7cfaba3 --- /dev/null +++ b/cli/src/common/kf/query_api_versions.rs @@ -0,0 +1,30 @@ +//! +//! # Kafka -- Query Metadata +//! +//! Communicates with Kafka Controller to retrieve version information +//! +use kf_protocol::message::api_versions::KfApiVersionsRequest; +use kf_protocol::message::KfApiVersions; +use kf_protocol::api::AllKfApiKey; + +use crate::error::CliError; +use crate::common::Connection; + +/// Query for API versions +pub async fn kf_get_api_versions(conn: &mut Connection) -> Result { + // Version is None, as we want API to request max_version. + let response = conn + .send_request(KfApiVersionsRequest::default(), None) + .await?; + Ok(response.api_keys) +} + +/// Given an API key, it returns max_version. 
+
+/// Given an API key, it returns max_version. None if not found
+pub fn kf_lookup_version(api_key: AllKfApiKey, versions: &KfApiVersions) -> Option<i16> {
+    for version in versions {
+        if version.index == api_key as i16 {
+            return Some(version.max_version);
+        }
+    }
+    None
+}
diff --git a/cli/src/common/kf/query_metadata.rs b/cli/src/common/kf/query_metadata.rs
new file mode 100644
index 0000000000..994986de6f
--- /dev/null
+++ b/cli/src/common/kf/query_metadata.rs
@@ -0,0 +1,45 @@
+//!
+//! # Kafka -- Query Metadata
+//!
+//! Communicates with Kafka Controller to retrieve Kafka Metadata for some or all topics
+//!
+use log::trace;
+
+use kf_protocol::message::metadata::{KfMetadataRequest, KfMetadataResponse};
+use kf_protocol::message::metadata::MetadataRequestTopic;
+use kf_protocol::message::KfApiVersions;
+use kf_protocol::api::AllKfApiKey;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::kf_lookup_version;
+
+// Query Kafka server for Brokers & Topic Metadata
+pub async fn query_kf_metadata<'a>(
+    conn: &'a mut Connection,
+    topics: Option<Vec<String>>,
+    versions: &'a KfApiVersions,
+) -> Result<KfMetadataResponse, CliError> {
+    let mut request = KfMetadataRequest::default();
+    let version = kf_lookup_version(AllKfApiKey::Metadata, versions);
+
+    // request topics metadata
+    let request_topics = if let Some(topics) = topics {
+        let mut req_topics: Vec<MetadataRequestTopic> = vec![];
+        for name in topics {
+            req_topics.push(MetadataRequestTopic { name });
+        }
+        Some(req_topics)
+    } else {
+        None
+    };
+    request.topics = request_topics;
+
+    trace!("metadata req '{}': {:#?}", conn.server_addr(), request);
+
+    let response = conn.send_request(request, version).await?;
+
+    trace!("metadata res '{}': {:#?}", conn.server_addr(), response);
+
+    Ok(response)
+}
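Passing `None` for `topics` retrieves metadata for every topic; scoping to a single topic looks like this sketch (topic name is made up):

    let metadata =
        query_kf_metadata(&mut conn, Some(vec!["my-topic".to_owned()]), &versions).await?;
    for broker in &metadata.brokers {
        println!("broker {} at {}:{}", broker.node_id, broker.host, broker.port);
    }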
diff --git a/cli/src/common/kf/response_hdlr.rs b/cli/src/common/kf/response_hdlr.rs
new file mode 100644
index 0000000000..5449f30e05
--- /dev/null
+++ b/cli/src/common/kf/response_hdlr.rs
@@ -0,0 +1,51 @@
+//!
+//! # Kafka Controller - Response handlers
+//!
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+
+use kf_protocol::api::ErrorCode as KfErrorCode;
+
+use crate::error::CliError;
+
+/// Handler for Kafka response codes (successful messages have Error code of None)
+pub fn handle_kf_response(
+    name: &String,
+    label: &'static str,
+    operation: &'static str,
+    prepend_validation: &'static str,
+    error_code: &KfErrorCode,
+    error_msg: &Option<String>,
+) -> Result<String, CliError> {
+    match error_code {
+        // success
+        KfErrorCode::None => {
+            if let Some(ref msg) = error_msg {
+                Ok(format!(
+                    "{}{} '{}' {} successfully, {}",
+                    prepend_validation, label, name, operation, msg
+                ))
+            } else {
+                Ok(format!(
+                    "{}{} '{}' {} successfully",
+                    prepend_validation, label, name, operation
+                ))
+            }
+        }
+
+        // error
+        _ => {
+            let err_msg = if let Some(err_msg) = error_msg {
+                err_msg.clone()
+            } else {
+                format!("{} '{}', {}", label, name, error_code.to_sentence())
+            };
+
+            Err(CliError::IoError(IoError::new(
+                ErrorKind::Other,
+                format!("{}{}", prepend_validation, err_msg),
+            )))
+
+        }
+    }
+}
\ No newline at end of file
diff --git a/cli/src/common/mod.rs b/cli/src/common/mod.rs
new file mode 100644
index 0000000000..39ec65546c
--- /dev/null
+++ b/cli/src/common/mod.rs
@@ -0,0 +1,44 @@
+mod connection;
+mod send_request;
+mod kf;
+mod sc;
+mod spu;
+mod output_hdlr;
+mod describe_hdlr;
+mod consume_hdlr;
+mod hex_dump;
+mod endpoint;
+
+pub use self::connection::Connection;
+
+pub use self::send_request::connect_and_send_request;
+
+pub use self::kf::kf_get_api_versions;
+pub use self::kf::kf_lookup_version;
+pub use self::kf::handle_kf_response;
+pub use self::kf::find_broker_leader_for_topic_partition;
+pub use self::kf::query_kf_metadata;
+
+pub use self::sc::sc_get_api_versions;
+pub use self::sc::sc_lookup_version;
+pub use self::sc::handle_sc_response;
+pub use self::sc::find_spu_leader_for_topic_partition;
+pub use self::sc::sc_get_topic_composition;
+
+pub use self::spu::spu_get_api_versions;
+pub use self::spu::spu_lookup_version;
+
+pub use self::describe_hdlr::DescribeObjects;
+pub use self::describe_hdlr::DescribeObjectHandler;
+
+pub use self::output_hdlr::OutputType;
+pub use self::output_hdlr::TableOutputHandler;
+pub use self::output_hdlr::KeyValOutputHandler;
+pub use self::output_hdlr::EncoderOutputHandler;
+
+pub use self::consume_hdlr::ConsumeOutputType;
+
+pub use self::hex_dump::bytes_to_hex_dump;
+pub use self::hex_dump::hex_dump_separator;
+
+pub use endpoint::Endpoint;
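A sketch of how a create-topic flow might surface an outcome through this handler (labels and `error_code` value are illustrative):

    let name = "my-topic".to_owned();
    let msg = handle_kf_response(&name, "topic", "created", "", &error_code, &None)?;
    println!("{}", msg);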
diff --git a/cli/src/common/output_hdlr.rs b/cli/src/common/output_hdlr.rs
new file mode 100644
index 0000000000..0ae0540207
--- /dev/null
+++ b/cli/src/common/output_hdlr.rs
@@ -0,0 +1,173 @@
+//!
+//! # Output Handlers
+//!
+//! Output Type defines the types of output allowed.
+//! Table and Encoding Traits encode and print to screen.
+//!
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+
+use prettytable::format;
+use prettytable::Row;
+use prettytable::Table;
+use prettytable::row;
+use prettytable::cell;
+use serde::Serialize;
+use structopt::clap::arg_enum;
+
+use crate::error::CliError;
+
+// -----------------------------------
+// Output Types
+// -----------------------------------
+
+// Uses clap::arg_enum to choose among the possible variants
+arg_enum! {
+    #[derive(Debug, Clone, PartialEq)]
+    #[allow(non_camel_case_types)]
+    pub enum OutputType {
+        table,
+        yaml,
+        json,
+    }
+}
+
+/// OutputType defaults to table formatting
+impl ::std::default::Default for OutputType {
+    fn default() -> Self {
+        OutputType::table
+    }
+}
+
+/// OutputType check if table
+impl OutputType {
+    pub fn is_table(&self) -> bool {
+        *self == OutputType::table
+    }
+}
+
+// -----------------------------------
+// Table Handler Trait
+// -----------------------------------
+
+pub trait TableOutputHandler {
+    fn header(&self) -> Row;
+    fn content(&self) -> Vec<Row>;
+    fn errors(&self) -> Vec<String>;
+
+    // display errors one at a time
+    fn display_errors(&self) {
+        if self.errors().len() > 0 {
+            for error in self.errors() {
+                println!("{}", error);
+            }
+            println!("-------------");
+        }
+    }
+
+    /// convert result to table output and print to screen
+    fn display_table(&self, indent: bool) {
+        let header = self.header();
+        let content = self.content();
+
+        // if table is empty, return
+        if content.len() == 0 {
+            return;
+        }
+
+        // Create the table
+        let mut table = Table::new();
+        let mut format = format::consts::FORMAT_CLEAN.clone();
+        let pad_left = if indent { 5 } else { 1 };
+        format.padding(pad_left, 1);
+        table.set_format(format);
+
+        // add header
+        table.set_titles(header);
+
+        // add rows
+        for row in content {
+            table.add_row(row);
+        }
+
+        // print table to stdout
+        table.printstd();
+    }
+}
+
+// -----------------------------------
+// Key/Value Handler Trait
+// -----------------------------------
+
+pub trait KeyValOutputHandler {
+    fn key_vals(&self) -> Vec<(String, Option<String>)>;
+
+    /// convert result to table output and print to screen
+    fn display_keyvals(&self) {
+        let key_vals = self.key_vals();
+
+        // Create the table
+        let mut table = Table::new();
+        table.set_format(*format::consts::FORMAT_CLEAN);
+
+        for (key, val_opt) in key_vals {
+            if let Some(val) = val_opt {
+                table.add_row(row!(key, ":".to_owned(), val));
+            } else {
+                table.add_row(row!(key));
+            }
+        }
+
+        // print table to stdout
+        table.printstd();
+    }
+}
+
+// -----------------------------------
+// Encoder Handler Trait
+// -----------------------------------
+
+pub trait EncoderOutputHandler {
+    type DataType: Serialize;
+
+    /// references data type
+    fn data(&self) -> &Self::DataType;
+
+    /// output data based on output type
+    fn display_encoding(&self, output_type: &OutputType) -> Result<(), CliError> {
+        match output_type {
+            OutputType::yaml => self.to_yaml(),
+            OutputType::json => self.to_json(),
+            _ => Err(CliError::IoError(IoError::new(
+                ErrorKind::Other,
+                "unknown encoding type",
+            ))),
+        }
+    }
+
+    /// convert result to json format and print to screen
+    fn to_json(&self) -> Result<(), CliError>
+    where
+        Self::DataType: Serialize,
+    {
+        let data: &Self::DataType = self.data();
+        let serialized = serde_json::to_string_pretty(data).unwrap();
+
+        println!(" {}", serialized);
+
+        Ok(())
+    }
+
+    /// convert result to yaml format and print to screen
+    fn to_yaml(&self) -> Result<(), CliError>
+    where
+        Self::DataType: Serialize,
+    {
+        let data: &Self::DataType = self.data();
+        let serialized = serde_yaml::to_string(data).unwrap();
+
+        println!("{}", serialized);
+
+        Ok(())
+    }
+}
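A toy implementation sketch of the encoder trait, assuming a hypothetical `TopicNameList` wrapper; any `Serialize` type can serve as the `DataType`:

    struct TopicNameList {
        names: Vec<String>,
    }

    impl EncoderOutputHandler for TopicNameList {
        type DataType = Vec<String>;

        fn data(&self) -> &Vec<String> {
            &self.names
        }
    }

    // list.display_encoding(&OutputType::json)? prints the names as pretty JSON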
diff --git a/cli/src/common/sc/leader_for_topic.rs b/cli/src/common/sc/leader_for_topic.rs
new file mode 100644
index 0000000000..0ac3de7e79
--- /dev/null
+++ b/cli/src/common/sc/leader_for_topic.rs
@@ -0,0 +1,101 @@
+//!
+//! # Fluvio SC - Leader for Topic
+//!
+//! Given Fluvio Streaming Controller address, find all SPU replicas for a topic/partition.
+//! It is the responsibility of the caller to compute the Leader.
+//!
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+use std::net::SocketAddr;
+
+use log::trace;
+
+use types::socket_helpers::host_port_to_socket_addr;
+
+use sc_api::versions::ApiVersions;
+use sc_api::errors::FlvErrorCode;
+
+use crate::common::Connection;
+use crate::error::CliError;
+use crate::common::sc::sc_get_topic_composition;
+
+/// Find address of the SPU leader for a topic/partition
+pub async fn find_spu_leader_for_topic_partition<'a>(
+    conn: &'a mut Connection,
+    topic: String,
+    partition: i32,
+    versions: &'a ApiVersions,
+) -> Result<SocketAddr, CliError> {
+    let topic_comp_resp = sc_get_topic_composition(conn, topic.clone(), versions).await?;
+    let topics_resp = &topic_comp_resp.topics;
+    let spus_resp = &topic_comp_resp.spus;
+
+    // there must be one topic in reply
+    if topics_resp.len() != 1 {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!(
+                "topic-composition, expected 1 topic, found {}",
+                topics_resp.len()
+            ),
+        )));
+    }
+
+    // check for errors
+    let topic_resp = &topics_resp[0];
+    if topic_resp.error_code != FlvErrorCode::None {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!(
+                "topic-composition topic error: {}",
+                topic_resp.error_code.to_sentence()
+            ),
+        )));
+    }
+    // lookup leader
+    for partition_resp in &topic_resp.partitions {
+        if partition_resp.partition_idx == partition {
+            // check for errors
+            if partition_resp.error_code != FlvErrorCode::None {
+                return Err(CliError::IoError(IoError::new(
+                    ErrorKind::InvalidData,
+                    format!(
+                        "topic-composition partition error: {}",
+                        partition_resp.error_code.to_sentence()
+                    ),
+                )));
+            }
+
+            // traverse spus and find leader
+            let leader_id = partition_resp.leader_id;
+            for spu_resp in spus_resp {
+                if spu_resp.spu_id == leader_id {
+                    // check for errors
+                    if spu_resp.error_code != FlvErrorCode::None {
+                        return Err(CliError::IoError(IoError::new(
+                            ErrorKind::InvalidData,
+                            format!(
+                                "topic-composition spu error: {}",
+                                spu_resp.error_code.to_sentence()
+                            ),
+                        )));
+                    }
+
+                    let host = &spu_resp.host;
+                    let port = &spu_resp.port;
+
+                    trace!("spu {}/{}: is leader", host, port);
+                    return host_port_to_socket_addr(host, *port).map_err(|err| err.into());
+                }
+            }
+        }
+    }
+
+    Err(CliError::IoError(IoError::new(
+        ErrorKind::Other,
+        format!(
+            "topic-composition '{}/{}': unknown topic or partition",
+            topic, partition
+        ),
+    )))
+}
diff --git a/cli/src/common/sc/mod.rs b/cli/src/common/sc/mod.rs
new file mode 100644
index 0000000000..e2f32681b3
--- /dev/null
+++ b/cli/src/common/sc/mod.rs
@@ -0,0 +1,13 @@
+mod leader_for_topic;
+mod response_hdlr;
+mod query_composition;
+mod query_api_versions;
+
+pub use self::leader_for_topic::find_spu_leader_for_topic_partition;
+
+pub use self::query_composition::sc_get_topic_composition;
+
+pub use self::response_hdlr::handle_sc_response;
+
+pub use self::query_api_versions::sc_get_api_versions;
+pub use self::query_api_versions::sc_lookup_version;
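Put together with the connection and version helpers (`sc_get_api_versions` is defined in the next file), the SC-side lookup reads as in this condensed sketch; address and topic are placeholders:

    let mut conn = Connection::new(&sc_addr).await?;
    let versions = sc_get_api_versions(&mut conn).await?;
    let leader_addr =
        find_spu_leader_for_topic_partition(&mut conn, "my-topic".to_owned(), 0, &versions)
            .await?;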
diff --git a/cli/src/common/sc/query_api_versions.rs b/cli/src/common/sc/query_api_versions.rs
new file mode 100644
index 0000000000..529b29f18d
--- /dev/null
+++ b/cli/src/common/sc/query_api_versions.rs
@@ -0,0 +1,30 @@
+//!
+//! # SC -- Query API Versions
+//!
+//! Communicates with SC Controller to retrieve version information
+//!
+
+use sc_api::versions::{ApiVersions, ApiVersionsRequest};
+use sc_api::apis::ScApiKey;
+
+use crate::error::CliError;
+use crate::common::Connection;
+
+/// Query for API versions
+pub async fn sc_get_api_versions(conn: &mut Connection) -> Result<ApiVersions, CliError> {
+    // Version is None, as we want API to request max_version.
+    let response = conn
+        .send_request(ApiVersionsRequest::default(), None)
+        .await?;
+    Ok(response.api_keys)
+}
+
+/// Given an API key, it returns max_version. None if not found
+pub fn sc_lookup_version(api_key: ScApiKey, versions: &ApiVersions) -> Option<i16> {
+    for version in versions {
+        if version.api_key == api_key as i16 {
+            return Some(version.max_version);
+        }
+    }
+    None
+}
diff --git a/cli/src/common/sc/query_composition.rs b/cli/src/common/sc/query_composition.rs
new file mode 100644
index 0000000000..1bd658b7c5
--- /dev/null
+++ b/cli/src/common/sc/query_composition.rs
@@ -0,0 +1,43 @@
+//!
+//! # Fluvio SC -- Query Topic Composition
+//!
+//! Query topic composition including replicas and SPUs
+//!
+use log::trace;
+
+use sc_api::apis::ScApiKey;
+
+use sc_api::topic::{FlvTopicCompositionRequest, FlvTopicCompositionResponse};
+use sc_api::versions::ApiVersions;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::sc_lookup_version;
+
+/// Connect to server, get version, and query topic composition: Replicas and SPUs
+pub async fn sc_get_topic_composition<'a>(
+    conn: &'a mut Connection,
+    topic: String,
+    versions: &'a ApiVersions,
+) -> Result<FlvTopicCompositionResponse, CliError> {
+    let mut request = FlvTopicCompositionRequest::default();
+    let version = sc_lookup_version(ScApiKey::FlvTopicComposition, versions);
+
+    request.topic_names = vec![topic];
+
+    trace!(
+        "topic composition req '{}': {:#?}",
+        conn.server_addr(),
+        request
+    );
+
+    let response = conn.send_request(request, version).await?;
+
+    trace!(
+        "topic composition res '{}': {:#?}",
+        conn.server_addr(),
+        response
+    );
+
+    Ok(response)
+}
diff --git a/cli/src/common/sc/response_hdlr.rs b/cli/src/common/sc/response_hdlr.rs
new file mode 100644
index 0000000000..e24a5b5b4a
--- /dev/null
+++ b/cli/src/common/sc/response_hdlr.rs
@@ -0,0 +1,51 @@
+//!
+//! # Fluvio Streaming Controller - Response handlers
+//!
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+
+use sc_api::errors::FlvErrorCode;
+
+use crate::error::CliError;
+
+/// Handler for SC response codes (successful messages have Error code of None)
+pub fn handle_sc_response(
+    name: &String,
+    label: &'static str,
+    operation: &'static str,
+    prepend_validation: &'static str,
+    error_code: &FlvErrorCode,
+    error_msg: &Option<String>,
+) -> Result<String, CliError> {
+    match error_code {
+        // success
+        FlvErrorCode::None => {
+            if let Some(ref msg) = error_msg {
+                Ok(format!(
+                    "{}{} '{}' {} successfully, {}",
+                    prepend_validation, label, name, operation, msg
+                ))
+            } else {
+                Ok(format!(
+                    "{}{} '{}' {} successfully",
+                    prepend_validation, label, name, operation
+                ))
+            }
+        }
+
+        // error
+        _ => {
+            let err_msg = if let Some(err_msg) = error_msg {
+                err_msg.clone()
+            } else {
+                format!("{} '{}' {}", label, name, error_code.to_sentence())
+            };
+
+            Err(CliError::IoError(IoError::new(
+                ErrorKind::Other,
+                format!("{}{}", prepend_validation, err_msg),
+            )))
+
+        }
+    }
+}
\ No newline at end of file
diff --git a/cli/src/common/send_request.rs b/cli/src/common/send_request.rs
new file mode 100644
index 0000000000..79c38142ce
--- /dev/null
+++ b/cli/src/common/send_request.rs
@@ -0,0 +1,22 @@
+//!
+//! # Send Request to a Server
+//!
+use std::net::SocketAddr;
+
+use kf_protocol::api::Request;
+
+use crate::error::CliError;
+use crate::common::Connection;
+
+/// Create connection, send request and return response
+pub async fn connect_and_send_request<R>(
+    server_addr: SocketAddr,
+    request: R,
+    version: Option<i16>,
+) -> Result<R::Response, CliError>
+where
+    R: Request,
+{
+    let mut conn = Connection::new(&server_addr).await?;
+    conn.send_request(request, version).await
+}
diff --git a/cli/src/common/spu/mod.rs b/cli/src/common/spu/mod.rs
new file mode 100644
index 0000000000..541cea792e
--- /dev/null
+++ b/cli/src/common/spu/mod.rs
@@ -0,0 +1,4 @@
+mod query_api_versions;
+
+pub use self::query_api_versions::spu_get_api_versions;
+pub use self::query_api_versions::spu_lookup_version;
diff --git a/cli/src/common/spu/query_api_versions.rs b/cli/src/common/spu/query_api_versions.rs
new file mode 100644
index 0000000000..ca716ec3f2
--- /dev/null
+++ b/cli/src/common/spu/query_api_versions.rs
@@ -0,0 +1,30 @@
+//!
+//! # SPU -- Query API Versions
+//!
+//! Communicates with Streaming Processing Unit to retrieve version information
+//!
+
+use spu_api::versions::{ApiVersions, ApiVersionsRequest};
+use spu_api::SpuApiKey;
+
+use crate::error::CliError;
+use crate::common::Connection;
+
+/// Query for API versions
+pub async fn spu_get_api_versions(conn: &mut Connection) -> Result<ApiVersions, CliError> {
+    // Version is None, as we want API to request max_version.
+    let response = conn
+        .send_request(ApiVersionsRequest::default(), None)
+        .await?;
+    Ok(response.api_keys)
+}
+
+/// Given an API key, it returns max_version. None if not found
+pub fn spu_lookup_version(api_key: SpuApiKey, versions: &ApiVersions) -> Option<i16> {
+    for version in versions {
+        if version.api_key == api_key as i16 {
+            return Some(version.max_version);
+        }
+    }
+    None
+}
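For one-shot exchanges, `connect_and_send_request` above is the whole story; a sketch using this module's version request (assuming it implements `Request`):

    let response =
        connect_and_send_request(server_addr, ApiVersionsRequest::default(), None).await?;
    println!("{:#?}", response.api_keys);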
diff --git a/cli/src/consume/cli.rs b/cli/src/consume/cli.rs
new file mode 100644
index 0000000000..4b0bc62947
--- /dev/null
+++ b/cli/src/consume/cli.rs
@@ -0,0 +1,186 @@
+//!
+//! # Consume CLI
+//!
+//! CLI command for Consume operation
+//!
+
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+
+use structopt::StructOpt;
+
+use kf_protocol::api::Offset;
+use kf_protocol::api::MAX_BYTES;
+
+use crate::error::CliError;
+use crate::common::ConsumeOutputType;
+use crate::profile::{ProfileConfig, TargetServer};
+
+use super::ReponseLogParams;
+
+use super::sc_consume_log_from_topic;
+use super::sc_consume_log_from_topic_partition;
+use super::spu_consume_log_from_topic_partition;
+use super::kf_consume_log_from_topic;
+use super::kf_consume_log_from_topic_partition;
+
+#[derive(Debug, StructOpt)]
+pub struct ConsumeLogOpt {
+    /// Topic name
+    #[structopt(short = "t", long = "topic", value_name = "string")]
+    pub topic: String,
+
+    /// Partition id
+    #[structopt(short = "p", long = "partition", value_name = "integer")]
+    pub partition: Option<i32>,
+
+    /// Start reading from this offset
+    #[structopt(short = "g", long = "from-beginning")]
+    pub from_beginning: bool,
+
+    /// Read messages in an infinite loop
+    #[structopt(short = "C", long = "continuous")]
+    pub continuous: bool,
+
+    /// Maximum number of bytes to be retrieved
+    #[structopt(short = "b", long = "maxbytes", value_name = "integer")]
+    pub max_bytes: Option<i32>,
+
+    /// Address of Streaming Controller
+    #[structopt(short = "c", long = "sc", value_name = "host:port")]
+    pub sc: Option<String>,
+
+    /// Address of Streaming Processing Unit
+    #[structopt(
+        short = "u",
+        long = "spu",
+        value_name = "host:port",
+        conflicts_with = "sc"
+    )]
+    pub spu: Option<String>,
+
+    /// Address of Kafka Controller
+    #[structopt(
+        short = "k",
+        long = "kf",
+        value_name = "host:port",
+        conflicts_with = "sc",
+        conflicts_with = "spu"
+    )]
+    pub kf: Option<String>,
+
+    /// Profile name
+    #[structopt(short = "P", long = "profile")]
+    pub profile: Option<String>,
+
+    /// Suppress items that have an unknown output type
+    #[structopt(short = "s", long = "suppress-unknown")]
+    pub suppress_unknown: bool,
+
+    /// Output
+    #[structopt(
+        short = "O",
+        long = "output",
+        value_name = "type",
+        raw(
+            possible_values = "&ConsumeOutputType::variants()",
+            case_insensitive = "true"
+        )
+    )]
+    output: Option<ConsumeOutputType>,
+}
+
+// -----------------------------------
+// Parsed Config
+// -----------------------------------
+
+/// Consume log configuration parameters
+#[derive(Debug)]
+pub struct ConsumeLogConfig {
+    pub topic: String,
+    pub partition: Option<i32>,
+    pub from_beginning: bool,
+    pub continous: bool,
+    pub offset: Offset,
+    pub max_bytes: i32,
+
+    pub output: ConsumeOutputType,
+    pub suppress_unknown: bool,
+}
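Assuming the binary ships as `fluvio` (the client id set on each request), the options above map to invocations along these lines; flag names are taken from the StructOpt attributes:

    // fluvio consume -t my-topic -p 0 --from-beginning --output json
    // fluvio consume -t my-topic --kf localhost:9092 --continuous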
+// -----------------------------------
+// CLI Processing
+// -----------------------------------
+
+/// Process Consume log cli request
+pub fn process_consume_log(opt: ConsumeLogOpt) -> Result<(), CliError> {
+    let (target_server, consume_log_cfg) = parse_opt(opt)?;
+
+    // setup response formatting
+    let response_params = ReponseLogParams {
+        output: consume_log_cfg.output.clone(),
+        suppress: consume_log_cfg.suppress_unknown,
+    };
+
+    if let Some(partition) = consume_log_cfg.partition {
+        // consume one topic/partition
+        match target_server {
+            TargetServer::Sc(server_addr) => sc_consume_log_from_topic_partition(
+                server_addr,
+                consume_log_cfg,
+                partition,
+                response_params,
+            ),
+            TargetServer::Spu(server_addr) => spu_consume_log_from_topic_partition(
+                server_addr,
+                consume_log_cfg,
+                partition,
+                response_params,
+            ),
+            TargetServer::Kf(server_addr) => kf_consume_log_from_topic_partition(
+                server_addr,
+                consume_log_cfg,
+                partition,
+                response_params,
+            ),
+        }
+    } else {
+        // consume one topic/all partitions
+        match target_server {
+            TargetServer::Sc(server_addr) => {
+                sc_consume_log_from_topic(server_addr, consume_log_cfg, response_params)
+            }
+            TargetServer::Kf(server_addr) => {
+                kf_consume_log_from_topic(server_addr, consume_log_cfg, response_params)
+            }
+            _ => Err(CliError::IoError(IoError::new(
+                ErrorKind::Other,
+                format!("Partition index is required when reading logs from an SPU."),
+            ))),
+        }
+    }
+}
+
+/// Validate cli options. Generate target-server and consume log configuration.
+fn parse_opt(opt: ConsumeLogOpt) -> Result<(TargetServer, ConsumeLogConfig), CliError> {
+    // profile specific configurations (target server)
+    let profile_config = ProfileConfig::new_with_spu(&opt.sc, &opt.spu, &opt.kf, &opt.profile)?;
+    let target_server = profile_config.target_server()?;
+    let max_bytes = opt.max_bytes.unwrap_or(MAX_BYTES);
+
+    // consume log specific configurations
+    let consume_log_cfg = ConsumeLogConfig {
+        topic: opt.topic,
+        partition: opt.partition,
+        from_beginning: opt.from_beginning,
+        continous: opt.continuous,
+        offset: -1,
+        max_bytes: max_bytes,
+
+        output: opt.output.unwrap_or(ConsumeOutputType::default()),
+        suppress_unknown: opt.suppress_unknown,
+    };
+
+    // return server separately from config
+    Ok((target_server, consume_log_cfg))
+}
diff --git a/cli/src/consume/flv/mod.rs b/cli/src/consume/flv/mod.rs
new file mode 100644
index 0000000000..999e5ce3ac
--- /dev/null
+++ b/cli/src/consume/flv/mod.rs
@@ -0,0 +1,9 @@
+mod sc_fetch_topic_all;
+mod sc_fetch_topic_part;
+mod spu_fetch_topic_part;
+mod spu_fetch_log_loop;
+mod query;
+
+pub use sc_fetch_topic_part::sc_consume_log_from_topic_partition;
+pub use sc_fetch_topic_all::sc_consume_log_from_topic;
+pub use spu_fetch_topic_part::spu_consume_log_from_topic_partition;
diff --git a/cli/src/consume/flv/query/fetch_local_spu.rs b/cli/src/consume/flv/query/fetch_local_spu.rs
new file mode 100644
index 0000000000..e96dd1f147
--- /dev/null
+++ b/cli/src/consume/flv/query/fetch_local_spu.rs
@@ -0,0 +1,35 @@
+//!
+//! # Fluvio -- Fetch Local SPU
+//!
+//! Communicates with SPU to fetch local parameters
+//!
+use log::trace;
+
+use spu_api::spus::{FlvFetchLocalSpuRequest, FlvFetchLocalSpuResponse};
+use spu_api::versions::ApiVersions;
+use spu_api::SpuApiKey;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::spu_lookup_version;
+
+// Query SPU for its local server parameters.
+pub async fn spu_fetch_local_spu<'a>(
+    conn: &'a mut Connection,
+    versions: &'a ApiVersions,
+) -> Result<FlvFetchLocalSpuResponse, CliError> {
+    let request = FlvFetchLocalSpuRequest::default();
+    let version = spu_lookup_version(SpuApiKey::FlvFetchLocalSpu, versions);
+
+    trace!(
+        "fetch local-spu req '{}': {:#?}",
+        conn.server_addr(),
+        request
+    );
+
+    let response = conn.send_request(request, version).await?;
+
+    trace!("fetch local-spu '{}': {:#?}", conn.server_addr(), response);
+
+    Ok(response)
+}
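When the CLI is pointed straight at an SPU, this probe supplies the server-side view before any fetch; a short sketch (response shape left opaque on purpose):

    let versions = spu_get_api_versions(&mut conn).await?;
    let local_spu = spu_fetch_local_spu(&mut conn, &versions).await?;
    println!("{:#?}", local_spu);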
diff --git a/cli/src/consume/flv/query/fetch_offsets.rs b/cli/src/consume/flv/query/fetch_offsets.rs
new file mode 100644
index 0000000000..822be0bd93
--- /dev/null
+++ b/cli/src/consume/flv/query/fetch_offsets.rs
@@ -0,0 +1,55 @@
+//!
+//! # Fluvio -- Fetch Offsets from SPU Leader
+//!
+//! Communicates with SPU Replica Leader to Fetch Offsets for topic/partitions
+//!
+use log::trace;
+
+use spu_api::offsets::{FlvFetchOffsetsRequest, FlvFetchOffsetsResponse};
+use spu_api::offsets::FetchOffsetTopic;
+use spu_api::offsets::FetchOffsetPartition;
+use spu_api::versions::ApiVersions;
+use spu_api::SpuApiKey;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::spu_lookup_version;
+
+use super::query_params::FlvLeaderParam;
+
+// Query SPU Replica leader for offsets.
+pub async fn spu_fetch_offsets<'a>(
+    conn: &'a mut Connection,
+    topic_name: &'a String,
+    leader: &'a FlvLeaderParam,
+    versions: &'a ApiVersions,
+) -> Result<FlvFetchOffsetsResponse, CliError> {
+    let mut request = FlvFetchOffsetsRequest::default();
+    let version = spu_lookup_version(SpuApiKey::FlvFetchOffsets, versions);
+
+    // collect partition index information from the leader
+    let mut offset_partitions: Vec<FetchOffsetPartition> = vec![];
+    for partition in &leader.partitions {
+        offset_partitions.push(FetchOffsetPartition {
+            partition_index: partition.partition_idx,
+        });
+    }
+
+    // update request
+    request.topics = vec![FetchOffsetTopic {
+        name: topic_name.clone(),
+        partitions: offset_partitions,
+    }];
+
+    trace!("fetch-offsets req '{}': {:#?}", conn.server_addr(), request);
+
+    let response = conn.send_request(request, version).await?;
+
+    trace!(
+        "fetch-offsets res '{}': {:#?}",
+        conn.server_addr(),
+        response
+    );
+
+    Ok(response)
+}
diff --git a/cli/src/consume/flv/query/log_fetch.rs b/cli/src/consume/flv/query/log_fetch.rs
new file mode 100644
index 0000000000..488f03fc02
--- /dev/null
+++ b/cli/src/consume/flv/query/log_fetch.rs
@@ -0,0 +1,63 @@
+//!
+//! # Spu Fetch Logs
+//!
+//! Connects to server and fetches logs
+//!
+
+use log::{debug, trace};
+
+use kf_protocol::message::fetch::{DefaultKfFetchRequest, DefaultKfFetchResponse};
+use kf_protocol::message::fetch::FetchPartition;
+use kf_protocol::message::fetch::{KfFetchRequest, FetchableTopic};
+use kf_protocol::api::Isolation;
+
+use crate::error::CliError;
+use crate::common::Connection;
+
+use super::query_params::FlvFetchLogsParam;
+
+/// Fetch log records from a target server
+pub async fn spu_fetch_logs<'a>(
+    conn: &'a mut Connection,
+    version: Option<i16>,
+    fetch_log_param: &'a FlvFetchLogsParam,
+) -> Result<DefaultKfFetchResponse, CliError> {
+    let mut fetch_partitions = vec![];
+    for partition_param in &fetch_log_param.partitions {
+        let mut fetch_part = FetchPartition::default();
+        fetch_part.partition_index = partition_param.partition_idx;
+        fetch_part.current_leader_epoch = -1;
+        fetch_part.fetch_offset = partition_param.offset;
+        fetch_part.log_start_offset = -1;
+        fetch_part.max_bytes = fetch_log_param.max_bytes;
+
+        fetch_partitions.push(fetch_part);
+    }
+
+    let mut topic_request = FetchableTopic::default();
+    topic_request.name = fetch_log_param.topic.clone();
+    topic_request.fetch_partitions = fetch_partitions;
+
+    let mut request: DefaultKfFetchRequest = KfFetchRequest::default();
+    request.replica_id = -1;
+    request.max_wait = 500;
+    request.min_bytes = 1;
+    request.max_bytes = fetch_log_param.max_bytes;
+    request.isolation_level = Isolation::ReadCommitted;
+    request.session_id = 0;
+    request.epoch = -1;
+    request.topics.push(topic_request);
+
+    debug!(
+        "fetch logs '{}' ({}) partition to {}",
+        fetch_log_param.topic,
+        fetch_log_param.partitions.len(),
+        conn.server_addr()
+    );
+    trace!("fetch logs req {:#?}", request);
+
+    let response = conn.send_request(request, version).await?;
+
+    trace!("fetch logs res: {:#?}", response);
+    Ok(response)
+}
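Driving a fetch takes one parameter struct (defined in query_params below); a condensed sketch with placeholder values:

    let fetch_param = FlvFetchLogsParam {
        topic: "my-topic".to_owned(),
        max_bytes: MAX_BYTES,
        partitions: vec![FlvPartitionParam {
            partition_idx: 0,
            offset: -1, // -1 defers to the offsets fetched above
        }],
    };
    let response = spu_fetch_logs(&mut conn, version, &fetch_param).await?;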
diff --git a/cli/src/consume/flv/query/mod.rs b/cli/src/consume/flv/query/mod.rs
new file mode 100644
index 0000000000..203cb79043
--- /dev/null
+++ b/cli/src/consume/flv/query/mod.rs
@@ -0,0 +1,13 @@
+mod query_params;
+mod fetch_offsets;
+mod fetch_local_spu;
+mod log_fetch;
+
+pub use fetch_offsets::spu_fetch_offsets;
+pub use fetch_local_spu::spu_fetch_local_spu;
+pub use log_fetch::spu_fetch_logs;
+
+pub use query_params::FlvFetchLogsParam;
+pub use query_params::FlvTopicPartitionParam;
+pub use query_params::FlvLeaderParam;
+pub use query_params::FlvPartitionParam;
diff --git a/cli/src/consume/flv/query/query_params.rs b/cli/src/consume/flv/query/query_params.rs
new file mode 100644
index 0000000000..4ea7f6f9ee
--- /dev/null
+++ b/cli/src/consume/flv/query/query_params.rs
@@ -0,0 +1,41 @@
+//!
+//! # Fluvio -- Topic/Partition Parameters
+//!
+//! Intermediate structure to collect metadata information
+//!
+use std::net::SocketAddr;
+
+use kf_protocol::api::Offset;
+
+/// Fetch Logs parameters
+#[derive(Debug)]
+pub struct FlvFetchLogsParam {
+    pub topic: String,
+    pub max_bytes: i32,
+
+    pub partitions: Vec<FlvPartitionParam>,
+}
+
+/// Topic/Partition parameters
+#[derive(Debug, Clone, PartialEq)]
+pub struct FlvTopicPartitionParam {
+    pub topic_name: String,
+
+    pub leaders: Vec<FlvLeaderParam>,
+}
+
+/// Replica Leader parameters
+#[derive(Debug, Clone, PartialEq)]
+pub struct FlvLeaderParam {
+    pub leader_id: i32,
+    pub server_addr: SocketAddr,
+
+    pub partitions: Vec<FlvPartitionParam>,
+}
+
+/// Partition parameters
+#[derive(Debug, Clone, PartialEq)]
+pub struct FlvPartitionParam {
+    pub partition_idx: i32,
+    pub offset: Offset,
+}
diff --git a/cli/src/consume/flv/sc_fetch_topic_all.rs b/cli/src/consume/flv/sc_fetch_topic_all.rs
new file mode 100644
index 0000000000..613edea8bc
--- /dev/null
+++ b/cli/src/consume/flv/sc_fetch_topic_all.rs
@@ -0,0 +1,364 @@
+//!
+//! # Fluvio SC - Fetch logs from Topic and all Partitions
+//!
+//! Connect to Fluvio Streaming Controller, look-up SPU leaders for all topic/partitions,
+//! connect to all SPU leaders and fetch logs continuously
+//!
+//! ## Connection 1 - Connect SC:
+//! * APIVersions
+//! * TopicComposition
+//!
+//! ## Connection 2 - Connect to each topic/partition leader SPU
+//! * APIVersions
+//! * FetchOffsets
+//! * FetchLogs - continuously fetch logs (10 ms)
+//!
+
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+use std::net::SocketAddr;
+
+use ctrlc;
+use log::debug;
+use types::socket_helpers::host_port_to_socket_addr;
+
+use sc_api::topic::FlvTopicCompositionResponse;
+use sc_api::errors::FlvErrorCode;
+
+use futures::channel::mpsc;
+use futures::channel::mpsc::Sender;
+use futures::sink::SinkExt;
+use futures::future::join_all;
+use future_helper::run_block_on;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::sc_get_api_versions;
+use crate::common::sc_get_topic_composition;
+
+use crate::consume::ConsumeLogConfig;
+use crate::consume::ReponseLogParams;
+
+use super::query::FlvTopicPartitionParam;
+use super::query::FlvLeaderParam;
+use super::query::FlvPartitionParam;
+
+use super::spu_fetch_log_loop::spu_fetch_log_loop;
+
+// -----------------------------------
+// Fluvio SC - Process Request
+// -----------------------------------
+
+/// Generate future to consume logs from topic
+pub fn sc_consume_log_from_topic(
+    server_addr: SocketAddr,
+    cfg: ConsumeLogConfig,
+    response_params: ReponseLogParams,
+) -> Result<(), CliError> {
+    run_block_on(process_consume_log_from_topic_all(
+        server_addr,
+        cfg,
+        response_params,
+    ))
+}
+
+/// Processing engine to consume logs from one topic & multiple partitions.
+/// Step 1: Collect system information
+/// * Lookup API versions,
+/// * Request TopicComposition
+/// Step 2: Create loop for continuous log fetch
+async fn process_consume_log_from_topic_all(
+    sc_addr: SocketAddr,
+    cfg: ConsumeLogConfig,
+    response_params: ReponseLogParams,
+) -> Result<(), CliError> {
+    let mut sc_conn = Connection::new(&sc_addr).await?;
+    let sc_vers = sc_get_api_versions(&mut sc_conn).await?;
+    debug!("consume topic '{}'", cfg.topic);
+
+    // query topic composition
+    let topic = &cfg.topic;
+    let topic_comp_res = sc_get_topic_composition(&mut sc_conn, topic.clone(), &sc_vers).await?;
+    let tp_params = composition_to_all_topic_partition_params(&topic_comp_res)?;
+
+    // Generate futures for group heartbeat and fetch logs
+    fetch_log_futures(
+        cfg.max_bytes,
+        cfg.from_beginning,
+        cfg.continous,
+        tp_params,
+        response_params,
+    )
+    .await
+}
+
+/// Generate futures to fetch logs from all SPUs
+async fn fetch_log_futures(
+    max_bytes: i32,
+    from_beginning: bool,
+    continous: bool,
+    tp_param: FlvTopicPartitionParam,
+    response_params: ReponseLogParams,
+) -> Result<(), CliError> {
+    let mut send_channels = vec![];
+    let mut fetch_log_futures = vec![];
+
+    // group fetch channels
+    for leader in &tp_param.leaders {
+        let (sender, receiver) = mpsc::channel::<bool>(5);
+        let topic_name = tp_param.topic_name.clone();
+        send_channels.push(sender);
+        fetch_log_futures.push(spu_fetch_log_loop(
+            topic_name,
+            max_bytes,
+            from_beginning,
+            continous,
+            leader.clone(),
+            response_params.clone(),
+            receiver,
+        ));
+    }
+
+    // attach all send channels to Ctrl-C event handler
+    // - all futures will exit on Ctrl-C event
+    if let Err(err) = ctrlc::set_handler(move || {
+        debug!("<ctrl-c> received");
+        send_ctrlc_signal(send_channels.clone());
+    }) {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("CTRL-C handler: {}", err),
+        )));
+    }
+
+    // spin-off all futures
+    join_all(fetch_log_futures).await;
+
+    Ok(())
+}
+
+// -----------------------------------
+// Event Processing
+// -----------------------------------
+
+/// Send CTRL c signal to all channels in send array
+fn send_ctrlc_signal(send_channels: Vec<Sender<bool>>) {
+    let _ = run_block_on(async move {
+        for mut send_channel in send_channels {
+            send_channel.send(true).await.expect("should not fail");
+        }
+        Ok(()) as Result<(), ()>
+    });
+}
+
+// -----------------------------------
+// Conversions & Validations
+// -----------------------------------
+
+/// Parse topic composition and generate Topic/Partition parameters
+fn composition_to_all_topic_partition_params(
+    topic_comp_resp: &FlvTopicCompositionResponse,
+) -> Result<FlvTopicPartitionParam, CliError> {
+    let topics_resp = &topic_comp_resp.topics;
+
+    // there must be one topic in reply
+    if topics_resp.len() != 1 {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("expected 1 topic, found {}", topics_resp.len()),
+        )));
+    }
+
+    // check for errors
+    let topic_resp = &topics_resp[0];
+    let topic_name = &topic_resp.name;
+    if topic_resp.error_code != FlvErrorCode::None {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!(
+                "topic-composition topic '{}' error: {}",
+                topic_name,
+                topic_resp.error_code.to_sentence()
+            ),
+        )));
+    }
+
+    // generate topic/partition parameter object
+    let mut tp_param = FlvTopicPartitionParam {
+        topic_name: topic_name.clone(),
+        leaders: vec![],
+    };
+
+    // find partition
+    for partition_resp in &topic_resp.partitions {
+        // ensure valid partition
+        if partition_resp.error_code != FlvErrorCode::None {
+            return Err(CliError::IoError(IoError::new(
+                ErrorKind::InvalidData,
+                format!(
+                    "partition '{}/{}': {}",
+                    topic_name,
+                    partition_resp.partition_idx,
+                    partition_resp.error_code.to_sentence()
+                ),
+            )));
+        }
+
+        // find leader for this partition
+        let mut leader: Option<&mut FlvLeaderParam> = None;
+        for leader_param in &mut tp_param.leaders {
+            if leader_param.leader_id == partition_resp.leader_id {
+                leader = Some(leader_param);
+                break;
+            }
+        }
+
+        // generate leader
+        if leader.is_none() {
+            for spu_resp in &topic_comp_resp.spus {
+                if spu_resp.spu_id == partition_resp.leader_id {
+                    let host = &spu_resp.host;
+                    let port = &spu_resp.port;
+
+                    debug!("resolve {}:{}", spu_resp.host, spu_resp.port);
+                    let server_addr = host_port_to_socket_addr(host, *port)
+                        .map_err(|err| {
+                            CliError::IoError(IoError::new(
+                                ErrorKind::InvalidData,
+                                format!("cannot resolve '{}:{}': {}", host, port, err),
+                            ))
+                        })
+                        .unwrap();
+
+                    tp_param.leaders.push(FlvLeaderParam {
+                        leader_id: spu_resp.spu_id,
+                        server_addr: server_addr,
+                        partitions: vec![],
+                    });
+
+                    let len = tp_param.leaders.len();
+                    leader = Some(&mut tp_param.leaders[len - 1]);
+                    break;
+                }
+            }
+        }
+
+        if let Some(leader) = leader {
+            // add partition to leader
+            leader.partitions.push(FlvPartitionParam {
+                partition_idx: partition_resp.partition_idx,
+                offset: -1,
+            });
+        } else {
+            return Err(CliError::IoError(IoError::new(
+                ErrorKind::InvalidData,
+                "invalid topic composition",
+            )));
+        };
+    }
+
+    // there must be at least one topic generated
+    if tp_param.leaders.len() == 0 {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            "error generating topic/partitions",
+        )));
+    }
+
+    debug!("topic-partition parameters {:#?}", tp_param);
+
+    Ok(tp_param)
+}
+
+// ---------------------------------------
+// Unit Tests
+// ---------------------------------------
+
+#[cfg(test)]
+pub mod test {
+    use super::*;
+    use std::net::{IpAddr, Ipv4Addr};
+
+    use sc_api::topic::FetchTopicReponse;
+    use sc_api::topic::FetchPartitionResponse;
+    use sc_api::topic::FetchSpuReponse;
+
+    /// create metadata
+    fn create_topic_composition_response(leader_ids: Vec<i32>) -> FlvTopicCompositionResponse {
+        let mut fetch_partitions: Vec<FetchPartitionResponse> = vec![];
+        for (i, leader_id) in leader_ids.iter().enumerate() {
+            fetch_partitions.push(FetchPartitionResponse {
+                error_code: FlvErrorCode::None,
+                partition_idx: i as i32,
+                leader_id: *leader_id,
+                replicas: vec![2, 3],
+                live_replicas: vec![3, 2],
+            });
+        }
+
+        FlvTopicCompositionResponse {
+            spus: vec![
+                FetchSpuReponse {
+                    error_code: FlvErrorCode::None,
+                    spu_id: 2,
+                    host: "10.0.0.23".to_owned(),
+                    port: 9093,
+                },
+                FetchSpuReponse {
+                    error_code: FlvErrorCode::None,
+                    spu_id: 3,
+                    host: "10.0.0.23".to_owned(),
+                    port: 9094,
+                },
+                FetchSpuReponse {
+                    error_code: FlvErrorCode::None,
+                    spu_id: 1,
+                    host: "10.0.0.23".to_owned(),
+                    port: 9092,
+                },
+            ],
+            topics: vec![FetchTopicReponse {
+                error_code: FlvErrorCode::None,
+                name: "test2".to_owned(),
+                partitions: fetch_partitions,
+            }],
+        }
+    }
+
+    #[test]
+    fn test_composition_to_all_topic_partition_params() {
+        let topic_comp = create_topic_composition_response(vec![3, 2, 3]);
+        let result = composition_to_all_topic_partition_params(&topic_comp);
+
+        let expected_result = FlvTopicPartitionParam {
+            topic_name: "test2".to_owned(),
+            leaders: vec![
+                FlvLeaderParam {
+                    leader_id: 3,
+                    server_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 23)), 9094),
+                    partitions: vec![
+                        FlvPartitionParam {
+                            partition_idx: 0,
+                            offset: -1,
+                        },
+                        FlvPartitionParam {
+                            partition_idx: 2,
+                            offset: -1,
+                        },
+                    ],
+                },
+                FlvLeaderParam {
+                    leader_id: 2,
+                    server_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 23)), 9093),
+                    partitions: vec![FlvPartitionParam {
+                        partition_idx: 1,
+                        offset: -1,
+                    }],
+                },
+            ],
+        };
+
+        println!("found: {:#?}\nexpected: {:#?}", result, expected_result);
+        assert_eq!(result.unwrap(), expected_result);
+    }
+}
diff --git a/cli/src/consume/flv/sc_fetch_topic_part.rs b/cli/src/consume/flv/sc_fetch_topic_part.rs
new file mode 100644
index 0000000000..924ea3fa96
--- /dev/null
+++ b/cli/src/consume/flv/sc_fetch_topic_part.rs
@@ -0,0 +1,250 @@
+//!
+//! # Fluvio SC - Fetch logs from Topic / Partition
+//!
+//! Connect to Fluvio Streaming Controller, identify leading SPU and fetch logs.
+//!
+//! ## Connection 1 - Connect SC:
+//! * APIVersions
+//! * TopicComposition
+//!
+//! ## Connection 2 - Connect to topic/partition SPU leader
+//! * APIVersions
+//! * FetchOffsets
+//! * FetchLogs - continuously fetch logs (10 ms)
+//!
+
+use std::io::ErrorKind;
+use std::io::Error as IoError;
+use std::net::SocketAddr;
+
+use ctrlc;
+use log::debug;
+use types::socket_helpers::host_port_to_socket_addr;
+
+use sc_api::topic::FlvTopicCompositionResponse;
+use sc_api::errors::FlvErrorCode;
+
+use futures::channel::mpsc;
+use futures::channel::mpsc::Sender;
+use futures::sink::SinkExt;
+use future_helper::run_block_on;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::sc_get_api_versions;
+use crate::common::sc_get_topic_composition;
+
+use crate::consume::ConsumeLogConfig;
+use crate::consume::ReponseLogParams;
+
+use super::query::FlvTopicPartitionParam;
+use super::query::FlvLeaderParam;
+use super::query::FlvPartitionParam;
+
+use super::spu_fetch_log_loop::spu_fetch_log_loop;
+
+// -----------------------------------
+// Fluvio SC - Process Request
+// -----------------------------------
+
+// Create execution block to consume log messages
+pub fn sc_consume_log_from_topic_partition(
+    server_addr: SocketAddr,
+    cfg: ConsumeLogConfig,
+    partition: i32,
+    response_params: ReponseLogParams,
+) -> Result<(), CliError> {
+    run_block_on(process_log_from_topic_partition(
+        server_addr,
+        cfg,
+        partition,
+        response_params,
+    ))
+}
+
+/// Processing engine to consume logs from one topic & partition.
+/// Step 1: Collect system information
+/// * Lookup API versions,
+/// * Request TopicComposition
+/// Step 2: Create loop for continuous log fetch
+async fn process_log_from_topic_partition(
+    server_addr: SocketAddr,
+    cfg: ConsumeLogConfig,
+    partition: i32,
+    response_params: ReponseLogParams,
+) -> Result<(), CliError> {
+    let mut sc_conn = Connection::new(&server_addr).await?;
+    let sc_vers = sc_get_api_versions(&mut sc_conn).await?;
+    debug!("consume topic '{}'", cfg.topic);
+
+    // query topic composition
+    let topic = &cfg.topic;
+    let topic_comp_res = sc_get_topic_composition(&mut sc_conn, topic.clone(), &sc_vers).await?;
+    let tp_params = composition_to_topic_partition_params(&topic_comp_res, topic, partition)?;
+
+    // Generate future for continuous fetch-log
+    fetch_log_future(
+        cfg.max_bytes,
+        cfg.from_beginning,
+        cfg.continous,
+        tp_params,
+        response_params,
+    )
+    .await
+}
+
+/// Generate futures for fetch-log and link with CTRL-C
+async fn fetch_log_future(
+    max_bytes: i32,
+    from_beginning: bool,
+    continous: bool,
+    tp_param: FlvTopicPartitionParam,
+    response_params: ReponseLogParams,
+) -> Result<(), CliError> {
+
+    debug!("fetch log future");
+    // ensure only 1 leader
+    if tp_param.leaders.len() != 1 {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("expected 1 leader, found {}", tp_param.leaders.len()),
+        )));
+    }
+    let leader = &tp_param.leaders[0];
+    let topic_name = tp_param.topic_name.clone();
+
+    // fetch-log channel
+    let (sender, receiver) = mpsc::channel::<bool>(5);
+
+    // attach sender to Ctrl-C event handler
+    if let Err(err) = ctrlc::set_handler(move || {
+        debug!("<ctrl-c> received");
+        send_ctrlc_signal(sender.clone());
+    }) {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("CTRL-C handler: {}", err),
+        )));
+    }
+
+    // spin-off fetch log future
+    spu_fetch_log_loop(
+        topic_name,
+        max_bytes,
+        from_beginning,
+        continous,
+        leader.clone(),
+        response_params.clone(),
+        receiver,
+    )
+    .await
+}
+
+/// Send CTRL c signal to all channels in send array
+fn send_ctrlc_signal(mut sender: Sender<bool>) {
+    let _ = run_block_on(async move {
+        sender.send(true).await.expect("should not fail");
+        Ok(()) as Result<(), ()>
+    });
+}
+
+// -----------------------------------
+// Conversions & Validations
+// -----------------------------------
+
+/// Parse topic composition and generate Topic/Partition parameters
+fn composition_to_topic_partition_params(
+    topic_comp_resp: &FlvTopicCompositionResponse,
+    topic_name: &String,
+    partition: i32,
+) -> Result<FlvTopicPartitionParam, CliError> {
+
+    debug!("composition to topic partition param");
+    let topics_resp = &topic_comp_resp.topics;
+
+    // there must be one topic in reply
+    if topics_resp.len() != 1 {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("expected 1 topic, found {}", topics_resp.len()),
+        )));
+    }
+
+    // check for errors
+    let topic_resp = &topics_resp[0];
+    if topic_resp.error_code != FlvErrorCode::None {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!(
+                "topic-composition topic '{}' error: {}",
+                topic_name,
+                topic_resp.error_code.to_sentence()
+            ),
+        )));
+    }
+
+    // generate topic/partition parameter object
+    let mut tp_param = FlvTopicPartitionParam {
+        topic_name: topic_name.clone(),
+        leaders: vec![],
+    };
+
+    // find partition
+    for partition_resp in &topic_resp.partitions {
+        if partition_resp.partition_idx == partition {
+            // ensure valid partition
+            if partition_resp.error_code != FlvErrorCode::None {
+                return Err(CliError::IoError(IoError::new(
+                    ErrorKind::InvalidData,
+                    format!(
+                        "partition '{}/{}': {}",
+                        topic_name,
+                        partition,
+                        partition_resp.error_code.to_sentence()
+                    ),
+                )));
+            }
+
+            // generate leader
+            for spu_resp in &topic_comp_resp.spus {
+                if spu_resp.spu_id == partition_resp.leader_id {
+                    let host = &spu_resp.host;
+                    let port = &spu_resp.port;
+
+                    debug!("resolve {}:{}", spu_resp.host, spu_resp.port);
+                    let server_addr = host_port_to_socket_addr(host, *port)
+                        .map_err(|err| {
+                            CliError::IoError(IoError::new(
+                                ErrorKind::InvalidData,
+                                format!("cannot resolve '{}:{}': {}", host, port, err),
+                            ))
+                        })
+                        .unwrap();
+
+                    tp_param.leaders.push(FlvLeaderParam {
+                        leader_id: spu_resp.spu_id,
+                        server_addr: server_addr,
+                        partitions: vec![FlvPartitionParam {
+                            partition_idx: partition_resp.partition_idx,
+                            offset: -1,
+                        }],
+                    });
+
+                    break;
+                }
+            }
+        }
+    }
+
+    // there must be at least one topic generated
+    if tp_param.leaders.len() == 0 {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            "error generating topic/partitions",
+        )));
+    }
+
+    debug!("topic-partition parameters {:#?}", tp_param);
+
+    Ok(tp_param)
+}
diff --git a/cli/src/consume/flv/sc_topic_partition.rs b/cli/src/consume/flv/sc_topic_partition.rs
new file mode 100644
index 0000000000..fcd4a8a9e8
--- /dev/null
+++ b/cli/src/consume/flv/sc_topic_partition.rs
@@ -0,0 +1,58 @@
+//!
+//! # Fluvio SC Consume Log
+//!
+//! Looks up the SPU responsible for the topic/partition, connects to the server
+//! and reads logs.
+//!
+
+use std::net::SocketAddr;
+
+use future_helper::run_block_on;
+use kf_protocol::message::fetch::DefaultKfFetchResponse;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::find_spu_leader_for_topic_partition;
+use crate::common::sc_get_api_versions;
+
+use super::{FetchLogParams, ReponseLogParams};
+
+use super::logs_fetch::fetch_log_msgs;
+use super::logs_output::process_fetch_topic_reponse;
+
+// -----------------------------------
+// Fluvio SC - Process Request
+// -----------------------------------
+
+/// Connect to Fluvio Streaming Controller to look-up SPU and read log
+pub fn sc_consume_log_from_topic_partition(
+    server_addr: SocketAddr,
+    log_params: FetchLogParams,
+    response_params: ReponseLogParams,
+) -> Result<(), CliError> {
+    let response = run_block_on(find_spu_consume_log_server(server_addr, log_params))?;
+
+    // process logs response
+    process_fetch_topic_reponse(&response, &response_params)
+}
+
+/// Connect to SC Controller, find spu, and send log
+async fn find_spu_consume_log_server(
+    sc_addr: SocketAddr,
+    log_params: FetchLogParams,
+) -> Result<DefaultKfFetchResponse, CliError> {
+    let mut conn = Connection::new(&sc_addr).await?;
+    let sc_vers = sc_get_api_versions(&mut conn).await?;
+
+    // find spu
+    let spu_addr = find_spu_leader_for_topic_partition(
+        &mut conn,
+        log_params.topic.clone(),
+        log_params.partition,
+        &sc_vers,
+    )
+    .await?;
+
+    // fetch logs
+    fetch_log_msgs(spu_addr, log_params).await
+}
diff --git a/cli/src/consume/flv/spu_fetch_log_loop.rs b/cli/src/consume/flv/spu_fetch_log_loop.rs
new file mode 100644
index 0000000000..28887a896b
--- /dev/null
+++ b/cli/src/consume/flv/spu_fetch_log_loop.rs
@@ -0,0 +1,226 @@
+//!
+//! # SPU - Fetch Log
+//!
+//! Fetch logs from SPU
+//!
+
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+use std::time::Duration;
+
+use log::debug;
+use log::trace;
+
+use spu_api::SpuApiKey;
+use spu_api::offsets::FlvFetchOffsetsResponse;
+
+use spu_api::errors::FlvErrorCode;
+
+use kf_protocol::api::ErrorCode as KfErrorCode;
+use kf_protocol::message::fetch::DefaultKfFetchResponse;
+
+use futures::channel::mpsc;
+use futures::future::FutureExt;
+use futures::stream::StreamExt;
+use futures::select;
+use future_helper::sleep;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::spu_get_api_versions;
+use crate::common::spu_lookup_version;
+
+use super::query::FlvFetchLogsParam;
+use super::query::FlvLeaderParam;
+use super::query::spu_fetch_logs;
+use super::query::spu_fetch_offsets;
+
+use crate::consume::logs_output::ReponseLogParams;
+use crate::consume::process_fetch_topic_reponse;
+
+// -----------------------------------
+// SPU - Fetch Loop
+// -----------------------------------
+
+/// Fetch log continuously
+pub async fn spu_fetch_log_loop(
+    topic_name: String,
+    max_bytes: i32,
+    from_beginning: bool,
+    continous: bool,
+    mut leader_param: FlvLeaderParam,
+    response_params: ReponseLogParams,
+    mut receiver: mpsc::Receiver<bool>,
+) -> Result<(), CliError> {
+    let mut spu_conn = Connection::new(&leader_param.server_addr).await?;
+    let vers = spu_get_api_versions(&mut spu_conn).await?;
+    let version = spu_lookup_version(SpuApiKey::KfFetch, &vers);
+
+    debug!("fetch loop version: {:#?}", version);
+
+    // list offsets
+    let list_offsets_res =
+        spu_fetch_offsets(&mut spu_conn, &topic_name, &leader_param, &vers).await?;
+
+    let _ = update_leader_partition_offsets(
+        &topic_name,
+        &mut leader_param,
+        &list_offsets_res,
+        from_beginning,
+    )?;
+
+    // initialize fetch log parameters
+    let mut fetch_param = FlvFetchLogsParam {
+        topic: topic_name.clone(),
+        max_bytes: max_bytes,
+        partitions: leader_param.partitions.clone(),
+    };
+
+    trace!("fetch param: {:#?}", fetch_param);
+
+    let mut delay = 0;
+    loop {
+        select! {
+            _ = (sleep(Duration::from_millis(delay))).fuse() => {
+
+                debug!("start fetch loop");
+                // fetch logs
+                let fetch_logs_res = spu_fetch_logs(&mut spu_conn, version, &fetch_param).await?;
+
+                // process logs response
+                let _ = process_fetch_topic_reponse(&fetch_logs_res, &response_params)?;
+
+                // update offsets
+                let _ = update_fetch_log_offsets(&topic_name, &mut fetch_param, &fetch_logs_res)?;
+
+                debug!("end fetch loop");
+
+                if continous {
+                    delay = 500;
+                } else {
+                    return Ok(())
+                }
+            },
+            receiver_req = receiver.next() => {
+                debug!("... replica {} exiting", leader_param.leader_id);
+                debug!("... replica {} exiting", leader_param.leader_id);
+                println!();
+                return Ok(())
+            }
+        }
+    }
+}
+
+// -----------------------------------
+// Conversions & Validations
+// -----------------------------------
+
+/// Update leader partition offsets
+fn update_leader_partition_offsets(
+    topic_name: &String,
+    leader_param: &mut FlvLeaderParam,
+    list_offsets_res: &FlvFetchOffsetsResponse,
+    from_start: bool,
+) -> Result<(), CliError> {
+    for topic_res in &list_offsets_res.topics {
+        // ensure valid topic
+        if topic_res.name != *topic_name {
+            return Err(CliError::IoError(IoError::new(
+                ErrorKind::InvalidData,
+                format!("list offsets: unknown topic '{}'", topic_name),
+            )));
+        }
+
+        for partition_res in &topic_res.partitions {
+            let partition_name = format!("{}/{}", topic_res.name, partition_res.partition_index);
+
+            // validate partition response
+            if partition_res.error_code != FlvErrorCode::None {
+                return Err(CliError::IoError(IoError::new(
+                    ErrorKind::InvalidData,
+                    format!(
+                        "partition offset '{}': {}",
+                        partition_name,
+                        partition_res.error_code.to_sentence()
+                    ),
+                )));
+            }
+
+            // update leader epoch & offsets in partitions
+            for partition in &mut leader_param.partitions {
+                if partition.partition_idx == partition_res.partition_index {
+                    if from_start {
+                        partition.offset = partition_res.start_offset;
+                    } else {
+                        partition.offset = partition_res.last_stable_offset;
+                    }
+
+                    debug!(
+                        "list-offsets '{}' offset {}",
+                        partition_name, partition.offset
+                    );
+                }
+            }
+        }
+    }
+
+    Ok(())
+}
+
+/// Update fetch log offsets by copying the last_stable_offset of the response
+/// to the params offset for each partition
+fn update_fetch_log_offsets(
+    topic_name: &String,
+    fetch_logs_param: &mut FlvFetchLogsParam,
+    fetch_res: &DefaultKfFetchResponse,
+) -> Result<(), CliError> {
+    if fetch_res.error_code != KfErrorCode::None {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("fetch: {}", fetch_res.error_code.to_sentence()),
+        )));
+    }
+
+    // grab last stable offsets for each partition
+    for topic_res in &fetch_res.topics {
+        // ensure valid topic
+        if topic_res.name != *topic_name {
+            return Err(CliError::IoError(IoError::new(
+                ErrorKind::InvalidData,
+                format!("fetch: unknown topic '{}'", topic_name),
+            )));
+        }
+
+        for partition_res in &topic_res.partitions {
+            let partition_name = format!("{}/{}", topic_res.name, partition_res.partition_index);
+
+            // validate partition response
+            if partition_res.error_code != KfErrorCode::None {
+                return Err(CliError::IoError(IoError::new(
+                    ErrorKind::InvalidData,
+                    format!(
+                        "fetch partition '{}': {}",
+                        partition_name,
+                        partition_res.error_code.to_sentence()
+                    ),
+                )));
+            }
+
+            // update offsets in partitions
+            for partition in &mut fetch_logs_param.partitions {
+                if partition.partition_idx == partition_res.partition_index
+                    && partition.offset != partition_res.last_stable_offset
+                {
+                    partition.offset = partition_res.last_stable_offset;
+
+                    debug!(
+                        "partition '{}' - updated offset {}",
+                        partition_name, partition.offset
+                    );
+                }
+            }
+        }
+    }
+    Ok(())
+}
diff --git a/cli/src/consume/flv/spu_fetch_topic_part.rs b/cli/src/consume/flv/spu_fetch_topic_part.rs
new file mode 100644
index 0000000000..1396c83f9f
--- /dev/null
+++ b/cli/src/consume/flv/spu_fetch_topic_part.rs
@@ -0,0 +1,192 @@
+//!
+//! # Fluvio SPU - Fetch logs from Topic / Partition
+//!
+//! Connect to a Fluvio Streaming Processing Unit and fetch logs (if leader)
+//!
+//! ## Connect to SPU
+//! * APIVersions
+//! * FetchLocalSPU
+//! * FetchOffsets
+//! * FetchLogs - continuously fetch logs (10 ms)
+//!
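+
+// The overall flow, sketched with error handling elided (names are the ones
+// defined in this module; `server_addr`, `topic`, and `partition` come from the CLI):
+//
+//     let mut conn = Connection::new(&server_addr).await?;              // APIVersions
+//     let versions = spu_get_api_versions(&mut conn).await?;
+//     let local_spu = spu_fetch_local_spu(&mut conn, &versions).await?; // FetchLocalSPU
+//     let tp_params =
+//         local_spu_to_topic_partition_params(&local_spu, &server_addr, &topic, partition)?;
+//     // FetchOffsets and FetchLogs then run inside spu_fetch_log_loop
+//     // (see spu_fetch_log_loop.rs)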
+
+use std::io::ErrorKind;
+use std::io::Error as IoError;
+use std::net::SocketAddr;
+
+use ctrlc;
+use log::debug;
+
+use spu_api::spus::FlvFetchLocalSpuResponse;
+use spu_api::errors::FlvErrorCode;
+
+use futures::channel::mpsc;
+use futures::channel::mpsc::Sender;
+use futures::sink::SinkExt;
+use future_helper::run_block_on;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::spu_get_api_versions;
+
+use crate::consume::ConsumeLogConfig;
+use crate::consume::ReponseLogParams;
+
+use super::query::FlvTopicPartitionParam;
+use super::query::FlvLeaderParam;
+use super::query::FlvPartitionParam;
+use super::query::spu_fetch_local_spu;
+
+use super::spu_fetch_log_loop::spu_fetch_log_loop;
+
+// -----------------------------------
+// Fluvio SPU - Process Request
+// -----------------------------------
+
+// Create execution block to consume log messages
+pub fn spu_consume_log_from_topic_partition(
+    server_addr: SocketAddr,
+    cfg: ConsumeLogConfig,
+    partition: i32,
+    response_params: ReponseLogParams,
+) -> Result<(), CliError> {
+    run_block_on(process_log_from_topic_partition(
+        server_addr,
+        cfg,
+        partition,
+        response_params,
+    ))
+}
+
+/// Processing engine to consume logs from one topic & partition.
+/// Step 1: Collect system information
+///  * Lookup API versions
+///  * Fetch local SPU
+/// Step 2: Create loop for continuous log fetch
+async fn process_log_from_topic_partition(
+    server_addr: SocketAddr,
+    cfg: ConsumeLogConfig,
+    partition: i32,
+    response_params: ReponseLogParams,
+) -> Result<(), CliError> {
+    let mut spu_conn = Connection::new(&server_addr).await?;
+    let spu_vers = spu_get_api_versions(&mut spu_conn).await?;
+    debug!("consume topic: {}, partition: {}", cfg.topic, partition);
+
+    // query topic composition
+    let topic = &cfg.topic;
+    let local_spu_res = spu_fetch_local_spu(&mut spu_conn, &spu_vers).await?;
+    let tp_params =
+        local_spu_to_topic_partition_params(&local_spu_res, &server_addr, topic, partition)?;
+
+    // generate future for continuous fetch-log
+    fetch_log_future(
+        cfg.max_bytes,
+        cfg.from_beginning,
+        cfg.continous,
+        tp_params,
+        response_params,
+    )
+    .await?;
+
+    Ok(())
+}
+
+/// Generate futures for fetch-log and link with CTRL-C
+async fn fetch_log_future(
+    max_bytes: i32,
+    from_beginning: bool,
+    continuous: bool,
+    tp_param: FlvTopicPartitionParam,
+    response_params: ReponseLogParams,
+) -> Result<(), CliError> {
+    debug!("fetch log future");
+
+    // ensure only 1 leader
+    if tp_param.leaders.len() != 1 {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("expected 1 leader, found {}", tp_param.leaders.len()),
+        )));
+    }
+    let leader = &tp_param.leaders[0];
+    let topic_name = tp_param.topic_name.clone();
+
+    // fetch-log channel
+    let (sender, receiver) = mpsc::channel::<bool>(5);
+
+    // attach sender to Ctrl-C event handler
+    if let Err(err) = ctrlc::set_handler(move || {
+        debug!("ctrl-c received");
+        send_ctrlc_signal(sender.clone());
+    }) {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("CTRL-C handler: {}", err),
+        )));
+    }
+
+    // spin-off fetch log future
+    spu_fetch_log_loop(
+        topic_name,
+        max_bytes,
+        from_beginning,
+        continuous,
+        leader.clone(),
+        response_params.clone(),
+        receiver,
+    )
+    .await
+}
+
+/// Send CTRL-C signal to the fetch-log channel
+fn send_ctrlc_signal(mut sender: Sender<bool>) {
+    let _ = run_block_on(async move {
fail"); + Ok(()) as Result<(), ()> + }); +} + +// ----------------------------------- +// Conversions & Validations +// ----------------------------------- + +/// Parse local spu response and generate Topic/Partition parameters +fn local_spu_to_topic_partition_params( + local_spu_resp: &FlvFetchLocalSpuResponse, + server_addr: &SocketAddr, + topic_name: &String, + partition: i32, +) -> Result { + + debug!("local spu to topic partition name"); + + // check for errors + if local_spu_resp.error_code != FlvErrorCode::None { + return Err(CliError::IoError(IoError::new( + ErrorKind::InvalidData, + format!( + "topic '{}' local-spu error: {}", + topic_name, + local_spu_resp.error_code.to_sentence() + ), + ))); + } + + // generate topic/partition parameter object + let tp_param = FlvTopicPartitionParam { + topic_name: topic_name.clone(), + leaders: vec![FlvLeaderParam { + leader_id: local_spu_resp.id, + server_addr: server_addr.clone(), + partitions: vec![FlvPartitionParam { + partition_idx: partition, + offset: -1, + }], + }], + }; + + debug!("topic-partition parameters {:#?}", tp_param); + + Ok(tp_param) +} diff --git a/cli/src/consume/kf/kf_fetch_log_loop.rs b/cli/src/consume/kf/kf_fetch_log_loop.rs new file mode 100644 index 0000000000..f88c9d62a1 --- /dev/null +++ b/cli/src/consume/kf/kf_fetch_log_loop.rs @@ -0,0 +1,219 @@ +//! +//! # Kafka - Fetch Log (common APIs) +//! +//! Fetch APIs that are shared between +//! * Fetch Topic All +//! * Fetch Topic/Partition +//! + +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::time::Duration; + +use log::debug; + +use kf_protocol::message::fetch::DefaultKfFetchResponse; +use kf_protocol::message::offset::KfListOffsetResponse; +use kf_protocol::api::AllKfApiKey; +use kf_protocol::api::ErrorCode as KfErrorCode; + +use futures::channel::mpsc; +use futures::future::FutureExt; +use futures::stream::StreamExt; +use futures::select; +use future_helper::sleep; + +use crate::error::CliError; +use crate::common::Connection; +use crate::common::kf_get_api_versions; +use crate::common::kf_lookup_version; + +use crate::consume::logs_output::ReponseLogParams; +use crate::consume::process_fetch_topic_reponse; + +use super::query::FetchLogsParam; +use super::query::LeaderParam; +use super::query::kf_fetch_logs; +use super::query::kf_list_offsets; + + +// ----------------------------------- +// Fetch Loop +// ----------------------------------- + +/// Fetch log continuously +pub async fn kf_fetch_log_loop( + topic_name: String, + max_bytes: i32, + from_beginning: bool, + mut leader_param: LeaderParam, + response_params: ReponseLogParams, + mut receiver: mpsc::Receiver, +) -> Result<(), CliError> { + let mut conn = Connection::new(&leader_param.server_addr).await?; + let vers = kf_get_api_versions(&mut conn).await?; + let version = kf_lookup_version(AllKfApiKey::Fetch, &vers); + + // list offsets + let list_offsets_res = + kf_list_offsets(&mut conn, &topic_name, &leader_param, &vers).await?; + let _ = update_leader_partition_offsets(&topic_name, &mut leader_param, &list_offsets_res)?; + + // initialize fetch log parameters + let mut fetch_param = FetchLogsParam { + topic: topic_name.clone(), + max_bytes: max_bytes, + partitions: leader_param.partitions.clone(), + }; + + // fetch logs - from beginning + if from_beginning { + let fetch_logs_res = kf_fetch_logs(&mut conn, version, &fetch_param).await?; + let _ = update_fetch_log_offsets(&topic_name, &mut fetch_param, &fetch_logs_res, true)?; + } + + loop { + select! 
+        select! {
+            _ = (sleep(Duration::from_millis(10))).fuse() => {
+                // fetch logs
+                let fetch_logs_res = kf_fetch_logs(&mut conn, version, &fetch_param).await?;
+
+                // process logs response
+                process_fetch_topic_reponse(&fetch_logs_res, &response_params)?;
+
+                // update offsets
+                update_fetch_log_offsets(&topic_name, &mut fetch_param, &fetch_logs_res, false)?;
+            },
+            _receiver_req = receiver.next() => {
+                debug!("... replica {} exiting", leader_param.leader_id);
+                println!();
+                return Ok(())
+            }
+        }
+    }
+}
+
+// -----------------------------------
+// Conversions & Validations
+// -----------------------------------
+
+/// Update leader partition offsets
+fn update_leader_partition_offsets(
+    topic_name: &String,
+    leader_param: &mut LeaderParam,
+    list_offsets_res: &KfListOffsetResponse,
+) -> Result<(), CliError> {
+    for topic_res in &list_offsets_res.topics {
+        // ensure valid topic
+        if topic_res.name != *topic_name {
+            return Err(CliError::IoError(IoError::new(
+                ErrorKind::InvalidData,
+                format!("list offsets: unknown topic '{}'", topic_name),
+            )));
+        }
+
+        for partition_res in &topic_res.partitions {
+            let partition_name = format!("{}/{}", topic_res.name, partition_res.partition_index);
+
+            // validate partition response
+            if partition_res.error_code != KfErrorCode::None {
+                return Err(CliError::IoError(IoError::new(
+                    ErrorKind::InvalidData,
+                    format!(
+                        "partition offset '{}': {}",
+                        partition_name,
+                        partition_res.error_code.to_sentence()
+                    ),
+                )));
+            }
+
+            // update leader epoch & offsets in partitions
+            for partition in &mut leader_param.partitions {
+                if partition.partition_idx == partition_res.partition_index {
+                    partition.epoch = partition_res.leader_epoch;
+                    partition.offset = partition_res.offset;
+
+                    debug!(
+                        "list-offsets '{}' updated: epoch: {}, offset {}",
+                        partition_name, partition.epoch, partition.offset
+                    );
+                }
+            }
+        }
+    }
+
+    Ok(())
+}
+
+/// Update fetch log offsets by copying log_start_offset (from start) or
+/// last_stable_offset of the response to the params offset for each partition
+fn update_fetch_log_offsets(
+    topic_name: &String,
+    fetch_logs_param: &mut FetchLogsParam,
+    fetch_res: &DefaultKfFetchResponse,
+    from_start: bool,
+) -> Result<(), CliError> {
+    if fetch_res.error_code != KfErrorCode::None {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("fetch: {}", fetch_res.error_code.to_sentence()),
+        )));
+    }
+
+    // grab last stable offsets for each partition
+    for topic_res in &fetch_res.topics {
+        // ensure valid topic
+        if topic_res.name != *topic_name {
+            return Err(CliError::IoError(IoError::new(
+                ErrorKind::InvalidData,
+                format!("fetch: unknown topic '{}'", topic_name),
+            )));
+        }
+
+        for partition_res in &topic_res.partitions {
+            let partition_name = format!("{}/{}", topic_res.name, partition_res.partition_index);
+
+            // validate partition response
+            if partition_res.error_code != KfErrorCode::None {
+                return Err(CliError::IoError(IoError::new(
+                    ErrorKind::InvalidData,
+                    format!(
+                        "fetch partition '{}': {}",
+                        partition_name,
+                        partition_res.error_code.to_sentence()
+                    ),
+                )));
+            }
+
+            // update offsets in partitions
+            if from_start {
+                for partition in &mut fetch_logs_param.partitions {
+                    if partition.partition_idx == partition_res.partition_index
+                        && partition.offset != partition_res.log_start_offset
+                    {
+                        partition.offset = partition_res.log_start_offset;
+
+                        debug!(
+                            "partition '{}' - updated offset {} (from start)",
+                            partition_name, partition.offset
+                        );
+                    }
+                }
+            } else {
+                for partition in &mut fetch_logs_param.partitions {
+                    if partition.partition_idx == partition_res.partition_index
+                        && partition.offset != partition_res.last_stable_offset
+                    {
+                        partition.offset = partition_res.last_stable_offset;
+
+                        debug!(
+                            "partition '{}' - updated offset {}",
+                            partition_name, partition.offset
+                        );
+                    }
+                }
+            }
+        }
+    }
+    Ok(())
+}
diff --git a/cli/src/consume/kf/kf_fetch_topic_all.rs b/cli/src/consume/kf/kf_fetch_topic_all.rs
new file mode 100644
index 0000000000..eaf410601d
--- /dev/null
+++ b/cli/src/consume/kf/kf_fetch_topic_all.rs
@@ -0,0 +1,605 @@
+//!
+//! # Kafka - Fetch logs from Topic and all Partitions
+//!
+//! Kafka creates a group to listen to all partitions.
+//!
+//! ## Connection 1 - Connect to any broker:
+//! * APIVersions
+//! * Metadata
+//! * GroupCoordinator - look up the broker hosting the coordinator
+//!
+//! ## Connection 2 - Connect to the group coordinator:
+//! * APIVersions
+//! * JoinGroup - get server-assigned member-id
+//! * JoinGroup - join group membership
+//! * SyncGroup
+//! * OffsetFetch
+//! * Heartbeat - continuously send heartbeats (3 sec)
+//!
+//! ## Connection 3 - Connect to each topic/partition leader
+//! * APIVersions
+//! * ListOffsets
+//! * Fetch - continuously fetch logs (10 ms)
+//!
+
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+use std::net::SocketAddr;
+use std::time::Duration;
+
+use ctrlc;
+use log::debug;
+use utils::generators::generate_group_id;
+
+use kf_protocol::message::group::KfFindCoordinatorResponse;
+use kf_protocol::message::group::KfJoinGroupResponse;
+use kf_protocol::message::group::KfSyncGroupResponse;
+use kf_protocol::message::offset::KfOffsetFetchResponse;
+use kf_protocol::message::metadata::KfMetadataResponse;
+use kf_protocol::message::KfApiVersions;
+use kf_protocol::api::AllKfApiKey;
+use kf_protocol::api::ErrorCode as KfErrorCode;
+
+use futures::channel::mpsc;
+use futures::channel::mpsc::Sender;
+use futures::future::FutureExt;
+use futures::stream::StreamExt;
+use futures::sink::SinkExt;
+use futures::select;
+use futures::future::join_all;
+use futures::future::join;
+use future_helper::run_block_on;
+use future_helper::sleep;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::kf_get_api_versions;
+use crate::common::query_kf_metadata;
+use crate::common::kf_lookup_version;
+use crate::profile::ProfileConfig;
+
+use crate::consume::ConsumeLogConfig;
+use crate::consume::ReponseLogParams;
+
+use super::query::TopicPartitionParam;
+use super::query::LeaderParam;
+use super::query::PartitionParam;
+use super::query::kf_group_coordinator;
+use super::query::kf_offsets_fetch;
+use super::query::kf_join_group;
+use super::query::kf_sync_group;
+use super::query::kf_leave_group;
+use super::query::kf_send_heartbeat;
+
+use super::kf_fetch_log_loop::kf_fetch_log_loop;
+
+// -----------------------------------
+// Kafka - Process Request
+// -----------------------------------
+
+/// Lookup group coordinator
+pub fn kf_consume_log_from_topic(
+    server_addr: SocketAddr,
+    cfg: ConsumeLogConfig,
+    response_params: ReponseLogParams,
+) -> Result<(), CliError> {
+    run_block_on(process_consume_log_from_topic_all(
+        server_addr,
+        cfg,
+        response_params,
+    ))
+}
+
+/// Processing engine to consume logs for one topic & multiple partitions.
+/// Step 1: Collect system information
+///  * Lookup API versions
+///  * Request metadata
+///  * Fetch group coordinator
+/// Step 2: Create loop for group keep-alives
+/// Step 3: Create loop for continuous log fetch
+async fn process_consume_log_from_topic_all(
+    kf_ctrl_addr: SocketAddr,
+    cfg: ConsumeLogConfig,
+    response_params: ReponseLogParams,
+) -> Result<(), CliError> {
+    let mut brk_conn = Connection::new(&kf_ctrl_addr).await?;
+    let bkr_vers = kf_get_api_versions(&mut brk_conn).await?;
+    debug!("consume topic '{}'", cfg.topic);
+
+    // query metadata for topics
+    let query_topics = Some(vec![cfg.topic.clone()]);
+    let metadata = query_kf_metadata(&mut brk_conn, query_topics, &bkr_vers).await?;
+    let mut tp_params = metadata_to_all_topic_partition_params(&metadata)?;
+
+    // generate group-id
+    let grp_id = generate_group_id();
+    debug!("group id: '{}'", grp_id);
+
+    // query group coordinator
+    let grp_coordinator = kf_group_coordinator(&mut brk_conn, &grp_id, &bkr_vers).await?;
+    let coordinator_addr = group_coordinator_to_socket_addr(&grp_coordinator)?;
+
+    // create connection to the group coordinator
+    let mut gc_conn = Connection::new(&coordinator_addr).await?;
+    let gc_vers = kf_get_api_versions(&mut gc_conn).await?;
+
+    // join group coordinator (to get member id)
+    let empty_id = "".to_owned();
+    let join_group_res =
+        kf_join_group(&mut gc_conn, &cfg.topic, &grp_id, &empty_id, &gc_vers).await?;
+    let mbr_id = join_group_to_member_id(&join_group_res)?;
+
+    // join group
+    let join_group_res =
+        kf_join_group(&mut gc_conn, &cfg.topic, &grp_id, &mbr_id, &gc_vers).await?;
+    let gen_id = join_group_to_generation_id(&join_group_res)?;
+
+    // sync group
+    let sync_group_res =
+        kf_sync_group(&mut gc_conn, &cfg.topic, &grp_id, &mbr_id, gen_id, &gc_vers).await?;
+    sync_group_response_validate(&sync_group_res)?;
+
+    // offsets fetch
+    let offsets_fetch_res =
+        kf_offsets_fetch(&mut gc_conn, &grp_id, &cfg.topic, &tp_params, &gc_vers).await?;
+    update_topic_partition_params_offsets(&mut tp_params, &offsets_fetch_res)?;
+
+    // generate futures for group heartbeat and fetch logs
+    group_and_fetch_log_futures(
+        gc_conn,
+        gc_vers,
+        grp_id,
+        mbr_id,
+        gen_id,
+        cfg.max_bytes,
+        cfg.from_beginning,
+        tp_params,
+        response_params,
+    )
+    .await?;
+
+    Ok(())
+}
+
+/// Generate futures for group keep-alive and fetch-logs
+async fn group_and_fetch_log_futures(
+    conn: Connection,
+    vers: KfApiVersions,
+    grp_id: String,
+    mbr_id: String,
+    gen_id: i32,
+    max_bytes: i32,
+    from_beginning: bool,
+    tp_param: TopicPartitionParam,
+    response_params: ReponseLogParams,
+) -> Result<(), CliError> {
+    let mut send_channels = vec![];
+    let mut fetch_log_futures = vec![];
+
+    // group heartbeat channel
+    let (sender, receiver) = mpsc::channel::<bool>(5);
+    send_channels.push(sender);
+
+    for leader in &tp_param.leaders {
+        let (sender, receiver) = mpsc::channel::<bool>(5);
+        let topic_name = tp_param.topic_name.clone();
+        send_channels.push(sender);
+        fetch_log_futures.push(kf_fetch_log_loop(
+            topic_name,
+            max_bytes,
+            from_beginning,
+            leader.clone(),
+            response_params.clone(),
+            receiver,
+        ));
+    }
+
+    // attach all send channels to the Ctrl-C event handler
+    // - all futures will exit on a Ctrl-C event
+    if let Err(err) = ctrlc::set_handler(move || {
+        debug!("ctrl-c received");
+        send_ctrlc_signal(send_channels.clone());
+    }) {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("CTRL-C handler: {}", err),
+        )));
+    }
+
+    // spin-off all futures
+    let _r = join(
+        group_heartbeat_loop(conn, vers, grp_id, mbr_id, gen_id, receiver),
+        join_all(fetch_log_futures),
+    )
+    .await;
+
+    Ok(())
+}
+
+// -----------------------------------
+// Event Processing
+// -----------------------------------
+
+/// Send group keepalive heartbeats at regular intervals.
+/// Leave the group on Ctrl-C.
+async fn group_heartbeat_loop(
+    mut conn: Connection,
+    vers: KfApiVersions,
+    grp_id: String,
+    mbr_id: String,
+    gen_id: i32,
+    mut receiver: mpsc::Receiver<bool>,
+) -> Result<(), CliError> {
+    let heartbeat_ver = kf_lookup_version(AllKfApiKey::Heartbeat, &vers);
+
+    loop {
+        select! {
+            _ = (sleep(Duration::from_secs(3))).fuse() => {
+                if let Err(err) = kf_send_heartbeat(
+                    &mut conn,
+                    &grp_id,
+                    &mbr_id,
+                    gen_id,
+                    heartbeat_ver,
+                )
+                .await {
+                    return Err(CliError::IoError(IoError::new(
+                        ErrorKind::InvalidData,
+                        format!("healthcheck failed: {}", err),
+                    )));
+                }
+            },
+            _receiver_req = receiver.next() => {
+                kf_leave_group(
+                    &mut conn,
+                    &grp_id,
+                    &mbr_id,
+                    &vers,
+                )
+                .await?;
+                debug!("... heartbeat exiting");
+                return Ok(())
+            }
+        }
+    }
+}
+
+/// Send CTRL-C signal to all channels in send array
+fn send_ctrlc_signal(send_channels: Vec<Sender<bool>>) {
+    let _ = run_block_on(async move {
+        for mut send_channel in send_channels {
+            send_channel.send(true).await.expect("should not fail");
+        }
+        Ok(()) as Result<(), ()>
+    });
+}
+
+// -----------------------------------
+// Conversions & Validations
+// -----------------------------------
+
+/// Parse metadata parameters and generate topic/partition parameters
+fn metadata_to_all_topic_partition_params(
+    metadata_resp: &KfMetadataResponse,
+) -> Result<TopicPartitionParam, CliError> {
+    // there must be one topic in reply
+    if metadata_resp.topics.len() != 1 {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("expected 1 topic, found {}", metadata_resp.topics.len()),
+        )));
+    }
+
+    // check for errors
+    let topic_resp = &metadata_resp.topics[0];
+    if topic_resp.error_code != KfErrorCode::None {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("metadata response: {}", topic_resp.error_code.to_sentence()),
+        )));
+    }
+
+    // generate topic/partition parameter object
+    let mut tp_param = TopicPartitionParam {
+        topic_name: topic_resp.name.clone(),
+        leaders: vec![],
+    };
+
+    // traverse all partitions and look up leaders
+    for partition_resp in &topic_resp.partitions {
+        if partition_resp.error_code != KfErrorCode::None {
+            return Err(CliError::IoError(IoError::new(
+                ErrorKind::InvalidData,
+                format!(
+                    "partition '{}/{}': {}",
+                    topic_resp.name,
+                    partition_resp.partition_index,
+                    partition_resp.error_code.to_sentence()
+                ),
+            )));
+        }
+
+        // find leader for this partition
+        let mut leader: Option<&mut LeaderParam> = None;
+        for leader_param in &mut tp_param.leaders {
+            if leader_param.leader_id == partition_resp.leader_id {
+                leader = Some(leader_param);
+                break;
+            }
+        }
+
+        // create leader for this partition
+        if leader.is_none() {
+            // generate leader
+            for broker_resp in &metadata_resp.brokers {
+                if broker_resp.node_id == partition_resp.leader_id {
+                    let server_addr = ProfileConfig::host_port_to_socket_addr(&format!(
+                        "{}:{}",
+                        broker_resp.host, broker_resp.port
+                    ))?;
+
+                    tp_param.leaders.push(LeaderParam {
+                        leader_id: broker_resp.node_id,
+                        server_addr,
+                        partitions: vec![],
+                    });
+
+                    let len = tp_param.leaders.len();
+                    leader = Some(&mut tp_param.leaders[len - 1]);
+                    break;
+                }
+            }
+        }
+
+        if let Some(leader) = leader {
+            // add partition to leader
+            leader.partitions.push(PartitionParam {
+                partition_idx: partition_resp.partition_index,
+                epoch: partition_resp.leader_epoch,
+                offset: -1,
+            });
+        } else {
+            return Err(CliError::IoError(IoError::new(
+                ErrorKind::InvalidData,
+                "invalid metadata",
+            )));
+        };
+    }
+
+    // there must be at least one leader generated
+    if tp_param.leaders.is_empty() {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            "error generating topic/partitions",
+        )));
+    }
+
+    debug!("topic-partition parameters {:#?}", tp_param);
+
+    Ok(tp_param)
+}
+
+/// Parse group coordinator response and generate server address
+fn group_coordinator_to_socket_addr(
+    coordinator_resp: &KfFindCoordinatorResponse,
+) -> Result<SocketAddr, CliError> {
+    if coordinator_resp.error_code != KfErrorCode::None {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!(
+                "find group coordinator: {}",
+                coordinator_resp.error_code.to_sentence()
+            ),
+        )));
+    }
+
+    let server_addr = ProfileConfig::host_port_to_socket_addr(&format!(
+        "{}:{}",
+        coordinator_resp.host, coordinator_resp.port
+    ))?;
+
+    debug!("group coordinator host/port: '{}'", server_addr);
+
+    Ok(server_addr)
+}
+
+/// Parse join group response for member-id
+fn join_group_to_member_id(join_group_resp: &KfJoinGroupResponse) -> Result<String, CliError> {
+    if join_group_resp.error_code != KfErrorCode::None
+        && join_group_resp.error_code != KfErrorCode::MemberIdRequired
+    {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("join group: {}", join_group_resp.error_code.to_sentence()),
+        )));
+    }
+    debug!("member-id: '{}'", join_group_resp.member_id);
+
+    Ok(join_group_resp.member_id.clone())
+}
+
+/// Parse join group response for generation-id
+fn join_group_to_generation_id(join_group_resp: &KfJoinGroupResponse) -> Result<i32, CliError> {
+    if join_group_resp.error_code != KfErrorCode::None {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("join group: {}", join_group_resp.error_code.to_sentence()),
+        )));
+    }
+    debug!("generation-id: '{}'", join_group_resp.generation_id);
+
+    Ok(join_group_resp.generation_id)
+}
+
+/// Validate sync group response
+fn sync_group_response_validate(sync_group_resp: &KfSyncGroupResponse) -> Result<(), CliError> {
+    if sync_group_resp.error_code != KfErrorCode::None {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("sync group: {}", sync_group_resp.error_code.to_sentence()),
+        )));
+    }
+    debug!("sync group: ok");
+
+    Ok(())
+}
+
+/// Update topic partition offsets from offset fetch response
+fn update_topic_partition_params_offsets(
+    tp_param: &mut TopicPartitionParam,
+    offsets_fetch_res: &KfOffsetFetchResponse,
+) -> Result<(), CliError> {
+    if offsets_fetch_res.error_code != KfErrorCode::None {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!(
+                "offset fetch: {}",
+                offsets_fetch_res.error_code.to_sentence()
+            ),
+        )));
+    }
+
+    for topic_res in &offsets_fetch_res.topics {
+        // ensure valid topic
+        if topic_res.name != tp_param.topic_name {
+            return Err(CliError::IoError(IoError::new(
+                ErrorKind::InvalidData,
+                format!("offset fetch: unknown topic '{}'", topic_res.name),
+            )));
+        }
+
+        for partition_res in &topic_res.partitions {
+            let partition_name = format!("{}/{}", topic_res.name, partition_res.partition_index);
+
+            // validate partition response
+            if partition_res.error_code != KfErrorCode::None {
+                return Err(CliError::IoError(IoError::new(
+                    ErrorKind::InvalidData,
+                    format!(
+                        "partition '{}': {}",
+                        partition_name,
+                        partition_res.error_code.to_sentence()
+                    ),
+                )));
+            }
+
+            // update epoch & offsets in partitions
+            for leader in &mut tp_param.leaders {
+                for partition in &mut leader.partitions {
+                    if partition.partition_idx == partition_res.partition_index {
+                        partition.epoch = partition_res.committed_leader_epoch;
+                        partition.offset = partition_res.committed_offset;
+
+                        debug!(
+                            "fetch-offsets '{}' updated: epoch: {}, offset {}",
+                            partition_name, partition.epoch, partition.offset
+                        );
+                    }
+                }
+            }
+        }
+    }
+
+    Ok(())
+}
+
+// ---------------------------------------
+// Unit Tests
+// ---------------------------------------
+
+#[cfg(test)]
+pub mod test {
+    use super::*;
+    use std::net::{IpAddr, Ipv4Addr};
+
+    use kf_protocol::message::metadata::MetadataResponseBroker;
+    use kf_protocol::message::metadata::MetadataResponseTopic;
+    use kf_protocol::message::metadata::MetadataResponsePartition;
+
+    /// create metadata
+    fn create_metadata_response(leader_ids: Vec<i32>) -> KfMetadataResponse {
+        let mut metadata_partitions: Vec<MetadataResponsePartition> = vec![];
+        for (i, leader_id) in leader_ids.iter().enumerate() {
+            metadata_partitions.push(MetadataResponsePartition {
+                error_code: KfErrorCode::None,
+                partition_index: i as i32,
+                leader_id: *leader_id,
+                leader_epoch: 14,
+                replica_nodes: vec![2, 3],
+                isr_nodes: vec![3, 2],
+                offline_replicas: vec![],
+            });
+        }
+
+        KfMetadataResponse {
+            throttle_time_ms: 0,
+            brokers: vec![
+                MetadataResponseBroker {
+                    node_id: 2,
+                    host: "10.0.0.23".to_owned(),
+                    port: 9093,
+                    rack: None,
+                },
+                MetadataResponseBroker {
+                    node_id: 3,
+                    host: "10.0.0.23".to_owned(),
+                    port: 9094,
+                    rack: None,
+                },
+                MetadataResponseBroker {
+                    node_id: 1,
+                    host: "10.0.0.23".to_owned(),
+                    port: 9092,
+                    rack: None,
+                },
+            ],
+            cluster_id: Some("RcFjJ4hKTDK5fhMC3g-AqQ".to_owned()),
+            controller_id: 1,
+            topics: vec![MetadataResponseTopic {
+                error_code: KfErrorCode::None,
+                name: "test2".to_owned(),
+                is_internal: false,
+                partitions: metadata_partitions,
+            }],
+        }
+    }
+
+    #[test]
+    fn test_metadata_to_all_topic_partition_params() {
+        let metadata = create_metadata_response(vec![3, 2, 3]);
+        let result = metadata_to_all_topic_partition_params(&metadata);
+
+        let expected_result = TopicPartitionParam {
+            topic_name: "test2".to_owned(),
+            leaders: vec![
+                LeaderParam {
+                    leader_id: 3,
+                    server_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 23)), 9094),
+                    partitions: vec![
+                        PartitionParam {
+                            partition_idx: 0,
+                            offset: -1,
+                            epoch: 14,
+                        },
+                        PartitionParam {
+                            partition_idx: 2,
+                            offset: -1,
+                            epoch: 14,
+                        },
+                    ],
+                },
+                LeaderParam {
+                    leader_id: 2,
+                    server_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(10, 0, 0, 23)), 9093),
+                    partitions: vec![PartitionParam {
+                        partition_idx: 1,
+                        offset: -1,
+                        epoch: 14,
+                    }],
+                },
+            ],
+        };
+
+        println!("found: {:#?}\nexpected: {:#?}", result, expected_result);
+        assert_eq!(result.unwrap(), expected_result);
+    }
+}
diff --git a/cli/src/consume/kf/kf_fetch_topic_part.rs b/cli/src/consume/kf/kf_fetch_topic_part.rs
new file mode 100644
index 0000000000..a92b6a9766
--- /dev/null
+++ b/cli/src/consume/kf/kf_fetch_topic_part.rs
@@ -0,0 +1,242 @@
+//!
+//! # Kafka - Fetch logs from Topic / Partition
+//!
+//! Connect to any Kafka server, identify the leading broker, and fetch logs.
+//!
+//! ## Connection 1 - Connect to any broker:
+//! * APIVersions
+//! * Metadata
+//!
+//! ## Connection 2 - Connect to topic/partition leader
+//! * APIVersions
+//! * ListOffsets
+//! * Fetch - continuously fetch logs (10 ms)
+//!
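+
+// Sketch of the two-connection flow (error handling elided; names are from this module):
+//
+//     // Connection 1: any broker
+//     let mut broker = Connection::new(&server_addr).await?;
+//     let versions = kf_get_api_versions(&mut broker).await?;
+//     let metadata = query_kf_metadata(&mut broker, Some(vec![topic.clone()]), &versions).await?;
+//     let tp_params = metadata_to_topic_partition_params(&metadata, &topic, partition)?;
+//
+//     // Connection 2 is opened by kf_fetch_log_loop against the partition leader,
+//     // which lists offsets and then fetches logs every 10 ms until Ctrl-C.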
+
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+use std::net::SocketAddr;
+
+use ctrlc;
+use log::debug;
+
+use kf_protocol::message::metadata::KfMetadataResponse;
+use kf_protocol::api::ErrorCode as KfErrorCode;
+
+use futures::channel::mpsc;
+use futures::channel::mpsc::Sender;
+use futures::sink::SinkExt;
+use future_helper::run_block_on;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::kf_get_api_versions;
+use crate::common::query_kf_metadata;
+
+use crate::profile::ProfileConfig;
+use crate::consume::ConsumeLogConfig;
+use crate::consume::ReponseLogParams;
+
+use super::query::TopicPartitionParam;
+use super::query::LeaderParam;
+use super::query::PartitionParam;
+use super::kf_fetch_log_loop::kf_fetch_log_loop;
+
+// -----------------------------------
+// Kafka - Process Request
+// -----------------------------------
+
+// Create execution block to consume log messages
+pub fn kf_consume_log_from_topic_partition(
+    server_addr: SocketAddr,
+    cfg: ConsumeLogConfig,
+    partition: i32,
+    response_params: ReponseLogParams,
+) -> Result<(), CliError> {
+    run_block_on(process_log_from_topic_partition(
+        server_addr,
+        cfg,
+        partition,
+        response_params,
+    ))
+}
+
+/// Processing engine to consume logs from one topic & partition.
+/// Step 1: Collect system information
+///  * Lookup API versions
+///  * Request metadata
+/// Step 2: Create loop for continuous log fetch
+async fn process_log_from_topic_partition(
+    server_addr: SocketAddr,
+    cfg: ConsumeLogConfig,
+    partition: i32,
+    response_params: ReponseLogParams,
+) -> Result<(), CliError> {
+    let topic = &cfg.topic;
+    let mut brk_conn = Connection::new(&server_addr).await?;
+    let bkr_vers = kf_get_api_versions(&mut brk_conn).await?;
+    debug!("consume topic '{}'", topic);
+
+    // query metadata for topics
+    let query_topics = Some(vec![topic.clone()]);
+    let metadata = query_kf_metadata(&mut brk_conn, query_topics, &bkr_vers).await?;
+    let tp_params = metadata_to_topic_partition_params(&metadata, topic, partition)?;
+
+    // generate future for continuous fetch-log
+    fetch_log_future(
+        cfg.max_bytes,
+        cfg.from_beginning,
+        tp_params,
+        response_params,
+    )
+    .await?;
+
+    Ok(())
+}
+
+/// Generate futures for fetch-log and link with CTRL-C
+async fn fetch_log_future(
+    max_bytes: i32,
+    from_beginning: bool,
+    tp_param: TopicPartitionParam,
+    response_params: ReponseLogParams,
+) -> Result<(), CliError> {
+    // ensure only 1 leader
+    if tp_param.leaders.len() != 1 {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("expected 1 leader, found {}", tp_param.leaders.len()),
+        )));
+    }
+    let leader = &tp_param.leaders[0];
+    let topic_name = tp_param.topic_name.clone();
+
+    // fetch-log channel
+    let (sender, receiver) = mpsc::channel::<bool>(5);
+
+    // attach sender to Ctrl-C event handler
+    if let Err(err) = ctrlc::set_handler(move || {
+        debug!("ctrl-c received");
+        send_ctrlc_signal(sender.clone());
+    }) {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("CTRL-C handler: {}", err),
+        )));
+    }
+
+    // spin-off fetch log future
+    kf_fetch_log_loop(
+        topic_name,
+        max_bytes,
+        from_beginning,
+        leader.clone(),
+        response_params.clone(),
+        receiver,
+    )
+    .await
+}
+
+/// Send CTRL-C signal to the fetch-log channel
+fn send_ctrlc_signal(mut sender: Sender<bool>) {
+    let _ = run_block_on(async move {
+        sender.send(true).await.expect("should not fail");
+        Ok(()) as Result<(), ()>
+    });
+}
+
+// -----------------------------------
+// Conversions & Validations
+// -----------------------------------
+
+/// Parse metadata parameters and generate topic/partition parameters
+fn metadata_to_topic_partition_params(
+    metadata_resp: &KfMetadataResponse,
+    topic: &String,
+    partition: i32,
+) -> Result<TopicPartitionParam, CliError> {
+    // there must be one topic in reply
+    if metadata_resp.topics.len() != 1 {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("expected 1 topic, found {}", metadata_resp.topics.len()),
+        )));
+    }
+
+    // check for errors
+    let topic_resp = &metadata_resp.topics[0];
+    if topic_resp.error_code != KfErrorCode::None {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("metadata response: {}", topic_resp.error_code.to_sentence()),
+        )));
+    }
+
+    // ensure correct topic
+    if topic_resp.name != *topic {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            format!("expected topic '{}', found '{}'", topic, topic_resp.name),
+        )));
+    }
+
+    // generate topic/partition parameter object
+    let mut tp_param = TopicPartitionParam {
+        topic_name: topic.clone(),
+        leaders: vec![],
+    };
+
+    // find partition
+    for partition_resp in &topic_resp.partitions {
+        if partition_resp.partition_index == partition {
+            // ensure valid partition
+            if partition_resp.error_code != KfErrorCode::None {
+                return Err(CliError::IoError(IoError::new(
+                    ErrorKind::InvalidData,
+                    format!(
+                        "partition '{}/{}': {}",
+                        topic_resp.name,
+                        partition_resp.partition_index,
+                        partition_resp.error_code.to_sentence()
+                    ),
+                )));
+            }
+
+            // generate leader
+            for broker_resp in &metadata_resp.brokers {
+                if broker_resp.node_id == partition_resp.leader_id {
+                    let server_addr = ProfileConfig::host_port_to_socket_addr(&format!(
+                        "{}:{}",
+                        broker_resp.host, broker_resp.port
+                    ))?;
+
+                    tp_param.leaders.push(LeaderParam {
+                        leader_id: broker_resp.node_id,
+                        server_addr,
+                        partitions: vec![PartitionParam {
+                            partition_idx: partition_resp.partition_index,
+                            epoch: partition_resp.leader_epoch,
+                            offset: -1,
+                        }],
+                    });
+
+                    break;
+                }
+            }
+        }
+    }
+
+    // there must be at least one leader generated
+    if tp_param.leaders.is_empty() {
+        return Err(CliError::IoError(IoError::new(
+            ErrorKind::InvalidData,
+            "error generating topic/partitions",
+        )));
+    }
+
+    debug!("topic-partition parameters {:#?}", tp_param);
+
+    Ok(tp_param)
+}
diff --git a/cli/src/consume/kf/mod.rs b/cli/src/consume/kf/mod.rs
new file mode 100644
index 0000000000..5a4d590370
--- /dev/null
+++ b/cli/src/consume/kf/mod.rs
@@ -0,0 +1,16 @@
+mod query;
+mod kf_fetch_log_loop;
+mod kf_fetch_topic_all;
+mod kf_fetch_topic_part;
+
+pub use kf_fetch_topic_all::kf_consume_log_from_topic;
+pub use kf_fetch_topic_part::kf_consume_log_from_topic_partition;
+
+//pub use query::kf_offsets_fetch;
+//pub use query::kf_list_offsets;
+//pub use kf::kf_send_heartbeat;
+//pub use kf::fetch_logs;
+//pub use query::kf_group_coordinator;
+//pub use kf::kf_join_group;
+//pub use kf::kf_sync_group;
+//pub use kf::kf_leave_group;
diff --git a/cli/src/consume/kf/query/group_coordinator.rs b/cli/src/consume/kf/query/group_coordinator.rs
new file mode 100644
index 0000000000..d206e94d69
--- /dev/null
+++ b/cli/src/consume/kf/query/group_coordinator.rs
@@ -0,0 +1,42 @@
+//!
+//! # Kafka -- Find Group Coordinator
+//!
+//! Communicates with the Kafka Controller to retrieve the Group Coordinator for a consumer
+//!
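+
+// Usage sketch (assumes an open `Connection` and cached API versions; the
+// group id is hypothetical):
+//
+//     let group_id = "my-group".to_owned();
+//     let coordinator = kf_group_coordinator(&mut conn, &group_id, &versions).await?;
+//     // After checking coordinator.error_code, coordinator.host and
+//     // coordinator.port identify the broker to connect to next.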
+use log::trace;
+
+use kf_protocol::message::group::KfFindCoordinatorRequest;
+use kf_protocol::message::group::KfFindCoordinatorResponse;
+use kf_protocol::message::KfApiVersions;
+use kf_protocol::api::AllKfApiKey;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::kf_lookup_version;
+
+// Query Kafka server for Group Coordinator
+pub async fn kf_group_coordinator<'a>(
+    conn: &'a mut Connection,
+    group_id: &'a String,
+    versions: &'a KfApiVersions,
+) -> Result<KfFindCoordinatorResponse, CliError> {
+    let mut request = KfFindCoordinatorRequest::default();
+    let version = kf_lookup_version(AllKfApiKey::FindCoordinator, versions);
+    request.key = group_id.clone();
+
+    trace!(
+        "find group-coordinator req '{}': {:#?}",
+        conn.server_addr(),
+        request
+    );
+
+    let response = conn.send_request(request, version).await?;
+
+    trace!(
+        "find group-coordinator res '{}': {:#?}",
+        conn.server_addr(),
+        response
+    );
+
+    Ok(response)
+}
diff --git a/cli/src/consume/kf/query/heartbeat.rs b/cli/src/consume/kf/query/heartbeat.rs
new file mode 100644
index 0000000000..45fe0f8bf6
--- /dev/null
+++ b/cli/src/consume/kf/query/heartbeat.rs
@@ -0,0 +1,36 @@
+//!
+//! # Kafka -- Send Heartbeat
+//!
+//! Communicates with the Kafka Group Coordinator and sends a heartbeat
+//!
+use log::trace;
+
+use kf_protocol::message::group::KfHeartbeatRequest;
+use kf_protocol::message::group::KfHeartbeatResponse;
+
+use crate::error::CliError;
+use crate::common::Connection;
+
+// Send Heartbeat to group coordinator
+pub async fn kf_send_heartbeat<'a>(
+    conn: &'a mut Connection,
+    group_id: &'a String,
+    member_id: &'a String,
+    generation_id: i32,
+    version: Option<i16>,
+) -> Result<KfHeartbeatResponse, CliError> {
+    let mut request = KfHeartbeatRequest::default();
+
+    // request with protocol
+    request.group_id = group_id.clone();
+    request.member_id = member_id.clone();
+    request.generationid = generation_id;
+
+    trace!("heartbeat req '{}': {:#?}", conn.server_addr(), request);
+
+    let response = conn.send_request(request, version).await?;
+
+    trace!("heartbeat res '{}': {:#?}", conn.server_addr(), response);
+
+    Ok(response)
+}
diff --git a/cli/src/consume/kf/query/join_group.rs b/cli/src/consume/kf/query/join_group.rs
new file mode 100644
index 0000000000..1811a5d2e4
--- /dev/null
+++ b/cli/src/consume/kf/query/join_group.rs
@@ -0,0 +1,59 @@
+//!
+//! # Kafka -- Join Group
+//!
+//! Communicates with the Kafka Group Coordinator and requests to join the group
+//!
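+
+// The join is performed twice: the first call with an empty member-id is allowed
+// to fail with MemberIdRequired and yields the server-assigned id, which the
+// second call then uses (sketch; see kf_fetch_topic_all.rs for the full sequence):
+//
+//     let first = kf_join_group(&mut conn, &topic, &group_id, &"".to_owned(), &versions).await?;
+//     let member_id = first.member_id.clone();
+//     let joined = kf_join_group(&mut conn, &topic, &group_id, &member_id, &versions).await?;
+//     let generation_id = joined.generation_id;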
+use log::trace;
+
+use kf_protocol::message::group::KfJoinGroupRequest;
+use kf_protocol::message::group::KfJoinGroupResponse;
+use kf_protocol::message::group::JoinGroupRequestProtocol;
+use kf_protocol::api::{ProtocolMetadata, Metadata};
+use kf_protocol::message::KfApiVersions;
+use kf_protocol::api::AllKfApiKey;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::kf_lookup_version;
+
+// Ask the Group Coordinator to join the group
+pub async fn kf_join_group<'a>(
+    conn: &'a mut Connection,
+    topic_name: &'a String,
+    group_id: &'a String,
+    member_id: &'a String,
+    versions: &'a KfApiVersions,
+) -> Result<KfJoinGroupResponse, CliError> {
+    let mut request = KfJoinGroupRequest::default();
+    let version = kf_lookup_version(AllKfApiKey::JoinGroup, versions);
+
+    // metadata
+    let mut metadata = Metadata::default();
+    metadata.topics = vec![topic_name.clone()];
+
+    // protocol metadata
+    let mut protocol_metadata = ProtocolMetadata::default();
+    protocol_metadata.content = Some(metadata);
+
+    // join group protocol
+    let join_group_protocol = JoinGroupRequestProtocol {
+        name: "range".to_owned(),
+        metadata: protocol_metadata,
+    };
+
+    // request with protocol
+    request.group_id = group_id.clone();
+    request.session_timeout_ms = 10000;
+    request.rebalance_timeout_ms = 300000;
+    request.member_id = member_id.clone();
+    request.protocol_type = "consumer".to_owned();
+    request.protocols = vec![join_group_protocol];
+
+    trace!("join-group req '{}': {:#?}", conn.server_addr(), request);
+
+    let response = conn.send_request(request, version).await?;
+
+    trace!("join-group res '{}': {:#?}", conn.server_addr(), response);
+
+    Ok(response)
+}
diff --git a/cli/src/consume/kf/query/leave_group.rs b/cli/src/consume/kf/query/leave_group.rs
new file mode 100644
index 0000000000..250610733b
--- /dev/null
+++ b/cli/src/consume/kf/query/leave_group.rs
@@ -0,0 +1,38 @@
+//!
+//! # Kafka -- Leave Group
+//!
+//! Communicates with the Kafka Group Coordinator and requests to leave the group
+//!
+use log::trace;
+
+use kf_protocol::message::group::KfLeaveGroupRequest;
+use kf_protocol::message::group::KfLeaveGroupResponse;
+use kf_protocol::message::KfApiVersions;
+use kf_protocol::api::AllKfApiKey;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::kf_lookup_version;
+
+// Ask the Group Coordinator to leave the group
+pub async fn kf_leave_group<'a>(
+    conn: &'a mut Connection,
+    group_id: &'a String,
+    member_id: &'a String,
+    versions: &'a KfApiVersions,
+) -> Result<KfLeaveGroupResponse, CliError> {
+    let mut request = KfLeaveGroupRequest::default();
+    let version = kf_lookup_version(AllKfApiKey::LeaveGroup, versions);
+
+    // request with protocol
+    request.group_id = group_id.clone();
+    request.member_id = member_id.clone();
+
+    trace!("leave-group req '{}': {:#?}", conn.server_addr(), request);
+
+    let response = conn.send_request(request, version).await?;
+
+    trace!("leave-group res '{}': {:#?}", conn.server_addr(), response);
+
+    Ok(response)
+}
diff --git a/cli/src/consume/kf/query/log_fetch.rs b/cli/src/consume/kf/query/log_fetch.rs
new file mode 100644
index 0000000000..f2f602a11b
--- /dev/null
+++ b/cli/src/consume/kf/query/log_fetch.rs
@@ -0,0 +1,63 @@
+//!
+//! # Kafka Fetch Logs
+//!
+//! Connects to the server and fetches logs
+//!
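+
+// Usage sketch (assumes a connection to the partition leader and a negotiated
+// `version`; topic and offsets are hypothetical):
+//
+//     let params = FetchLogsParam {
+//         topic: "my-topic".to_owned(),
+//         max_bytes: 32768,
+//         partitions: vec![PartitionParam { partition_idx: 0, epoch: -1, offset: 0 }],
+//     };
+//     let response = kf_fetch_logs(&mut conn, version, &params).await?;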
+
+use log::{debug, trace};
+
+use kf_protocol::message::fetch::{DefaultKfFetchRequest, DefaultKfFetchResponse};
+use kf_protocol::message::fetch::FetchPartition;
+use kf_protocol::message::fetch::{KfFetchRequest, FetchableTopic};
+use kf_protocol::api::Isolation;
+
+use crate::error::CliError;
+use crate::common::Connection;
+
+use super::query_params::FetchLogsParam;
+
+/// Fetch log records from a target server
+pub async fn kf_fetch_logs<'a>(
+    conn: &'a mut Connection,
+    version: Option<i16>,
+    fetch_log_param: &'a FetchLogsParam,
+) -> Result<DefaultKfFetchResponse, CliError> {
+    let mut fetch_partitions = vec![];
+    for partition_param in &fetch_log_param.partitions {
+        let mut fetch_part = FetchPartition::default();
+        fetch_part.partition_index = partition_param.partition_idx;
+        fetch_part.current_leader_epoch = partition_param.epoch;
+        fetch_part.fetch_offset = partition_param.offset;
+        fetch_part.log_start_offset = -1;
+        fetch_part.max_bytes = fetch_log_param.max_bytes;
+
+        fetch_partitions.push(fetch_part);
+    }
+
+    let mut topic_request = FetchableTopic::default();
+    topic_request.name = fetch_log_param.topic.clone();
+    topic_request.fetch_partitions = fetch_partitions;
+
+    let mut request: DefaultKfFetchRequest = KfFetchRequest::default();
+    request.replica_id = -1;
+    request.max_wait = 500;
+    request.min_bytes = 1;
+    request.max_bytes = fetch_log_param.max_bytes;
+    request.isolation_level = Isolation::ReadCommitted;
+    request.session_id = 0;
+    request.epoch = -1;
+    request.topics.push(topic_request);
+
+    debug!(
+        "fetch logs '{}' ({} partitions) from {}",
+        fetch_log_param.topic,
+        fetch_log_param.partitions.len(),
+        conn.server_addr()
+    );
+    trace!("fetch logs req {:#?}", request);
+
+    let response = conn.send_request(request, version).await?;
+
+    trace!("fetch logs res: {:#?}", response);
+    Ok(response)
+}
diff --git a/cli/src/consume/kf/query/mod.rs b/cli/src/consume/kf/query/mod.rs
new file mode 100644
index 0000000000..a3456663f7
--- /dev/null
+++ b/cli/src/consume/kf/query/mod.rs
@@ -0,0 +1,23 @@
+mod group_coordinator;
+mod join_group;
+mod sync_group;
+mod leave_group;
+mod offsets_fetch;
+mod offsets_list;
+mod heartbeat;
+mod log_fetch;
+mod query_params;
+
+pub use group_coordinator::kf_group_coordinator;
+pub use join_group::kf_join_group;
+pub use sync_group::kf_sync_group;
+pub use leave_group::kf_leave_group;
+pub use offsets_fetch::kf_offsets_fetch;
+pub use offsets_list::kf_list_offsets;
+pub use heartbeat::kf_send_heartbeat;
+pub use log_fetch::kf_fetch_logs;
+
+pub use query_params::FetchLogsParam;
+pub use query_params::LeaderParam;
+pub use query_params::PartitionParam;
+pub use query_params::TopicPartitionParam;
\ No newline at end of file
diff --git a/cli/src/consume/kf/query/offsets_fetch.rs b/cli/src/consume/kf/query/offsets_fetch.rs
new file mode 100644
index 0000000000..e18eb1790e
--- /dev/null
+++ b/cli/src/consume/kf/query/offsets_fetch.rs
@@ -0,0 +1,55 @@
+//!
+//! # Kafka -- Offset Fetch
+//!
+//! Communicates with the Kafka Group Coordinator to fetch offsets for topic/partitions
+//!
+use log::trace;
+
+use kf_protocol::message::offset::{KfOffsetFetchRequest, KfOffsetFetchResponse};
+use kf_protocol::message::offset::OffsetFetchRequestTopic;
+use kf_protocol::message::KfApiVersions;
+use kf_protocol::api::AllKfApiKey;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::kf_lookup_version;
+
+use super::query_params::TopicPartitionParam;
+
+// Query the Group Coordinator for offsets
+pub async fn kf_offsets_fetch<'a>(
+    conn: &'a mut Connection,
+    group_id: &'a String,
+    topic_name: &'a String,
+    tp_param: &'a TopicPartitionParam,
+    versions: &'a KfApiVersions,
+) -> Result<KfOffsetFetchResponse, CliError> {
+    let mut request = KfOffsetFetchRequest::default();
+    let version = kf_lookup_version(AllKfApiKey::OffsetFetch, versions);
+
+    // collect partition indexes
+    let mut partition_indexes: Vec<i32> = vec![];
+    for leader in &tp_param.leaders {
+        for partition in &leader.partitions {
+            partition_indexes.push(partition.partition_idx);
+        }
+    }
+
+    // topics
+    let topics = vec![OffsetFetchRequestTopic {
+        name: topic_name.clone(),
+        partition_indexes,
+    }];
+
+    // request
+    request.group_id = group_id.clone();
+    request.topics = Some(topics);
+
+    trace!("offset-fetch req '{}': {:#?}", conn.server_addr(), request);
+
+    let response = conn.send_request(request, version).await?;
+
+    trace!("offset-fetch res '{}': {:#?}", conn.server_addr(), response);
+
+    Ok(response)
+}
diff --git a/cli/src/consume/kf/query/offsets_list.rs b/cli/src/consume/kf/query/offsets_list.rs
new file mode 100644
index 0000000000..1b0b73e134
--- /dev/null
+++ b/cli/src/consume/kf/query/offsets_list.rs
@@ -0,0 +1,54 @@
+//!
+//! # Kafka -- Offset List
+//!
+//! Communicates with the Kafka Replica Leader to list offsets for topic/partitions
+//!
+use log::trace;
+
+use kf_protocol::message::offset::{KfListOffsetRequest, KfListOffsetResponse};
+use kf_protocol::message::offset::ListOffsetTopic;
+use kf_protocol::message::offset::ListOffsetPartition;
+use kf_protocol::message::KfApiVersions;
+use kf_protocol::api::AllKfApiKey;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::kf_lookup_version;
+
+use super::query_params::LeaderParam;
+
+// Query the replica leader for offsets
+pub async fn kf_list_offsets<'a>(
+    conn: &'a mut Connection,
+    topic_name: &'a String,
+    leader: &'a LeaderParam,
+    versions: &'a KfApiVersions,
+) -> Result<KfListOffsetResponse, CliError> {
+    let mut request = KfListOffsetRequest::default();
+    let version = kf_lookup_version(AllKfApiKey::ListOffsets, versions);
+
+    // collect partition index & epoch information from leader
+    let mut offset_partitions: Vec<ListOffsetPartition> = vec![];
+    for partition in &leader.partitions {
+        offset_partitions.push(ListOffsetPartition {
+            partition_index: partition.partition_idx,
+            current_leader_epoch: partition.epoch,
+            timestamp: -1,
+        });
+    }
+
+    // update request
+    request.replica_id = -1;
+    request.topics = vec![ListOffsetTopic {
+        name: topic_name.clone(),
+        partitions: offset_partitions,
+    }];
+
+    trace!("list-offsets req '{}': {:#?}", conn.server_addr(), request);
+
+    let response = conn.send_request(request, version).await?;
+
+    trace!("list-offsets res '{}': {:#?}", conn.server_addr(), response);
+
+    Ok(response)
+}
diff --git a/cli/src/consume/kf/query/query_params.rs b/cli/src/consume/kf/query/query_params.rs
new file mode 100644
index 0000000000..66ec78b262
--- /dev/null
+++ b/cli/src/consume/kf/query/query_params.rs
@@ -0,0 +1,42 @@
+//!
+//! # Kafka -- Topic/Partition Parameters
+//!
+//! Intermediate structures to collect metadata information
+//!
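+
+// Shape of the parameter tree, sketched with hypothetical values (one leader
+// serving one partition; offset -1 means "not yet resolved"):
+//
+//     let tp_param = TopicPartitionParam {
+//         topic_name: "my-topic".to_owned(),
+//         leaders: vec![LeaderParam {
+//             leader_id: 1,
+//             server_addr: "127.0.0.1:9092".parse().unwrap(),
+//             partitions: vec![PartitionParam { partition_idx: 0, epoch: -1, offset: -1 }],
+//         }],
+//     };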
+use std::net::SocketAddr;
+
+use kf_protocol::api::Offset;
+
+/// Fetch Logs parameters
+#[derive(Debug)]
+pub struct FetchLogsParam {
+    pub topic: String,
+    pub max_bytes: i32,
+
+    pub partitions: Vec<PartitionParam>,
+}
+
+/// Topic/Partition parameters
+#[derive(Debug, Clone, PartialEq)]
+pub struct TopicPartitionParam {
+    pub topic_name: String,
+
+    pub leaders: Vec<LeaderParam>,
+}
+
+/// Replica Leader parameters
+#[derive(Debug, Clone, PartialEq)]
+pub struct LeaderParam {
+    pub leader_id: i32,
+    pub server_addr: SocketAddr,
+
+    pub partitions: Vec<PartitionParam>,
+}
+
+/// Partition parameters
+#[derive(Debug, Clone, PartialEq)]
+pub struct PartitionParam {
+    pub partition_idx: i32,
+    pub epoch: i32,
+    pub offset: Offset,
+}
diff --git a/cli/src/consume/kf/query/sync_group.rs b/cli/src/consume/kf/query/sync_group.rs
new file mode 100644
index 0000000000..4eab83e792
--- /dev/null
+++ b/cli/src/consume/kf/query/sync_group.rs
@@ -0,0 +1,59 @@
+//!
+//! # Kafka -- Sync Group
+//!
+//! Communicates with the Kafka Group Coordinator and requests to sync the group
+//!
+use log::trace;
+
+use kf_protocol::message::group::KfSyncGroupRequest;
+use kf_protocol::message::group::KfSyncGroupResponse;
+use kf_protocol::message::group::SyncGroupRequestAssignment;
+use kf_protocol::api::{GroupAssignment, Assignment};
+use kf_protocol::message::KfApiVersions;
+use kf_protocol::api::AllKfApiKey;
+
+use crate::error::CliError;
+use crate::common::Connection;
+use crate::common::kf_lookup_version;
+
+// Ask the Group Coordinator to sync the group
+pub async fn kf_sync_group<'a>(
+    conn: &'a mut Connection,
+    topic_name: &'a String,
+    group_id: &'a String,
+    member_id: &'a String,
+    generation_id: i32,
+    versions: &'a KfApiVersions,
+) -> Result<KfSyncGroupResponse, CliError> {
+    let mut request = KfSyncGroupRequest::default();
+    let version = kf_lookup_version(AllKfApiKey::SyncGroup, versions);
+
+    // assignment
+    let mut assignment = Assignment::default();
+    assignment.topics = vec![topic_name.clone()];
+    assignment.reserved_i32 = 1;
+
+    // group assignment
+    let mut group_assignment = GroupAssignment::default();
+    group_assignment.content = Some(assignment);
+
+    // sync group assignment request
+    let sync_group_assignment = SyncGroupRequestAssignment {
+        member_id: member_id.clone(),
+        assignment: group_assignment,
+    };
+
+    // sync group request
+    request.group_id = group_id.clone();
+    request.generation_id = generation_id;
+    request.member_id = member_id.clone();
+    request.assignments = vec![sync_group_assignment];
+
+    trace!("sync-group req '{}': {:#?}", conn.server_addr(), request);
+
+    let response = conn.send_request(request, version).await?;
+
+    trace!("sync-group res '{}': {:#?}", conn.server_addr(), response);
+
+    Ok(response)
+}
diff --git a/cli/src/consume/logs_output.rs b/cli/src/consume/logs_output.rs
new file mode 100644
index 0000000000..d4baf85001
--- /dev/null
+++ b/cli/src/consume/logs_output.rs
@@ -0,0 +1,266 @@
+//!
+//! # Fluvio Fetch Logs Output
+//!
+//! Processes fetch-log responses and prints the records in the requested format
+//!
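+
+// Usage sketch: render a fetch response as JSON, suppressing records that fail
+// to parse (`response` is assumed to be a DefaultKfFetchResponse from a fetch call):
+//
+//     let params = ReponseLogParams {
+//         output: ConsumeOutputType::json,
+//         suppress: true,
+//     };
+//     process_fetch_topic_reponse(&response, &params)?;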
+
+use std::io::{self, Write};
+
+use serde_json;
+use serde_json::Value;
+use types::print_cli_err;
+
+use kf_protocol::api::DefaultRecords;
+use kf_protocol::message::fetch::DefaultKfFetchResponse;
+use kf_protocol::message::fetch::FetchablePartitionResponse;
+
+use crate::error::CliError;
+use crate::common::ConsumeOutputType;
+use crate::common::{bytes_to_hex_dump, hex_dump_separator};
+
+/// Consume log output parameters
+#[derive(Debug, Clone)]
+pub struct ReponseLogParams {
+    pub output: ConsumeOutputType,
+    pub suppress: bool,
+}
+
+/// Process fetch topic response based on output type
+pub fn process_fetch_topic_reponse(
+    response: &DefaultKfFetchResponse,
+    params: &ReponseLogParams,
+) -> Result<(), CliError> {
+    // validate topic
+    for topic_res in &response.topics {
+        // ensure response topic has partitions
+        if topic_res.partitions.is_empty() {
+            let err = format!("topic '{}' has no partitions", topic_res.name);
+            print_cli_err!(err);
+            continue;
+        }
+
+        // parse records based on output type
+        let partitions_res = &topic_res.partitions;
+        match params.output {
+            ConsumeOutputType::json => {
+                let records =
+                    generate_json_records(&topic_res.name, partitions_res, params.suppress);
+                print_json_records(&records);
+            }
+            ConsumeOutputType::text => {
+                print_text_records(&topic_res.name, partitions_res, params.suppress);
+            }
+            ConsumeOutputType::binary => {
+                print_binary_records(&topic_res.name, partitions_res);
+            }
+            ConsumeOutputType::dynamic => {
+                print_dynamic_records(&topic_res.name, partitions_res);
+            }
+            ConsumeOutputType::raw => {
+                print_raw_records(&topic_res.name, partitions_res);
+            }
+        }
+    }
+
+    Ok(())
+}
+
+// -----------------------------------
+// JSON
+// -----------------------------------
+
+/// Parse messages and generate log records as JSON
+pub fn generate_json_records(
+    topic_name: &String,
+    response_partitions: &Vec<FetchablePartitionResponse<DefaultRecords>>,
+    suppress: bool,
+) -> Vec<Value> {
+    let mut json_records: Vec<Value> = vec![];
+
+    for r_partition in response_partitions {
+        if let Some(err) = error_in_header(topic_name, r_partition) {
+            print_cli_err!(err);
+            continue;
+        }
+
+        let mut new_records = partition_to_json_records(r_partition, suppress);
+        json_records.append(&mut new_records);
+    }
+
+    json_records
+}
+
+/// Traverse all partition batches and parse records to JSON format
+pub fn partition_to_json_records(
+    partition: &FetchablePartitionResponse<DefaultRecords>,
+    suppress: bool,
+) -> Vec<Value> {
+    let mut json_records: Vec<Value> = vec![];
+
+    // convert all batches to json records
+    for batch in &partition.records.batches {
+        for record in &batch.records {
+            if let Some(batch_record) = record.get_value().inner_value_ref() {
+                match serde_json::from_slice(&batch_record) {
+                    Ok(value) => json_records.push(value),
+                    Err(_) => {
+                        if !suppress {
+                            json_records.push(serde_json::json!({
+                                "error": record.get_value().describe()
+                            }));
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    json_records
+}
+
+/// Print JSON records to the screen
+fn print_json_records(records: &Vec<Value>) {
+    println!("{},", serde_json::to_string_pretty(&records).unwrap());
+}
+
+// -----------------------------------
+// Text
+// -----------------------------------
+
+/// Print records in text format
+pub fn print_text_records(
+    topic_name: &String,
+    response_partitions: &Vec<FetchablePartitionResponse<DefaultRecords>>,
+    suppress: bool,
+) {
+    for r_partition in response_partitions {
+        if let Some(err) = error_in_header(topic_name, r_partition) {
+            print_cli_err!(err);
+            continue;
+        }
+
+        for batch in &r_partition.records.batches {
+            for record in &batch.records {
record.get_value().inner_value_ref().is_some() { + if record.get_value().is_binary() { + if !suppress { + println!("{}", record.get_value().describe()); + } + } else { + println!("{}", record.get_value()); + } + } + } + } + } +} + +// ----------------------------------- +// Binary +// ----------------------------------- + +/// parse message and generate partition records +pub fn print_binary_records( + topic_name: &String, + response_partitions: &Vec<FetchablePartitionResponse<DefaultRecords>>, +) { + let mut printed = false; + for r_partition in response_partitions { + if let Some(err) = error_in_header(topic_name, r_partition) { + println!("{}", hex_dump_separator()); + print_cli_err!(err); + printed = true; + continue; + } + + for batch in &r_partition.records.batches { + for record in &batch.records { + if let Some(batch_record) = record.get_value().inner_value_ref() { + println!("{}", hex_dump_separator()); + println!("{}", bytes_to_hex_dump(&batch_record)); + printed = true; + } + } + } + } + if printed { + println!("{}", hex_dump_separator()); + } +} + +// ----------------------------------- +// Dynamic +// ----------------------------------- + +/// Print records based on their type +pub fn print_dynamic_records( + topic_name: &String, + response_partitions: &Vec<FetchablePartitionResponse<DefaultRecords>>, +) { + for r_partition in response_partitions { + if let Some(err) = error_in_header(topic_name, r_partition) { + print_cli_err!(err); + continue; + } + + for batch in &r_partition.records.batches { + for record in &batch.records { + if let Some(batch_record) = record.get_value().inner_value_ref() { + if record.get_value().is_binary() { + println!("{}", hex_dump_separator()); + println!("{}", bytes_to_hex_dump(&batch_record)); + println!("{}", hex_dump_separator()); + } else { + println!("{}", record.get_value()); + } + } + } + } + } +} + +// ----------------------------------- +// Raw +// ----------------------------------- + +/// Print records in raw format +pub fn print_raw_records( + topic_name: &String, + response_partitions: &Vec<FetchablePartitionResponse<DefaultRecords>>, +) { + for r_partition in response_partitions { + if let Some(err) = error_in_header(topic_name, r_partition) { + print_cli_err!(err); + continue; + } + + for batch in &r_partition.records.batches { + for record in &batch.records { + if let Some(value) = record.get_value().inner_value_ref() { + let _ = io::stdout().write(value); + } + } + } + } +} + +// ----------------------------------- +// Utilities +// ----------------------------------- + +/// If header has error, format and return +pub fn error_in_header( + topic_name: &String, + r_partition: &FetchablePartitionResponse<DefaultRecords>, +) -> Option<String> { + if r_partition.error_code.is_error() { + Some(format!( + "topic '{}/{}': {}", + topic_name, + r_partition.partition_index, + r_partition.error_code.to_sentence() + )) + } else { + None + } +} diff --git a/cli/src/consume/mod.rs b/cli/src/consume/mod.rs new file mode 100644 index 0000000000..57454fc69f --- /dev/null +++ b/cli/src/consume/mod.rs @@ -0,0 +1,18 @@ +mod cli; +mod flv; +mod kf; +mod logs_output; + +pub use cli::ConsumeLogOpt; +pub use cli::ConsumeLogConfig; +pub use cli::process_consume_log; + +pub use kf::kf_consume_log_from_topic; +pub use kf::kf_consume_log_from_topic_partition; + +pub use flv::sc_consume_log_from_topic; +pub use flv::sc_consume_log_from_topic_partition; +pub use flv::spu_consume_log_from_topic_partition; + +pub use logs_output::ReponseLogParams; +pub use logs_output::process_fetch_topic_reponse; diff --git a/cli/src/error/error.rs b/cli/src/error/error.rs new file mode 100644 index 0000000000..293174e6dd ---
/dev/null +++ b/cli/src/error/error.rs @@ -0,0 +1,31 @@ +use std::fmt; + +use kf_socket::KfSocketError; +use std::io::Error as IoError; + +#[derive(Debug)] +pub enum CliError { + IoError(IoError), + KfSocketError(KfSocketError), +} + +impl From<IoError> for CliError { + fn from(error: IoError) -> Self { + CliError::IoError(error) + } +} + +impl From<KfSocketError> for CliError { + fn from(error: KfSocketError) -> Self { + CliError::KfSocketError(error) + } +} + +impl fmt::Display for CliError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + CliError::IoError(err) => write!(f, "{}", err), + CliError::KfSocketError(err) => write!(f, "{}", err), + } + } +} \ No newline at end of file diff --git a/cli/src/error/mod.rs b/cli/src/error/mod.rs new file mode 100644 index 0000000000..1196572152 --- /dev/null +++ b/cli/src/error/mod.rs @@ -0,0 +1,3 @@ +mod error; + +pub use self::error::CliError; diff --git a/cli/src/lib.rs b/cli/src/lib.rs new file mode 100644 index 0000000000..9224b937be --- /dev/null +++ b/cli/src/lib.rs @@ -0,0 +1,15 @@ +#![recursion_limit = "128"] + +mod common; +mod error; +mod consume; +mod produce; +mod profile; +mod root_cli; +mod spu; +mod topic; +mod advanced; +//mod auth_token; + +pub use self::error::CliError; +pub use self::root_cli::run_cli; diff --git a/cli/src/produce/cli.rs b/cli/src/produce/cli.rs new file mode 100644 index 0000000000..645e884dc6 --- /dev/null +++ b/cli/src/produce/cli.rs @@ -0,0 +1,198 @@ +//! +//! # Produce CLI +//! +//! CLI command for Produce operation +//! + +use std::io::prelude::*; +use std::io::BufReader; +use std::io::BufRead; +use std::fs::File; +use std::path::PathBuf; + +use structopt::StructOpt; + +use crate::error::CliError; +use crate::profile::{ProfileConfig, TargetServer}; + +use super::helpers::process_sc_produce_record; +use super::helpers::process_spu_produce_record; +use super::helpers::process_kf_produce_record; + +// ----------------------------------- +// CLI Options +// ----------------------------------- + +#[derive(Debug, StructOpt)] +pub struct ProduceLogOpt { + /// Topic name + #[structopt(short = "t", long = "topic", value_name = "string")] + pub topic: String, + + /// Partition id + #[structopt(short = "p", long = "partition", value_name = "integer")] + pub partition: i32, + + /// Send messages in an infinite loop + #[structopt(short = "C", long = "continuous")] + pub continuous: bool, + + /// Each line is a Record + #[structopt( + short = "l", + long = "record-per-line", + value_name = "filename", + parse(from_os_str) + )] + record_per_line: Option<PathBuf>, + + /// Entire file is a Record (multiple) + #[structopt( + short = "r", + long = "record-file", + value_name = "filename", + parse(from_os_str), + conflicts_with = "record_per_line" + )] + record_file: Vec<PathBuf>, + + /// Address of Streaming Controller + #[structopt(short = "c", long = "sc", value_name = "host:port")] + pub sc: Option<String>, + + /// Address of Streaming Processing Unit + #[structopt( + short = "u", + long = "spu", + value_name = "host:port", + conflicts_with = "sc" + )] + pub spu: Option<String>, + + /// Address of Kafka Controller + #[structopt( + short = "k", + long = "kf", + value_name = "host:port", + conflicts_with = "sc", + conflicts_with = "spu" + )] + pub kf: Option<String>, + + /// Profile name + #[structopt(short = "P", long = "profile")] + pub profile: Option<String>, +} + +// ----------------------------------- +// Parsed Config +// ----------------------------------- + +/// Produce log configuration parameters +#[derive(Debug)] +pub struct ProduceLogConfig { + pub topic: String,
+ pub partition: i32, + + pub records_from_file: Option<FileRecord>, +} + +#[derive(Debug)] +pub enum FileRecord { + Lines(PathBuf), + Files(Vec<PathBuf>), +} + +pub type RecordTouples = Vec<(String, Vec<u8>)>; + +// ----------------------------------- +// CLI Processing +// ----------------------------------- + +/// Process produce record cli request +pub fn process_produce_record(opt: ProduceLogOpt) -> Result<(), CliError> { + let (target_server, produce_log_cfg, continuous) = parse_opt(opt)?; + let file_records = file_to_records(&produce_log_cfg.records_from_file)?; + let topic = produce_log_cfg.topic.clone(); + let partition = produce_log_cfg.partition; + + match target_server { + TargetServer::Kf(server_addr) => { + process_kf_produce_record(server_addr, topic, partition, file_records, continuous) + } + TargetServer::Spu(server_addr) => { + process_spu_produce_record(server_addr, topic, partition, file_records, continuous) + } + TargetServer::Sc(server_addr) => { + process_sc_produce_record(server_addr, topic, partition, file_records, continuous) + } + } +} + +/// Validate cli options. Generate target-server and produce log configuration. +fn parse_opt(opt: ProduceLogOpt) -> Result<(TargetServer, ProduceLogConfig, bool), CliError> { + // profile specific configurations (target server) + let profile_config = ProfileConfig::new_with_spu(&opt.sc, &opt.spu, &opt.kf, &opt.profile)?; + let target_server = profile_config.target_server()?; + + // generate file record + let records_from_file = if let Some(record_per_line) = opt.record_per_line { + Some(FileRecord::Lines(record_per_line.clone())) + } else if opt.record_file.len() > 0 { + Some(FileRecord::Files(opt.record_file.clone())) + } else { + None + }; + + // produce log specific configurations + let produce_log_cfg = ProduceLogConfig { + topic: opt.topic, + partition: opt.partition, + records_from_file, + }; + + // return server separately from config + Ok((target_server, produce_log_cfg, opt.continuous)) +} + +/// Retrieve one or more files and convert them into a list of (name, record) tuples +pub fn file_to_records( + file_record_options: &Option<FileRecord>, +) -> Result<RecordTouples, CliError> { + let mut records: RecordTouples = vec![]; + + match file_record_options { + Some(file_record) => { + match file_record { + // lines as records + FileRecord::Lines(lines2rec_path) => { + let f = File::open(lines2rec_path)?; + let file = BufReader::new(&f); + + // read each line and convert to byte array + for line in file.lines() { + if let Ok(text) = line { + records.push((text.clone(), text.as_bytes().to_vec())); + } + } + } + + // files as records + FileRecord::Files(files_to_rec_path) => { + for file_path in files_to_rec_path { + let file_name = file_path.to_str().unwrap_or("?"); + let mut f = File::open(file_path)?; + let mut buffer = Vec::new(); + + // read the whole file in a byte array + f.read_to_end(&mut buffer)?; + records.push((file_name.to_owned(), buffer)); + } + } + } + } + None => {} + } + + Ok(records) +} diff --git a/cli/src/produce/helpers/mod.rs b/cli/src/produce/helpers/mod.rs new file mode 100644 index 0000000000..68e1b069ab --- /dev/null +++ b/cli/src/produce/helpers/mod.rs @@ -0,0 +1,10 @@ +mod proc_kf; +mod proc_sc; +mod proc_spu; +mod send_record; + +pub use proc_sc::process_sc_produce_record; +pub use proc_spu::process_spu_produce_record; +pub use proc_kf::process_kf_produce_record; + +pub use send_record::send_log_record_to_server; diff --git a/cli/src/produce/helpers/proc_kf.rs b/cli/src/produce/helpers/proc_kf.rs new file mode 100644 index
0000000000..a225046e91 --- /dev/null +++ b/cli/src/produce/helpers/proc_kf.rs @@ -0,0 +1,150 @@ +//! +//! # Kafka Produce Log +//! +//! Connects to the Kafka server, identifies the broker, and sends the log. +//! + +use std::io; +use std::io::prelude::*; +use std::net::SocketAddr; + +use types::{print_cli_err, print_cli_ok}; +use future_helper::run_block_on; + +use kf_protocol::message::KfApiVersions; +use kf_protocol::api::AllKfApiKey; + +use crate::error::CliError; +use crate::common::Connection; +use crate::common::{kf_lookup_version, kf_get_api_versions}; +use crate::common::find_broker_leader_for_topic_partition; + +use crate::produce::cli::RecordTouples; + +use super::send_log_record_to_server; + +// ----------------------------------- +// Kafka - Process Request +// ----------------------------------- + +/// Dispatch records based on the content of the record_touples variable +pub fn process_kf_produce_record( + server_addr: SocketAddr, + topic: String, + partition: i32, + record_touples: RecordTouples, + continuous: bool, +) -> Result<(), CliError> { + // look up the broker for the topic/partition leader + let broker_addr = run_block_on(find_leader_broker(server_addr, topic.clone(), partition))?; + + // get versions + let versions = run_block_on(get_server_versions(server_addr))?; + + // send records to Broker + send_records_to_broker(broker_addr, topic, partition, record_touples, versions, continuous) +} + +// Connect to server, get versions and find broker +async fn find_leader_broker( + kf_ctrl_addr: SocketAddr, + topic: String, + partition: i32, +) -> Result<SocketAddr, CliError> { + let mut conn = Connection::new(&kf_ctrl_addr).await?; + let versions = kf_get_api_versions(&mut conn).await?; + + // find broker + find_broker_leader_for_topic_partition(&mut conn, topic, partition, &versions).await +} + +// Connect to server, get versions +async fn get_server_versions(socket_addr: SocketAddr) -> Result<KfApiVersions, CliError> { + let mut conn = Connection::new(&socket_addr).await?; + kf_get_api_versions(&mut conn).await +} + +/// Dispatch records based on the content of the record_touples variable +fn send_records_to_broker( + broker_addr: SocketAddr, + topic: String, + partition: i32, + record_touples: RecordTouples, + versions: KfApiVersions, + continuous: bool, +) -> Result<(), CliError> { + let version = kf_lookup_version(AllKfApiKey::Produce, &versions); + + // in both cases, exit loop on error + if record_touples.len() > 0 { + // records from files + for r_touple in record_touples { + println!("{}", r_touple.0); + process_record(broker_addr, topic.clone(), partition, r_touple.1, version); + } + } else { + let stdin = io::stdin(); + for line in stdin.lock().lines() { + let text = line?; + let record = text.as_bytes().to_vec(); + process_record(broker_addr, topic.clone(), partition, record, version); + if !continuous { + return Ok(()) + } + } + } + + Ok(()) +} + +/// Process record and print success or error +fn process_record( + broker_addr: SocketAddr, + topic: String, + partition: i32, + record: Vec<u8>, + version: Option<i16>, +) { + match run_block_on(send_log_record_to_server( + broker_addr, + topic, + partition, + record, + version, + )) { + Ok(()) => print_cli_ok!(), + Err(err) => print_cli_err!(format!("{}", err)), + } +} + +/* +#[cfg(test)] +mod tests { + + extern crate test; + use std::net::SocketAddr; + use std::io::Error as IoError; + use std::io::ErrorKind; + + use test::Bencher; + use super::process_record; + fn create_sample_record() -> Vec<u8> { + let mut record: Vec<u8> = vec![]; + for _ in 0..10000 { + record.push(0x10); + } + record + + }
+ + + #[bench] + fn bench_kf_send_record(b: &mut Bencher) { + let server_addr = "127.0.0.1:9092".to_owned(); + let socket_addr = server_addr + .parse::<SocketAddr>() + .map_err(|err| IoError::new(ErrorKind::InvalidInput, format!("{}", err))).expect("kf server"); + b.iter(move || process_record(socket_addr, "test".to_owned(), 0, create_sample_record(), None)); + } +} +*/ diff --git a/cli/src/produce/helpers/proc_sc.rs b/cli/src/produce/helpers/proc_sc.rs new file mode 100644 index 0000000000..00182e1958 --- /dev/null +++ b/cli/src/produce/helpers/proc_sc.rs @@ -0,0 +1,56 @@ +//! +//! # Fluvio SC Produce Log +//! +//! Looks up the SPU responsible for the topic, connects to the server +//! and sends the log. +//! + +use std::net::SocketAddr; + +use log::debug; +use future_helper::run_block_on; + +use crate::error::CliError; +use crate::common::Connection; +use crate::common::find_spu_leader_for_topic_partition; +use crate::common::sc_get_api_versions; + +use crate::produce::cli::RecordTouples; + +use super::process_spu_produce_record; + +// ----------------------------------- +// Fluvio SC - Process Request +// ----------------------------------- + +/// Dispatch records based on the content of the record_touples variable +pub fn process_sc_produce_record( + server_addr: SocketAddr, + topic: String, + partition: i32, + record_touples: RecordTouples, + continuous: bool, +) -> Result<(), CliError> { + // look up the SPU serving the topic/partition leader + let spu_addr = run_block_on(spu_leader_for_topic_partition( + server_addr, + topic.clone(), + partition, + ))?; + + // send records to SPU + process_spu_produce_record(spu_addr, topic, partition, record_touples, continuous) +} + +// Connect to SC Controller, find spu, and send log +async fn spu_leader_for_topic_partition( + sc_addr: SocketAddr, + topic: String, + partition: i32, +) -> Result<SocketAddr, CliError> { + let mut conn = Connection::new(&sc_addr).await?; + let sc_vers = sc_get_api_versions(&mut conn).await?; + + debug!("got sc version: {:#?}", sc_vers); + find_spu_leader_for_topic_partition(&mut conn, topic.clone(), partition, &sc_vers).await +} diff --git a/cli/src/produce/helpers/proc_spu.rs b/cli/src/produce/helpers/proc_spu.rs new file mode 100644 index 0000000000..1435a8a1d6 --- /dev/null +++ b/cli/src/produce/helpers/proc_spu.rs @@ -0,0 +1,95 @@ +//! +//! # Fluvio SPU Produce Log +//! +//! Connects to SPU server and sends the log. +//!
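+//!
+//! Records come either from files (as `RecordTouples`) or from stdin, one
+//! record per line; with `--continuous`, stdin is read until EOF, otherwise
+//! only the first line is sent.
+//!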
+ +use std::io; +use std::io::prelude::*; +use std::net::SocketAddr; + +use types::{print_cli_err, print_cli_ok}; +use future_helper::run_block_on; + +use crate::error::CliError; + +use crate::produce::cli::RecordTouples; + +use super::send_log_record_to_server; + +// ----------------------------------- +// Fluvio SPU - Process Request +// ----------------------------------- + +/// Dispatch records based on the content of the record_touples variable +pub fn process_spu_produce_record( + spu_addr: SocketAddr, + topic: String, + partition: i32, + record_touples: RecordTouples, + continuous: bool, +) -> Result<(), CliError> { + // in both cases, exit loop on error + if record_touples.len() > 0 { + // records from files + for r_touple in record_touples { + println!("{}", r_touple.0); + process_record(spu_addr, topic.clone(), partition, r_touple.1); + } + } else { + let stdin = io::stdin(); + for line in stdin.lock().lines() { + let text = line?; + let record = text.as_bytes().to_vec(); + process_record(spu_addr, topic.clone(), partition, record); + if !continuous { + return Ok(()) + } + } + } + + Ok(()) +} + +/// Process record and print success or error +/// TODO: Add version handling for SPU +fn process_record(spu_addr: SocketAddr, topic: String, partition: i32, record: Vec<u8>) { + match run_block_on(send_log_record_to_server( + spu_addr, topic, partition, record, None, + )) { + Ok(()) => print_cli_ok!(), + Err(err) => print_cli_err!(format!("{}", err)), + } +} + +/* +#[cfg(test)] +mod tests { + + extern crate test; + use std::net::SocketAddr; + use std::io::Error as IoError; + use std::io::ErrorKind; + + use test::Bencher; + use super::process_record; + fn create_sample_record() -> Vec<u8> { + let mut record: Vec<u8> = vec![]; + for _ in 0..10000 { + record.push(0x10); + } + record + + } + + + #[bench] + fn bench_spu_send_record(b: &mut Bencher) { + let server_addr = "127.0.0.1:9004".to_owned(); + let socket_addr = server_addr + .parse::<SocketAddr>() + .map_err(|err| IoError::new(ErrorKind::InvalidInput, format!("{}", err))).expect("spu server"); + b.iter(move || process_record(socket_addr, "test".to_owned(), 0, create_sample_record())); + } +} +*/ diff --git a/cli/src/produce/helpers/send_record.rs b/cli/src/produce/helpers/send_record.rs new file mode 100644 index 0000000000..cbeeb41e96 --- /dev/null +++ b/cli/src/produce/helpers/send_record.rs @@ -0,0 +1,75 @@ +//! +//! # Fluvio Produce Log +//! +//! Takes user input and sends to the SPU, SC, or KF servers. +//!
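+//!
+//! The request is assembled as a single-record hierarchy:
+//! `DefaultKfProduceRequest` -> `DefaultKfTopicRequest` ->
+//! `DefaultKfPartitionRequest` -> `DefaultBatch` -> `DefaultRecord`,
+//! sent with `acks = 1` and a 1500 ms timeout.
+//!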
+ +use std::net::SocketAddr; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use log::{debug, trace}; + +use kf_protocol::message::produce::DefaultKfProduceRequest; +use kf_protocol::message::produce::DefaultKfPartitionRequest; +use kf_protocol::message::produce::DefaultKfTopicRequest; +use kf_protocol::api::DefaultBatch; +use kf_protocol::api::DefaultRecord; + +use crate::error::CliError; +use crate::common::Connection; + +/// Sends record to a target server (Kf, SPU, or SC) +pub async fn send_log_record_to_server( + addr: SocketAddr, + topic: String, + partition: i32, + record: Vec<u8>, + version: Option<i16>, +) -> Result<(), CliError> { + let mut conn = Connection::new(&addr).await?; + let topic_name = topic.clone(); + + // build produce log request message + let mut request = DefaultKfProduceRequest::default(); + let mut topic_request = DefaultKfTopicRequest::default(); + let mut partition_request = DefaultKfPartitionRequest::default(); + + debug!("send record {} bytes to: {}", record.len(), addr); + + let record_msg: DefaultRecord = record.into(); + let mut batch = DefaultBatch::default(); + batch.records.push(record_msg); + + partition_request.partition_index = partition; + partition_request.records.batches.push(batch); + topic_request.name = topic; + topic_request.partitions.push(partition_request); + + request.acks = 1; + request.timeout_ms = 1500; + request.topics.push(topic_request); + + trace!("produce request: {:#?}", request); + + let response = conn.send_request(request, version).await?; + + trace!("received response: {:?}", response); + + // process response + match response.find_partition_response(&topic_name, partition) { + Some(partition_response) => { + if partition_response.error_code.is_error() { + return Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("{}", partition_response.error_code.to_sentence()), + ))); + } + Ok(()) + } + None => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + "unknown error", + ))), + } +} diff --git a/cli/src/produce/mod.rs b/cli/src/produce/mod.rs new file mode 100644 index 0000000000..fc2a08fd55 --- /dev/null +++ b/cli/src/produce/mod.rs @@ -0,0 +1,5 @@ +mod cli; +mod helpers; + +pub use cli::ProduceLogOpt; +pub use cli::process_produce_record; diff --git a/cli/src/profile/config.rs b/cli/src/profile/config.rs new file mode 100644 index 0000000000..b01355a09c --- /dev/null +++ b/cli/src/profile/config.rs @@ -0,0 +1,166 @@ +//! +//! # Profile Configurations
//! +//! Stores configuration parameters retrieved from the default or custom profile file. +//!
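+//!
+//! Precedence: addresses given on the command line win over the profile
+//! file; when resolving the target server, SC is preferred, then SPU,
+//! then Kafka.
+//!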
+use std::io::Error as IoError; +use std::io::ErrorKind; +use std::net::SocketAddr; +use std::path::Path; + +use types::socket_helpers::string_to_socket_addr; + +use crate::CliError; + +use super::profile_file::build_cli_profile_file_path; +use super::profile_file::ProfileFile; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +/// Profile parameters +#[derive(Default, Debug, PartialEq)] +pub struct ProfileConfig { + pub sc_addr: Option<SocketAddr>, + pub spu_addr: Option<SocketAddr>, + pub kf_addr: Option<SocketAddr>, +} + +/// Target Server +#[derive(Debug)] +pub enum TargetServer { + Sc(SocketAddr), + Spu(SocketAddr), + Kf(SocketAddr), +} + +// ----------------------------------- +// Implementation +// ----------------------------------- + +impl ProfileConfig { + /// generate profile configuration based on a default or custom profile file + pub fn new( + sc_host_port: &Option<String>, + kf_host_port: &Option<String>, + profile_name: &Option<String>, + ) -> Result<Self, CliError> { + ProfileConfig::new_with_spu(sc_host_port, &None, kf_host_port, profile_name) + } + + /// generate profile configuration with spu based on a default or custom profile file + pub fn new_with_spu( + sc_host_port: &Option<String>, + spu_host_port: &Option<String>, + kf_host_port: &Option<String>, + profile_name: &Option<String>, + ) -> Result<Self, CliError> { + // build profile config from cli parameters + let cli_config = + ProfileConfig::config_from_cli_params(&sc_host_port, &spu_host_port, &kf_host_port)?; + + // if server is configured from cli, do not load profile (as it impacts precedence) + let profile_config = if cli_config.valid_servers_or_error().is_ok() { + cli_config + } else { + // build profile config from profile file + let mut file_config = match profile_name { + Some(profile) => ProfileConfig::config_from_custom_profile(profile)?, + None => ProfileConfig::config_from_default_profile()?, + }; + + // merge the profiles (cli takes precedence) + file_config.merge_with(&cli_config); + file_config + }; + + Ok(profile_config) + } + + /// retrieve target server + pub fn target_server(&self) -> Result<TargetServer, CliError> { + if let Some(sc_server) = self.sc_addr { + Ok(TargetServer::Sc(sc_server.clone())) + } else if let Some(spu_server) = self.spu_addr { + Ok(TargetServer::Spu(spu_server.clone())) + } else if let Some(kf_server) = self.kf_addr { + Ok(TargetServer::Kf(kf_server.clone())) + } else { + Err(CliError::IoError(IoError::new( + ErrorKind::Other, + "target server configuration missing", + ))) + } + } + + /// ensure there is at least one server.
+ pub fn valid_servers_or_error(&self) -> Result<(), CliError> { + if self.sc_addr.is_some() || self.spu_addr.is_some() || self.kf_addr.is_some() { + Ok(()) + } else { + Err(CliError::IoError(IoError::new( + ErrorKind::Other, + "no sc address or spu address is provided", + ))) + } + } + + /// merge local profile with the other profile + /// - values are augmented but not cleared by other + fn merge_with(&mut self, other: &ProfileConfig) { + if other.sc_addr.is_some() { + self.sc_addr = other.sc_addr; + } + if other.spu_addr.is_some() { + self.spu_addr = other.spu_addr; + } + if other.kf_addr.is_some() { + self.kf_addr = other.kf_addr; + } + } + + /// read profile config from a user-defined (custom) profile + fn config_from_custom_profile(profile: &String) -> Result<Self, CliError> { + let custom_profile_path = build_cli_profile_file_path(Some(&profile))?; + (ProfileFile::from_file(custom_profile_path)?).to_config() + } + + /// read profile config from the default profile + fn config_from_default_profile() -> Result<Self, CliError> { + let default_path = build_cli_profile_file_path(None)?; + if Path::new(&default_path).exists() { + (ProfileFile::from_file(default_path)?).to_config() + } else { + Ok(ProfileConfig::default()) + } + } + + /// generate fluvio configuration from cli parameters + fn config_from_cli_params( + sc_host_port: &Option<String>, + spu_host_port: &Option<String>, + kf_host_port: &Option<String>, + ) -> Result<Self, CliError> { + let mut profile_config = ProfileConfig::default(); + + if let Some(host_port) = sc_host_port { + profile_config.sc_addr = Some(ProfileConfig::host_port_to_socket_addr(&host_port)?); + } + + if let Some(host_port) = spu_host_port { + profile_config.spu_addr = Some(ProfileConfig::host_port_to_socket_addr(&host_port)?); + } + + if let Some(host_port) = kf_host_port { + profile_config.kf_addr = Some(ProfileConfig::host_port_to_socket_addr(&host_port)?); + } + + Ok(profile_config) + } + + /// parse host/port to socket address + pub fn host_port_to_socket_addr(host_port: &String) -> Result<SocketAddr, CliError> { + string_to_socket_addr(host_port) + } +} diff --git a/cli/src/profile/mod.rs b/cli/src/profile/mod.rs new file mode 100644 index 0000000000..01f9442d95 --- /dev/null +++ b/cli/src/profile/mod.rs @@ -0,0 +1,8 @@ +mod config; +mod profile_file; + +pub use self::profile_file::build_cli_profile_file_path; + +pub use self::config::ProfileConfig; +pub use self::config::TargetServer; + diff --git a/cli/src/profile/profile_file.rs b/cli/src/profile/profile_file.rs new file mode 100644 index 0000000000..faff2c4449 --- /dev/null +++ b/cli/src/profile/profile_file.rs @@ -0,0 +1,259 @@ +//! +//! # Profiles File Data Structure +//! +//! Profile file retrieves configurations from profile file into memory. +//!
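+//!
+//! Example profile file (illustrative values; the layout matches the
+//! `Deserialize` structs and unit tests below):
+//!
+//! ```toml
+//! version = "1.0"
+//!
+//! [sc]
+//! host = "127.0.0.1"
+//! port = 9033
+//!
+//! [spu]
+//! host = "127.0.0.1"
+//! port = 9034
+//!
+//! [kf]
+//! host = "127.0.0.1"
+//! port = 9093
+//! ```
+//!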
+use std::env; +use std::fs::read_to_string; +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::net::SocketAddr; +use std::path::{Path, PathBuf}; + +use dirs::home_dir; +use serde::Deserialize; + +use types::defaults::{CLI_CONFIG_PATH, CLI_DEFAULT_PROFILE, CLI_PROFILES_DIR}; +use types::defaults::{CONFIG_FILE_EXTENTION, FLV_FLUVIO_HOME}; + +use super::config::ProfileConfig; + +#[derive(Debug, PartialEq, Deserialize)] +pub struct ProfileFile { + pub version: String, + sc: Option<TargetScGroup>, + spu: Option<TargetSpuGroup>, + kf: Option<TargetKfGroup>, +} + +#[derive(Debug, PartialEq, Deserialize)] +struct TargetScGroup { + pub host: String, + pub port: u16, +} + +#[derive(Debug, PartialEq, Deserialize)] +struct TargetSpuGroup { + pub host: String, + pub port: u16, +} + +#[derive(Debug, PartialEq, Deserialize)] +struct TargetKfGroup { + pub host: String, + pub port: u16, +} + +// --------------------------------------- +// Implementation +// --------------------------------------- + +impl ProfileFile { + // read and parse the .toml file + pub fn from_file<T: AsRef<Path>>(path: T) -> Result<Self, IoError> { + let file_str: String = read_to_string(path)?; + toml::from_str(&file_str) + .map_err(|err| IoError::new(ErrorKind::InvalidData, format!("{}", err))) + } + + // converts profile file into a profile configuration + pub fn to_config(&self) -> Result<ProfileConfig, IoError> { + let sc_addr = if let Some(ref sc) = &self.sc { + Some( + format!("{}:{}", sc.host, sc.port) + .parse::<SocketAddr>() + .map_err(|err| { + IoError::new( + ErrorKind::InvalidInput, + format!("invalid streaming controller {}", err), + ) + })?, + ) + } else { + None + }; + + let spu_addr = if let Some(ref spu) = &self.spu { + Some( + format!("{}:{}", spu.host, spu.port) + .parse::<SocketAddr>() + .map_err(|err| { + IoError::new( + ErrorKind::InvalidInput, + format!("invalid target_spu {}", err), + ) + })?, + ) + } else { + None + }; + + let kf_addr = if let Some(ref kf) = &self.kf { + Some( + format!("{}:{}", kf.host, kf.port) + .parse::<SocketAddr>() + .map_err(|err| { + IoError::new( + ErrorKind::InvalidInput, + format!("invalid target_kf {}", err), + ) + })?, + ) + } else { + None + }; + + Ok(ProfileConfig { + sc_addr, + spu_addr, + kf_addr, + }) + } +} + +/// Look up the profile file for Fluvio CLI based on profile name +/// +/// * Step 1: +/// set base path: +/// 1) use $FLUVIO_HOME environment variable if set +/// 2) else ~/.fluvio +/// +/// * Step 2: +/// get configuration based on profile (default for none): +/// default => <base-path>/profiles/default.toml +/// profile1 => <base-path>/profiles/profile1.toml +/// +pub fn build_cli_profile_file_path(profile_name: Option<&String>) -> Result<PathBuf, IoError> { + // set base path + let base_path = match env::var(FLV_FLUVIO_HOME) { + Ok(val) => { + // FLUVIO_HOME env variable is set + let mut user_dir = PathBuf::new(); + user_dir.push(val); + user_dir + } + Err(_) => { + // use HOME directory + if let Some(mut home_dir) = home_dir() { + home_dir.push(CLI_CONFIG_PATH); + home_dir + } else { + return Err(IoError::new( + ErrorKind::InvalidInput, + "can't get home directory", + )); + } + } + }; + + // augment profiles path + let mut file_path = base_path.join(CLI_PROFILES_DIR); + + // augment profile name + if profile_name.is_some() { + file_path.push(profile_name.unwrap()); + } else { + file_path.push(CLI_DEFAULT_PROFILE); + } + + // augment extension + file_path.set_extension(CONFIG_FILE_EXTENTION); + + Ok(file_path) +} + +// --------------------------------------- +// Unit Tests +// --------------------------------------- + +#[cfg(test)] +pub mod test { + use super::*; + use std::path::PathBuf; + + #[test] + fn
test_default_profile_ok() { + let mut profile_path = PathBuf::new(); + profile_path.push("./test-data/profiles/default.toml"); + + // test read & parse + let result = ProfileFile::from_file(profile_path); + assert!(result.is_ok()); + + // compare with expected result + let expected = ProfileFile { + version: "1.0".to_owned(), + sc: Some(TargetScGroup { + host: "127.0.0.1".to_owned(), + port: 9033, + }), + spu: Some(TargetSpuGroup { + host: "127.0.0.1".to_owned(), + port: 9034, + }), + kf: Some(TargetKfGroup { + host: "127.0.0.1".to_owned(), + port: 9093, + }), + }; + + assert_eq!(result.unwrap(), expected); + } + + #[test] + fn test_default_profile_not_found() { + let mut profile_path = PathBuf::new(); + profile_path.push("./test-data/profiles/notfound.toml"); + + // run test + let result = ProfileFile::from_file(profile_path); + + // expecting error + assert!(result.is_err()); + assert_eq!( + format!("{}", result.unwrap_err()), + "No such file or directory (os error 2)" + ); + } + + #[test] + fn test_invalid_profile_file() { + let mut profile_path = PathBuf::new(); + profile_path.push("./test-data/profiles/invalid.toml"); + + // run test + let result = ProfileFile::from_file(profile_path); + + // expecting error + assert!(result.is_err()); + assert_eq!( + format!("{}", result.unwrap_err()), + "missing field `port` for key `sc`" + ); + } + + #[test] + fn test_build_default_profile_file_path() { + let file_path = build_cli_profile_file_path(None); + assert_eq!(file_path.is_ok(), true); + + let mut expected_file_path = PathBuf::new(); + expected_file_path.push(home_dir().unwrap()); + expected_file_path.push(".fluvio/profiles/default.toml"); + + assert_eq!(file_path.unwrap(), expected_file_path); + } + + #[test] + fn test_build_custom_cli_profile_file_path() { + let file_path = build_cli_profile_file_path(Some(&"custom".to_owned())); + assert_eq!(file_path.is_ok(), true); + + let mut expected_file_path = PathBuf::new(); + expected_file_path.push(home_dir().unwrap()); + expected_file_path.push(".fluvio/profiles/custom.toml"); + + assert_eq!(file_path.unwrap(), expected_file_path); + } + +} diff --git a/cli/src/root_cli.rs b/cli/src/root_cli.rs new file mode 100644 index 0000000000..db489f78ed --- /dev/null +++ b/cli/src/root_cli.rs @@ -0,0 +1,120 @@ +//! +//! # Root CLI +//! +//! CLI configurations at the top of the tree +//! 
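+//!
+//! Subcommands wired up here: consume, produce, spu, spu-group,
+//! custom-spu, topic, and advanced.
+//!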
+use structopt::clap::AppSettings; +use structopt::StructOpt; + +use super::consume::process_consume_log; +use super::produce::process_produce_record; +use super::topic::process_topic; +use super::advanced::process_advanced; +use super::spu::all::process_spu; +use super::spu::custom::process_custom_spu; +use super::spu::group::process_spu_group; + +use super::consume::ConsumeLogOpt; +use super::produce::ProduceLogOpt; +use super::topic::TopicOpt; +use super::advanced::AdvancedOpt; +use super::spu::all::SpuOpt; +use super::spu::custom::CustomSpuOpt; +use super::spu::group::SpuGroupOpt; + +use super::CliError; + +#[derive(Debug, StructOpt)] +#[structopt( + about = "Fluvio Command Line Interface", + author = "", + name = "fluvio", + template = "{about} + +{usage} + +{all-args} +", + raw( + global_settings = "&[AppSettings::VersionlessSubcommands, AppSettings::DeriveDisplayOrder]" + ) +)] +enum Root { + #[structopt( + name = "consume", + author = "", + template = "{about} + +{usage} + +{all-args} +", + about = "Read messages from a topic/partition" + )] + Consume(ConsumeLogOpt), + + #[structopt( + name = "produce", + author = "", + template = "{about} + +{usage} + +{all-args} +", + about = "Write messages to a topic/partition" + )] + Produce(ProduceLogOpt), + + #[structopt(name = "spu", author = "", template = "{about} + +{usage} + +{all-args} +", about = "SPU Operations")] + SPU(SpuOpt), + + #[structopt(name = "spu-group", author = "", template = "{about} + +{usage} + +{all-args} +", about = "SPU Group Operations")] + SPUGroup(SpuGroupOpt), + + #[structopt(name = "custom-spu", author = "", template = "{about} + +{usage} + +{all-args} +", about = "Custom SPU Operations")] + CustomSPU(CustomSpuOpt), + + #[structopt(name = "topic", author = "", template = "{about} + +{usage} + +{all-args} +", about = "Topic operations")] + Topic(TopicOpt), + + #[structopt(name = "advanced", author = "", template = "{about} + +{usage} + +{all-args} +", about = "Advanced operations")] + Advanced(AdvancedOpt), +} + +pub fn run_cli() -> Result<(), CliError> { + match Root::from_args() { + Root::Consume(consume) => process_consume_log(consume), + Root::Produce(produce) => process_produce_record(produce), + Root::SPU(spu) => process_spu(spu), + Root::SPUGroup(spu_group) => process_spu_group(spu_group), + Root::CustomSPU(custom_spu) => process_custom_spu(custom_spu), + Root::Topic(topic) => process_topic(topic), + Root::Advanced(advanced) => process_advanced(advanced), + } +} diff --git a/cli/src/spu/all/list.rs b/cli/src/spu/all/list.rs new file mode 100644 index 0000000000..e5ad6041bf --- /dev/null +++ b/cli/src/spu/all/list.rs @@ -0,0 +1,94 @@ +//! +//! # List All Spus CLI +//! +//! CLI tree and processing to list SPUs +//! 
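+//!
+//! Example (hypothetical invocation; flags per the options below):
+//!
+//! ```text
+//! fluvio spu list --sc <host:port> --output <type>
+//! ```
+//!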
+use std::net::SocketAddr; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use structopt::StructOpt; + +use crate::error::CliError; +use crate::common::OutputType; +use crate::profile::{ProfileConfig, TargetServer}; + +use crate::spu::helpers::query_spu_list_metadata; +use crate::spu::helpers::format_spu_response_output; +use crate::spu::helpers::flv_response_to_spu_metadata; + +// ----------------------------------- +// CLI Options +// ----------------------------------- + +#[derive(Debug, StructOpt)] +pub struct ListSpusOpt { + /// Address of Streaming Controller + #[structopt(short = "c", long = "sc", value_name = "host:port")] + sc: Option<String>, + + /// Profile name + #[structopt(short = "P", long = "profile")] + pub profile: Option<String>, + + /// Output + #[structopt( + short = "O", + long = "output", + value_name = "type", + raw(possible_values = "&OutputType::variants()", case_insensitive = "true") + )] + output: Option<OutputType>, +} + +// ----------------------------------- +// Parsed Config +// ----------------------------------- + +#[derive(Debug)] +pub struct ListSpusConfig { + pub output: OutputType, +} + +// ----------------------------------- +// CLI Processing +// ----------------------------------- + +/// Process list spus cli request +pub fn process_list_spus(opt: ListSpusOpt) -> Result<(), CliError> { + let (target_server, list_spu_cfg) = parse_opt(opt)?; + + match target_server { + TargetServer::Sc(server_addr) => fetch_and_list_all_spus(server_addr, &list_spu_cfg), + _ => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + "SC is solely responsible for SPUs", + ))), + } +} + +/// Validate cli options and generate config +fn parse_opt(opt: ListSpusOpt) -> Result<(TargetServer, ListSpusConfig), CliError> { + let profile_config = ProfileConfig::new(&opt.sc, &None, &opt.profile)?; + let target_server = profile_config.target_server()?; + + // transfer config parameters + let list_spu_cfg = ListSpusConfig { + output: opt.output.unwrap_or(OutputType::default()), + }; + + // return server separately from topic result + Ok((target_server, list_spu_cfg)) +} + +// Retrieve all spus and output in desired format +fn fetch_and_list_all_spus( + server_addr: SocketAddr, + list_spu_cfg: &ListSpusConfig, +) -> Result<(), CliError> { + let flv_spus = query_spu_list_metadata(server_addr, false)?; + let sc_spus = flv_response_to_spu_metadata(&flv_spus); + + // format and dump to screen + format_spu_response_output(sc_spus, &list_spu_cfg.output) +} diff --git a/cli/src/spu/all/mod.rs b/cli/src/spu/all/mod.rs new file mode 100644 index 0000000000..682d00b236 --- /dev/null +++ b/cli/src/spu/all/mod.rs @@ -0,0 +1,25 @@ +mod list; + +use list::ListSpusOpt; +use list::process_list_spus; + +use structopt::StructOpt; + +use crate::error::CliError; + +#[derive(Debug, StructOpt)] +pub enum SpuOpt { + #[structopt(name = "list", author = "", template = "{about} + +{usage} + +{all-args} +", about = "List custom & managed SPUs")] + List(ListSpusOpt), +} + +pub(crate) fn process_spu(spu_opt: SpuOpt) -> Result<(), CliError> { + match spu_opt { + SpuOpt::List(spu_opt) => process_list_spus(spu_opt), + } +} diff --git a/cli/src/spu/custom/create.rs b/cli/src/spu/custom/create.rs new file mode 100644 index 0000000000..62d974160c --- /dev/null +++ b/cli/src/spu/custom/create.rs @@ -0,0 +1,101 @@ +//! +//! # Create Custom SPUs +//! +//! CLI tree to generate Create Custom SPUs +//!
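+//!
+//! Example (hypothetical invocation; flags per the options below):
+//!
+//! ```text
+//! fluvio custom-spu create --id 100 \
+//!     --public-server localhost:9005 --private-server localhost:9006
+//! ```
+//!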
+ +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::convert::TryFrom; + +use structopt::StructOpt; +use types::socket_helpers::ServerAddress; + +use crate::error::CliError; +use crate::profile::{ProfileConfig, TargetServer}; + +use super::helpers::proc_create::process_sc_create_custom_spu; + +// ----------------------------------- +// CLI Options +// ----------------------------------- + +#[derive(Debug, StructOpt)] +pub struct CreateCustomSpuOpt { + /// SPU id + #[structopt(short = "i", long = "id")] + id: i32, + + /// SPU name + #[structopt(short = "n", long = "name", value_name = "string")] + name: Option<String>, + + /// Rack name + #[structopt(short = "r", long = "rack", value_name = "string")] + rack: Option<String>, + + /// Public server address (host:port) + #[structopt(short = "p", long = "public-server", value_name = "host:port")] + public_server: String, + + /// Private server address (host:port) + #[structopt(short = "v", long = "private-server", value_name = "host:port")] + private_server: String, + + /// Address of Streaming Controller + #[structopt(short = "c", long = "sc", value_name = "host:port")] + sc: Option<String>, + + /// Profile name + #[structopt(short = "P", long = "profile")] + profile: Option<String>, +} + +// ----------------------------------- +// Parsed Config +// ----------------------------------- + +#[derive(Debug)] +pub struct CreateCustomSpuConfig { + pub id: i32, + pub name: String, + pub public_server: ServerAddress, + pub private_server: ServerAddress, + pub rack: Option<String>, +} + +// ----------------------------------- +// CLI Processing +// ----------------------------------- +pub fn process_create_custom_spu(opt: CreateCustomSpuOpt) -> Result<(), CliError> { + let (target_server, create_custom_spu_cfg) = parse_opt(opt)?; + + match target_server { + TargetServer::Sc(server_addr) => { + process_sc_create_custom_spu(server_addr, create_custom_spu_cfg) + } + _ => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("invalid sc server {:?}", target_server), + ))), + } +} + +/// Validate cli options. Generate target-server and create custom spu config. +fn parse_opt(opt: CreateCustomSpuOpt) -> Result<(TargetServer, CreateCustomSpuConfig), CliError> { + // profile specific configurations (target server) + let profile_config = ProfileConfig::new(&opt.sc, &None, &opt.profile)?; + let target_server = profile_config.target_server()?; + + // create custom spu config + let cfg = CreateCustomSpuConfig { + id: opt.id, + name: opt.name.unwrap_or(format!("custom-spu-{}", opt.id)), + public_server: TryFrom::try_from(&opt.public_server)?, + private_server: TryFrom::try_from(&opt.private_server)?, + rack: opt.rack.clone(), + }; + + // return server separately from config + Ok((target_server, cfg)) +} diff --git a/cli/src/spu/custom/delete.rs b/cli/src/spu/custom/delete.rs new file mode 100644 index 0000000000..cc292f784c --- /dev/null +++ b/cli/src/spu/custom/delete.rs @@ -0,0 +1,110 @@ +//! +//! # Delete Custom SPUs +//! +//! CLI tree to generate Delete Custom SPUs +//!
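+//!
+//! Example (hypothetical invocation; an SPU may be addressed by id or name):
+//!
+//! ```text
+//! fluvio custom-spu delete --id 100
+//! fluvio custom-spu delete --name custom-spu-100
+//! ```
+//!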
+use std::io::Error as IoError; +use std::io::ErrorKind; +use std::fmt; + +use structopt::StructOpt; + +use crate::error::CliError; +use crate::profile::{ProfileConfig, TargetServer}; + +use super::helpers::proc_delete::process_sc_delete_custom_spu; + +// ----------------------------------- +// CLI Options +// ----------------------------------- + +#[derive(Debug, StructOpt)] +pub struct DeleteCustomSpuOpt { + /// SPU id + #[structopt(short = "i", long = "id", required_unless = "name")] + id: Option<i32>, + + /// SPU name + #[structopt( + short = "n", + long = "name", + value_name = "string", + conflicts_with = "id" + )] + name: Option<String>, + + /// Address of Streaming Controller + #[structopt(short = "c", long = "sc", value_name = "host:port")] + sc: Option<String>, + + /// Profile name + #[structopt(short = "P", long = "profile")] + profile: Option<String>, +} + +// ----------------------------------- +// Parsed Config +// ----------------------------------- + +#[derive(Debug)] +pub struct DeleteCustomSpuConfig { + pub custom_spu: CustomSpu, +} + +#[derive(Debug)] +pub enum CustomSpu { + Name(String), + Id(i32), +} + +impl fmt::Display for CustomSpu { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + CustomSpu::Name(name) => write!(f, "{}", name), + CustomSpu::Id(id) => write!(f, "{}", id), + } + } +} + +// ----------------------------------- +// CLI Processing +// ----------------------------------- + +/// Process delete custom-spu cli request +pub fn process_delete_custom_spu(opt: DeleteCustomSpuOpt) -> Result<(), CliError> { + let (target_server, delete_custom_spu_cfg) = parse_opt(opt)?; + + match target_server { + TargetServer::Sc(server_addr) => { + process_sc_delete_custom_spu(server_addr, delete_custom_spu_cfg) + } + _ => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("invalid sc server {:?}", target_server), + ))), + } +} +/// Validate cli options. Generate target-server and delete custom spu config. +fn parse_opt(opt: DeleteCustomSpuOpt) -> Result<(TargetServer, DeleteCustomSpuConfig), CliError> { + // profile specific configurations (target server) + let profile_config = ProfileConfig::new(&opt.sc, &None, &opt.profile)?; + let target_server = profile_config.target_server()?; + + // custom spu + let custom_spu = if let Some(name) = opt.name { + CustomSpu::Name(name) + } else if let Some(id) = opt.id { + CustomSpu::Id(id) + } else { + return Err(CliError::IoError(IoError::new( + ErrorKind::Other, + "missing custom SPU name or id", + ))); + }; + + // delete custom spu config + let cfg = DeleteCustomSpuConfig { custom_spu }; + + // return server separately from config + Ok((target_server, cfg)) +} diff --git a/cli/src/spu/custom/helpers/mod.rs b/cli/src/spu/custom/helpers/mod.rs new file mode 100644 index 0000000000..0720606975 --- /dev/null +++ b/cli/src/spu/custom/helpers/mod.rs @@ -0,0 +1,2 @@ +pub mod proc_create; +pub mod proc_delete; diff --git a/cli/src/spu/custom/helpers/proc_create.rs b/cli/src/spu/custom/helpers/proc_create.rs new file mode 100644 index 0000000000..c79b2ae1db --- /dev/null +++ b/cli/src/spu/custom/helpers/proc_create.rs @@ -0,0 +1,111 @@ +//! +//! # Fluvio SC - Create Processing +//! +//! Sends Create Custom SPU request to Fluvio Streaming Controller +//!
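+//!
+//! Flow: open a connection, fetch the SC API versions, look up the
+//! version for `FlvCreateCustomSpus`, then send a request carrying a
+//! single custom SPU and report the first result.
+//!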
+ +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::net::SocketAddr; + +use log::trace; + +use future_helper::run_block_on; + +use sc_api::apis::ScApiKey; +use sc_api::spu::{FlvCreateCustomSpusRequest, FlvCreateCustomSpusResponse}; +use sc_api::spu::{FlvCreateCustomSpuRequest, FlvEndPointMetadata}; + +use crate::error::CliError; +use crate::common::Connection; +use crate::common::sc_get_api_versions; +use crate::common::sc_lookup_version; +use crate::common::handle_sc_response; + +use crate::spu::custom::create::CreateCustomSpuConfig; + +// ----------------------------------- +// Fluvio SC - Process Request +// ----------------------------------- + +// Connect to Fluvio Streaming Controller and process Create Custom Spu Request +pub fn process_sc_create_custom_spu( + server_addr: SocketAddr, + custom_spu_cfg: CreateCustomSpuConfig, +) -> Result<(), CliError> { + let id = custom_spu_cfg.id; + let name = custom_spu_cfg.name.clone(); + + // Run command and collect results + match run_block_on(send_request_to_server(server_addr, custom_spu_cfg)) { + Err(err) => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("send create custom-spu '{}({})': {}", name, id, err), + ))), + Ok(response) => { + if response.results.len() > 0 { + let custom_spu_resp = &response.results[0]; + let response = handle_sc_response( + &name, + "custom-spu", + "created", + "", + &custom_spu_resp.error_code, + &custom_spu_resp.error_message, + )?; + println!("{}", response); + + Ok(()) + } else { + Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!( + "cannot create custom spu '{}({})': communication error", + name, id + ), + ))) + } + } + } +} + +/// Connect to server, get version, and send create request +async fn send_request_to_server<'a>( + server_addr: SocketAddr, + custom_spu_cfg: CreateCustomSpuConfig, +) -> Result<FlvCreateCustomSpusResponse, CliError> { + let mut conn = Connection::new(&server_addr).await?; + let request = encode_request(&custom_spu_cfg); + let versions = sc_get_api_versions(&mut conn).await?; + let version = sc_lookup_version(ScApiKey::FlvCreateCustomSpus, &versions); + + trace!("create custom-spu req '{}': {:#?}", server_addr, request); + + let response = conn.send_request(request, version).await?; + + trace!("create custom-spu res '{}': {:#?}", server_addr, response); + + Ok(response) +} + +/// encode CreateCustomSpuConfig in Fluvio format +fn encode_request(custom_spu_cfg: &CreateCustomSpuConfig) -> FlvCreateCustomSpusRequest { + let create_custom_spu = FlvCreateCustomSpuRequest { + id: custom_spu_cfg.id, + name: custom_spu_cfg.name.clone(), + public_server: FlvEndPointMetadata { + host: custom_spu_cfg.public_server.host.clone(), + port: custom_spu_cfg.public_server.port, + }, + private_server: FlvEndPointMetadata { + host: custom_spu_cfg.private_server.host.clone(), + port: custom_spu_cfg.private_server.port, + }, + rack: custom_spu_cfg.rack.clone(), + }; + + // generate request with 1 custom spu + FlvCreateCustomSpusRequest { + custom_spus: vec![create_custom_spu], + } +} diff --git a/cli/src/spu/custom/helpers/proc_delete.rs b/cli/src/spu/custom/helpers/proc_delete.rs new file mode 100644 index 0000000000..35a8bc9c82 --- /dev/null +++ b/cli/src/spu/custom/helpers/proc_delete.rs @@ -0,0 +1,102 @@ +//! +//! # Fluvio SC - Delete Processing +//! +//! Sends Delete Custom SPU request to Fluvio Streaming Controller +//!
+ +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::net::SocketAddr; + +use log::trace; + +use future_helper::run_block_on; + +use sc_api::apis::ScApiKey; +use sc_api::spu::{FlvDeleteCustomSpusRequest, FlvDeleteCustomSpusResponse}; +use sc_api::spu::FlvCustomSpu; + +use crate::error::CliError; +use crate::common::Connection; +use crate::common::sc_get_api_versions; +use crate::common::sc_lookup_version; +use crate::common::handle_sc_response; + +use crate::spu::custom::delete::CustomSpu; +use crate::spu::custom::delete::DeleteCustomSpuConfig; + +// ----------------------------------- +// Fluvio SC - Process Request +// ----------------------------------- + +// Connect to Fluvio Streaming Controller and process Delete Custom Spu Request +pub fn process_sc_delete_custom_spu( + server_addr: SocketAddr, + custom_spu_cfg: DeleteCustomSpuConfig, +) -> Result<(), CliError> { + let custom_spu_str = format!("{}", custom_spu_cfg.custom_spu); + + // Run command and collect results + match run_block_on(send_request_to_server(server_addr, custom_spu_cfg)) { + Err(err) => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("send delete custom-spu '{}': {}", custom_spu_str, err), + ))), + Ok(response) => { + if response.results.len() > 0 { + let custom_spu_resp = &response.results[0]; + let response = handle_sc_response( + &custom_spu_str, + "custom-spu", + "deleted", + "", + &custom_spu_resp.error_code, + &custom_spu_resp.error_message, + )?; + println!("{}", response); + + Ok(()) + } else { + Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!( + "cannot delete custom spu '{}': communication error", + custom_spu_str + ), + ))) + } + } + } +} + +/// Connect to server, get version, and send delete request +async fn send_request_to_server<'a>( + server_addr: SocketAddr, + custom_spu_cfg: DeleteCustomSpuConfig, +) -> Result<FlvDeleteCustomSpusResponse, CliError> { + let mut conn = Connection::new(&server_addr).await?; + let request = encode_request(&custom_spu_cfg); + let versions = sc_get_api_versions(&mut conn).await?; + let version = sc_lookup_version(ScApiKey::FlvDeleteCustomSpus, &versions); + + trace!("delete custom-spu req '{}': {:#?}", server_addr, request); + + let response = conn.send_request(request, version).await?; + + trace!("delete custom-spu res '{}': {:#?}", server_addr, response); + + Ok(response) +} + +/// encode DeleteCustomSpuConfig in Fluvio format +fn encode_request(custom_spu_cfg: &DeleteCustomSpuConfig) -> FlvDeleteCustomSpusRequest { + let flv_custom_spu = match &custom_spu_cfg.custom_spu { + CustomSpu::Name(spu_name) => FlvCustomSpu::Name(spu_name.clone()), + CustomSpu::Id(id) => FlvCustomSpu::Id(*id), + }; + + // generate request with 1 custom spu + FlvDeleteCustomSpusRequest { + custom_spus: vec![flv_custom_spu], + } +} diff --git a/cli/src/spu/custom/list.rs b/cli/src/spu/custom/list.rs new file mode 100644 index 0000000000..8dad172e71 --- /dev/null +++ b/cli/src/spu/custom/list.rs @@ -0,0 +1,96 @@ +//! +//! # List Custom SPUs CLI +//! +//! CLI tree and processing to list Custom SPUs +//!
+use std::net::SocketAddr; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use structopt::StructOpt; + +use crate::error::CliError; +use crate::common::OutputType; +use crate::profile::{ProfileConfig, TargetServer}; + +use crate::spu::helpers::query_spu_list_metadata; +use crate::spu::helpers::format_spu_response_output; +use crate::spu::helpers::flv_response_to_spu_metadata; + +// ----------------------------------- +// CLI Options +// ----------------------------------- + +#[derive(Debug, StructOpt)] +pub struct ListCustomSpusOpt { + /// Address of Streaming Controller + #[structopt(short = "c", long = "sc", value_name = "host:port")] + sc: Option<String>, + + /// Profile name + #[structopt(short = "P", long = "profile")] + pub profile: Option<String>, + + /// Output + #[structopt( + short = "O", + long = "output", + value_name = "type", + raw(possible_values = "&OutputType::variants()", case_insensitive = "true") + )] + output: Option<OutputType>, +} + +// ----------------------------------- +// Parsed Config +// ----------------------------------- + +#[derive(Debug)] +pub struct ListCustomSpusConfig { + pub output: OutputType, +} + +// ----------------------------------- +// CLI Processing +// ----------------------------------- + +/// Process list custom spus cli request +pub fn process_list_custom_spus(opt: ListCustomSpusOpt) -> Result<(), CliError> { + let (target_server, list_custom_spu_cfg) = parse_opt(opt)?; + + match target_server { + TargetServer::Sc(server_addr) => { + fetch_and_list_custom_spus(server_addr, &list_custom_spu_cfg) + } + _ => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + "Invalid SC server configuration", + ))), + } +} + +/// Validate cli options and generate config
fn parse_opt(opt: ListCustomSpusOpt) -> Result<(TargetServer, ListCustomSpusConfig), CliError> { + let profile_config = ProfileConfig::new(&opt.sc, &None, &opt.profile)?; + let target_server = profile_config.target_server()?; + + // transfer config parameters + let list_custom_spu_cfg = ListCustomSpusConfig { + output: opt.output.unwrap_or(OutputType::default()), + }; + + // return server separately from topic result + Ok((target_server, list_custom_spu_cfg)) +} + +// Fetch custom SPUs and output in desired format +fn fetch_and_list_custom_spus( + server_addr: SocketAddr, + list_custom_spu_cfg: &ListCustomSpusConfig, +) -> Result<(), CliError> { + let flv_spus = query_spu_list_metadata(server_addr, true)?; + let sc_spus = flv_response_to_spu_metadata(&flv_spus); + + // format and dump to screen + format_spu_response_output(sc_spus, &list_custom_spu_cfg.output) +} diff --git a/cli/src/spu/custom/mod.rs b/cli/src/spu/custom/mod.rs new file mode 100644 index 0000000000..86b3b00679 --- /dev/null +++ b/cli/src/spu/custom/mod.rs @@ -0,0 +1,52 @@ +mod create; +mod list; +mod delete; +mod helpers; + +use structopt::StructOpt; + +use create::CreateCustomSpuOpt; +use create::process_create_custom_spu; + +use delete::DeleteCustomSpuOpt; +use delete::process_delete_custom_spu; + +use list::ListCustomSpusOpt; +use list::process_list_custom_spus; + +use crate::error::CliError; + +#[derive(Debug, StructOpt)] +pub enum CustomSpuOpt { + #[structopt(name = "create", author = "", template = "{about} + +{usage} + +{all-args} +", about = "Create custom SPU")] + Create(CreateCustomSpuOpt), + + #[structopt(name = "delete", author = "", template = "{about} + +{usage} + +{all-args} +", about = "Delete custom SPU")] + Delete(DeleteCustomSpuOpt), + + #[structopt(name = "list", author = "", template = "{about} + +{usage} + +{all-args} +",
about = "List custom SPUs")] + List(ListCustomSpusOpt), +} + +pub(crate) fn process_custom_spu(custom_spu_opt: CustomSpuOpt) -> Result<(), CliError> { + match custom_spu_opt { + CustomSpuOpt::Create(custom_spu_opt) => process_create_custom_spu(custom_spu_opt), + CustomSpuOpt::Delete(custom_spu_opt) => process_delete_custom_spu(custom_spu_opt), + CustomSpuOpt::List(custom_spu_opt) => process_list_custom_spus(custom_spu_opt), + } +} diff --git a/cli/src/spu/group/create.rs b/cli/src/spu/group/create.rs new file mode 100644 index 0000000000..8115e83288 --- /dev/null +++ b/cli/src/spu/group/create.rs @@ -0,0 +1,98 @@ +//! +//! # Create Mange SPU Groups +//! +//! CLI tree to generate Create Managed SPU Groups +//! + +use std::io::Error as IoError; +use std::io::ErrorKind; + +use log::debug; +use structopt::StructOpt; + +use sc_api::spu::FlvCreateSpuGroupRequest; +use crate::error::CliError; +use crate::profile::{ProfileConfig, TargetServer}; + +use super::helpers::group_config::GroupConfig; +use super::helpers::proc_create::process_create_spu_group; + +// ----------------------------------- +// CLI Options +// ----------------------------------- + +#[derive(Debug, StructOpt)] +pub struct CreateManagedSpuGroupOpt { + /// Managed SPU group name + #[structopt(short = "n", long = "name", value_name = "string")] + name: String, + + /// SPU replicas + #[structopt(short = "l", long = "replicas")] + replicas: u16, + + /// Minimum SPU id (default: 1) + #[structopt(short = "i", long = "min-id")] + min_id: Option, + + /// Rack name + #[structopt(short = "r", long = "rack", value_name = "string")] + rack: Option, + + /// storage size + #[structopt(short = "s", long = "size", value_name = "string")] + storage: Option, + + + + /// Address of Streaming Controller + #[structopt(short = "c", long = "sc", value_name = "host:port")] + sc: Option, + + + /// Profile name + #[structopt(short = "P", long = "profile")] + profile: Option, +} + +// ----------------------------------- +// CLI Processing +// ----------------------------------- +pub fn process_create_managed_spu_group(opt: CreateManagedSpuGroupOpt) -> Result<(), CliError> { + let (target_server, create_spu_group_cfg) = parse_opt(opt)?; + + debug!("{:#?}", create_spu_group_cfg); + + match target_server { + TargetServer::Sc(server_addr) => { + process_create_spu_group(server_addr, create_spu_group_cfg) + } + _ => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("invalid sc server {:?}", target_server), + ))), + } +} + +/// Validate cli options. Generate target-server and create spu group config. +fn parse_opt( + opt: CreateManagedSpuGroupOpt, +) -> Result<(TargetServer, FlvCreateSpuGroupRequest), CliError> { + // profile specific configurations (target server) + let profile_config = ProfileConfig::new(&opt.sc, &None, &opt.profile)?; + let target_server = profile_config.target_server()?; + + let grp_config = opt.storage.map(|storage| GroupConfig::with_storage(storage)); + + let group = FlvCreateSpuGroupRequest { + name: opt.name, + replicas: opt.replicas, + min_id: opt.min_id, + config: grp_config.map(|cf| cf.into()).unwrap_or_default(), + rack: opt.rack + }; + + // return server separately from config + + Ok((target_server, group)) +} diff --git a/cli/src/spu/group/delete.rs b/cli/src/spu/group/delete.rs new file mode 100644 index 0000000000..c893af1e6e --- /dev/null +++ b/cli/src/spu/group/delete.rs @@ -0,0 +1,74 @@ +//! +//! # Delete Managed SPU Groups +//! +//! CLI tree to generate Delete Managed SPU Groups +//! 
+use std::io::Error as IoError; +use std::io::ErrorKind; + +use structopt::StructOpt; + +use crate::error::CliError; +use crate::profile::{ProfileConfig, TargetServer}; + +use super::helpers::proc_delete::process_delete_spu_group; + +// ----------------------------------- +// CLI Options +// ----------------------------------- + +#[derive(Debug, StructOpt)] +pub struct DeleteManagedSpuGroupOpt { + /// Managed SPU group name + #[structopt(short = "n", long = "name", value_name = "string")] + name: String, + + /// Address of Streaming Controller + #[structopt(short = "c", long = "sc", value_name = "host:port")] + sc: Option<String>, + + /// Profile name + #[structopt(short = "P", long = "profile")] + profile: Option<String>, +} + +// ----------------------------------- +// Parsed Config +// ----------------------------------- + +#[derive(Debug)] +pub struct DeleteManagedSpuGroupConfig { + pub name: String, +} + +// ----------------------------------- +// CLI Processing +// ----------------------------------- + +/// Process delete managed spu group cli request +pub fn process_delete_managed_spu_group(opt: DeleteManagedSpuGroupOpt) -> Result<(), CliError> { + let (target_server, delete_spu_group_cfg) = parse_opt(opt)?; + + match target_server { + TargetServer::Sc(server_addr) => { + process_delete_spu_group(server_addr, delete_spu_group_cfg) + } + _ => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("invalid sc server {:?}", target_server), + ))), + } +} + +/// Validate cli options. Generate target-server and delete spu group configuration. +fn parse_opt( + opt: DeleteManagedSpuGroupOpt, +) -> Result<(TargetServer, DeleteManagedSpuGroupConfig), CliError> { + // profile specific configurations (target server) + let profile_config = ProfileConfig::new(&opt.sc, &None, &opt.profile)?; + let target_server = profile_config.target_server()?; + let delete_spu_group_cfg = DeleteManagedSpuGroupConfig { name: opt.name }; + + // return server separately from config + Ok((target_server, delete_spu_group_cfg)) +} diff --git a/cli/src/spu/group/helpers/group_config.rs b/cli/src/spu/group/helpers/group_config.rs new file mode 100644 index 0000000000..a511d5fa2f --- /dev/null +++ b/cli/src/spu/group/helpers/group_config.rs @@ -0,0 +1,102 @@ +//! +//! # Group Config +//! +//! Group configuration is read from file +//!
+ +use serde::Deserialize; + +use sc_api::spu::FlvGroupConfig; +use sc_api::spu::FlvStorageConfig; +use sc_api::spu::FlvReplicationConfig; +use sc_api::spu::FlvEnvVar; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +#[derive(Debug, Deserialize, Default, PartialEq)] +pub struct GroupConfig { + pub storage: Option<StorageConfig>, + pub replication: Option<ReplicationConfig>, + pub env: Vec<EnvVar>, +} + +impl GroupConfig { + pub fn with_storage(storage: String) -> Self { + Self { + storage: Some(StorageConfig::new(storage)), + ..Default::default() + } + } +} + +impl Into<FlvGroupConfig> for GroupConfig { + fn into(self) -> FlvGroupConfig { + FlvGroupConfig { + storage: self.storage.map(|cfg| cfg.into()), + replication: self.replication.map(|cfg| cfg.into()), + env: self.env.into_iter().map(|var| var.into()).collect() + } + } +} + + +#[derive(Debug, Deserialize, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct StorageConfig { + pub log_dir: Option<String>, + pub size: Option<String>, +} + +impl StorageConfig { + pub fn new(size: String) -> Self { + Self { + log_dir: None, + size: Some(size) + } + } +} + + +impl Into<FlvStorageConfig> for StorageConfig { + fn into(self) -> FlvStorageConfig { + FlvStorageConfig { + log_dir: self.log_dir.map(|cfg| cfg.into()), + size: self.size.map(|cfg| cfg.into()), + } + } +} + + +#[derive(Debug, Deserialize, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct ReplicationConfig { + pub in_sync_replica_min: Option<i32>, +} + + +impl Into<FlvReplicationConfig> for ReplicationConfig { + fn into(self) -> FlvReplicationConfig { + FlvReplicationConfig { + in_sync_replica_min: self.in_sync_replica_min.map(|cfg| cfg.into()) + } + } +} + +#[derive(Debug, Deserialize, PartialEq)] +pub struct EnvVar { + pub name: String, + pub value: String, +} + + +impl Into<FlvEnvVar> for EnvVar { + fn into(self) -> FlvEnvVar { + FlvEnvVar { + name: self.name, + value: self.value + } + } +}
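The Deserialize derives above define the on-disk shape of a group config. The decoder for this file is not part of this diff, so the concrete encoding is an assumption here; assuming JSON (as used for the partition map file later in this changeset), a config exercising every field would look roughly like:

{
  "storage": { "logDir": "/var/fluvio/spu", "size": "2Gi" },
  "replication": { "inSyncReplicaMin": 1 },
  "env": [ { "name": "FLV_LOG_LEVEL", "value": "debug" } ]
}

The camelCase keys inside storage and replication follow the serde rename_all attributes; all values shown are illustrative only. Note that env has no default, so the field must be present even when empty.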
diff --git a/cli/src/spu/group/helpers/list_output.rs b/cli/src/spu/group/helpers/list_output.rs new file mode 100644 index 0000000000..68e97f18d1 --- /dev/null +++ b/cli/src/spu/group/helpers/list_output.rs @@ -0,0 +1,162 @@ +//! +//! # Fluvio SC - output processing +//! +//! Format SPU Group response based on output type +//! +use prettytable::Row; +use prettytable::row; +use prettytable::Cell; +use prettytable::cell; +use prettytable::format::Alignment; +use serde::Serialize; +use log::debug; + +use sc_api::spu::FlvFetchSpuGroupsResponse; +use k8_metadata::spg::SpuGroupSpec; +use k8_metadata::spg::SpuGroupStatus; + +use crate::error::CliError; +use crate::common::OutputType; +use crate::common::{EncoderOutputHandler, TableOutputHandler}; + + + +#[derive(Serialize, Debug)] +pub struct SpuGroupRow { + pub name: String, + pub spec: SpuGroupSpec, + pub status: SpuGroupStatus, +} + +impl SpuGroupRow { + + fn name(&self) -> String { + self.name.clone() + } + + fn replicas(&self) -> String { + self.spec.replicas.to_string() + } + + fn min_id(&self) -> String { + self.spec.min_id().to_string() + } + + fn rack(&self) -> String { + self.spec.template.spec.rack.clone().unwrap_or("".to_string()) + } + + fn size(&self) -> String { + self.spec.template.spec.storage.clone().unwrap().size() + } + + fn status(&self) -> String { + self.status.resolution.to_string() + } +} + + + + +#[derive(Debug)] +struct ListSpuGroups(Vec<SpuGroupRow>); + + + +// ----------------------------------- +// Format Output +// ----------------------------------- + +/// Format SPU Group based on output type +pub fn spu_group_response_to_output( + spu_groups: FlvFetchSpuGroupsResponse, + output_type: &OutputType, +) -> Result<(), CliError> { + + let groups = spu_groups.spu_groups; + + // TODO: display error output + + let list_spu_groups = ListSpuGroups( + groups + .into_iter() + .map(|g| { + let (name,spec,status) = g.into(); + SpuGroupRow { + name, + spec, + status + } + }).collect() + ); + + debug!("groups: {:#?}",list_spu_groups); + + // expecting array with one or more elements + if list_spu_groups.0.len() > 0 { + if output_type.is_table() { + list_spu_groups.display_table(false); + } else { + list_spu_groups.display_encoding(output_type)?; + } + } else { + println!("No spu groups found"); + } + Ok(()) +} + + +// ----------------------------------- +// Output Handlers +// ----------------------------------- +impl TableOutputHandler for ListSpuGroups { + /// table header implementation + fn header(&self) -> Row { + row![ + "NAME", + "REPLICAS", + "MIN ID", + "RACK", + "SIZE", + "STATUS", + ] + } + + /// return errors in string format + fn errors(&self) -> Vec<String> { + self.0.iter().map(|_g| "".to_owned()).collect() + } + + /// table content implementation + fn content(&self) -> Vec<Row> { + + self.0 + .iter() + .map( |r| { + Row::new( + vec![ + Cell::new_align(&r.name(),Alignment::RIGHT), + Cell::new_align(&r.replicas(),Alignment::CENTER), + Cell::new_align(&r.min_id(),Alignment::RIGHT), + Cell::new_align(&r.rack(),Alignment::RIGHT), + Cell::new_align(&r.size(), Alignment::RIGHT), + Cell::new_align(&r.status(),Alignment::RIGHT) + ] + ) + }) + .collect() + + } +} + + + +impl EncoderOutputHandler for ListSpuGroups { + /// serializable data type + type DataType = Vec<SpuGroupRow>; + + /// serializable data to be encoded + fn data(&self) -> &Vec<SpuGroupRow> { + &self.0 + } +} diff --git a/cli/src/spu/group/helpers/mod.rs b/cli/src/spu/group/helpers/mod.rs new file mode 100644 index 0000000000..da194ec348 --- /dev/null +++ b/cli/src/spu/group/helpers/mod.rs @@ -0,0 +1,6 @@ +pub mod group_config; +pub mod list_output; + +pub mod proc_create; +pub mod proc_delete; +pub mod proc_list; diff --git a/cli/src/spu/group/helpers/proc_create.rs b/cli/src/spu/group/helpers/proc_create.rs new file mode 100644 index 0000000000..ab22ab6421 --- /dev/null +++ b/cli/src/spu/group/helpers/proc_create.rs @@ -0,0 +1,87 @@
+//! +//! # Fluvio SC - Process Create Group +//! +//! Sends Create SPU Group request to Fluvio Streaming Controller +//! +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::net::SocketAddr; + +use log::trace; + +use future_helper::run_block_on; + +use sc_api::apis::ScApiKey; +use sc_api::spu::FlvCreateSpuGroupsResponse; +use sc_api::spu::FlvCreateSpuGroupRequest; +use sc_api::spu::FlvCreateSpuGroupsRequest; + + +use crate::error::CliError; +use crate::common::Connection; +use crate::common::sc_get_api_versions; +use crate::common::sc_lookup_version; +use crate::common::handle_sc_response; + + +// ----------------------------------- +// Fluvio SC - Process Request +// ----------------------------------- + +// Connect to Fluvio Streaming Controller and process Create Spu Group Request +pub fn process_create_spu_group( + server_addr: SocketAddr, + spg_request: FlvCreateSpuGroupRequest, +) -> Result<(), CliError> { + + let name = spg_request.name.clone(); + + // Run command and collect results + match run_block_on(send_request_to_server(server_addr, spg_request)) { + Err(err) => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("send create spu-group '{}': {}", name, err), + ))), + Ok(response) => { + if response.results.len() > 0 { + let spu_group_resp = &response.results[0]; + let response = handle_sc_response( + &name, + "spu-group", + "created", + "", + &spu_group_resp.error_code, + &spu_group_resp.error_message, + )?; + println!("{}", response); + + Ok(()) + } else { + Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("cannot create spu group '{}': communication error", name), + ))) + } + } + } +} + +/// Connect to server, get version, and send create request +async fn send_request_to_server<'a>( + server_addr: SocketAddr, + spu_request: FlvCreateSpuGroupRequest, +) -> Result<FlvCreateSpuGroupsResponse, CliError> { + let mut conn = Connection::new(&server_addr).await?; + let request: FlvCreateSpuGroupsRequest = spu_request.into(); + let versions = sc_get_api_versions(&mut conn).await?; + let version = sc_lookup_version(ScApiKey::FlvCreateSpuGroups, &versions); + + trace!("create spu-group req '{}': {:#?}", server_addr, request); + + let response = conn.send_request(request, version).await?; + + trace!("create spu-group res '{}': {:#?}", server_addr, response); + + Ok(response) +} + diff --git a/cli/src/spu/group/helpers/proc_delete.rs b/cli/src/spu/group/helpers/proc_delete.rs new file mode 100644 index 0000000000..770d832ac4 --- /dev/null +++ b/cli/src/spu/group/helpers/proc_delete.rs @@ -0,0 +1,91 @@ +//! +//! # Fluvio SC - Delete Processing +//! +//! Sends Delete SPU group request to Fluvio Streaming Controller +//!
+use std::io::Error as IoError; +use std::io::ErrorKind; +use std::net::SocketAddr; + +use log::trace; + +use future_helper::run_block_on; + +use sc_api::apis::ScApiKey; +use sc_api::spu::{FlvDeleteSpuGroupsRequest, FlvDeleteSpuGroupsResponse}; + +use crate::error::CliError; +use crate::common::Connection; +use crate::common::sc_get_api_versions; +use crate::common::sc_lookup_version; +use crate::common::handle_sc_response; + +use crate::spu::group::delete::DeleteManagedSpuGroupConfig; + +// ----------------------------------- +// Fluvio SC - Process Request +// ----------------------------------- + +// Connect to Fluvio Streaming Controller and process Delete Spu Group Request +pub fn process_delete_spu_group( + server_addr: SocketAddr, + spu_group_cfg: DeleteManagedSpuGroupConfig, +) -> Result<(), CliError> { + let name = spu_group_cfg.name.clone(); + + // Run command and collect results + match run_block_on(send_request_to_server(server_addr, spu_group_cfg)) { + Err(err) => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("send delete spu-group '{}': {}", &name, err), + ))), + Ok(response) => { + if response.results.len() > 0 { + let spu_group_resp = &response.results[0]; + let response = handle_sc_response( + &name, + "spu-group", + "deleted", + "", + &spu_group_resp.error_code, + &spu_group_resp.error_message, + )?; + println!("{}", response); + + Ok(()) + } else { + Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("cannot delete spu-group '{}': communication error", &name), + ))) + } + } + } +} + +/// Connect to server, get version, and send delete request +async fn send_request_to_server<'a>( + server_addr: SocketAddr, + spu_group_cfg: DeleteManagedSpuGroupConfig, +) -> Result<FlvDeleteSpuGroupsResponse, CliError> { + let mut conn = Connection::new(&server_addr).await?; + let request = encode_request(&spu_group_cfg); + let versions = sc_get_api_versions(&mut conn).await?; + let version = sc_lookup_version(ScApiKey::FlvDeleteSpuGroups, &versions); + + trace!("delete spu-group req '{}': {:#?}", server_addr, request); + + let response = conn.send_request(request, version).await?; + + trace!("delete spu-group res '{}': {:#?}", server_addr, response); + + Ok(response) +} + +/// encode DeleteManagedSpuGroupConfig in Fluvio format +fn encode_request(spu_group_cfg: &DeleteManagedSpuGroupConfig) -> FlvDeleteSpuGroupsRequest { + // generate request with 1 spu group + FlvDeleteSpuGroupsRequest { + spu_groups: vec![spu_group_cfg.name.clone()], + } +} diff --git a/cli/src/spu/group/helpers/proc_list.rs b/cli/src/spu/group/helpers/proc_list.rs new file mode 100644 index 0000000000..b6c9e8a997 --- /dev/null +++ b/cli/src/spu/group/helpers/proc_list.rs @@ -0,0 +1,52 @@ +//! +//! # Query SC for SPU Group metadata +//! +//! Retrieve SPU Groups from SC +//!
+use std::net::SocketAddr; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use log::trace; +use future_helper::run_block_on; + +use sc_api::apis::ScApiKey; +use sc_api::spu::FlvFetchSpuGroupsRequest; +use sc_api::spu::FlvFetchSpuGroupsResponse; + +use crate::error::CliError; +use crate::common::Connection; +use crate::common::sc_get_api_versions; +use crate::common::sc_lookup_version; + +/// Query Fluvio SC server for SPU Groups and convert to SPU Metadata +pub fn query_spu_group_metadata( + server_addr: SocketAddr, +) -> Result<FlvFetchSpuGroupsResponse, CliError> { + run_block_on(send_request_to_server(server_addr)).map_err(|err| { + CliError::IoError(IoError::new( + ErrorKind::Other, + format!("cannot retrieve spu groups: {}", err), + )) + }) +} + +/// Send query to server and retrieve a list of SPU Groups or errors. +async fn send_request_to_server<'a>( + server_addr: SocketAddr, +) -> Result<FlvFetchSpuGroupsResponse, CliError> { + // look-up version + let mut conn = Connection::new(&server_addr).await?; + let versions = sc_get_api_versions(&mut conn).await?; + let version = sc_lookup_version(ScApiKey::FlvFetchSpuGroups, &versions); + + // generate request + trace!("query spu group req '{}'", server_addr); + + let request = FlvFetchSpuGroupsRequest::default(); + let response = conn.send_request(request, version).await?; + + trace!("query spu group res '{}': {:#?}", server_addr, response); + + Ok(response) +} diff --git a/cli/src/spu/group/list.rs b/cli/src/spu/group/list.rs new file mode 100644 index 0000000000..cc375a4207 --- /dev/null +++ b/cli/src/spu/group/list.rs @@ -0,0 +1,93 @@ +//! # List SPU Groups CLI +//! +//! CLI tree and processing to list SPU Groups +//! +use std::net::SocketAddr; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use structopt::StructOpt; + +use crate::error::CliError; +use crate::common::OutputType; +use crate::profile::{ProfileConfig, TargetServer}; + +use super::helpers::proc_list::query_spu_group_metadata; +use super::helpers::list_output::spu_group_response_to_output; + +// ----------------------------------- +// CLI Options +// ----------------------------------- + +#[derive(Debug, StructOpt)] +pub struct ListManagedSpuGroupsOpt { + /// Address of Streaming Controller + #[structopt(short = "c", long = "sc", value_name = "host:port")] + sc: Option<String>, + + /// Profile name + #[structopt(short = "P", long = "profile")] + pub profile: Option<String>, + + /// Output + #[structopt( + short = "O", + long = "output", + value_name = "type", + raw(possible_values = "&OutputType::variants()", case_insensitive = "true") + )] + output: Option<OutputType>, +} + +// ----------------------------------- +// Parsed Config +// ----------------------------------- + +#[derive(Debug)] +pub struct ListSpuGroupsConfig { + pub output: OutputType, +} + +// ----------------------------------- +// CLI Processing +// ----------------------------------- + +/// Process list spu groups cli request +pub fn process_list_managed_spu_groups(opt: ListManagedSpuGroupsOpt) -> Result<(), CliError> { + let (target_server, list_spu_group_cfg) = parse_opt(opt)?; + + match target_server { + TargetServer::Sc(server_addr) => { + fetch_and_list_spu_groups(server_addr, &list_spu_group_cfg) + } + _ => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + "Invalid SC server configuration", + ))), + } +} + +/// Validate cli options and generate config +fn parse_opt( + opt: ListManagedSpuGroupsOpt, +) -> Result<(TargetServer, ListSpuGroupsConfig), CliError> { + let profile_config = ProfileConfig::new(&opt.sc, &None, &opt.profile)?; + let target_server = profile_config.target_server()?;
+ + // transfer config parameters + let list_spu_group_cfg = ListSpuGroupsConfig { + output: opt.output.unwrap_or(OutputType::default()), + }; + + // return server separately from config + Ok((target_server, list_spu_group_cfg)) +} + +// Fetch SPU groups and output in desired format +fn fetch_and_list_spu_groups( + server_addr: SocketAddr, + list_spu_group_cfg: &ListSpuGroupsConfig, +) -> Result<(), CliError> { + let flv_spu_groups = query_spu_group_metadata(server_addr)?; + spu_group_response_to_output(flv_spu_groups, &list_spu_group_cfg.output) +} diff --git a/cli/src/spu/group/mod.rs b/cli/src/spu/group/mod.rs new file mode 100644 index 0000000000..27a15aad71 --- /dev/null +++ b/cli/src/spu/group/mod.rs @@ -0,0 +1,52 @@ +mod create; +mod delete; +mod list; +mod helpers; + +use structopt::StructOpt; + +use create::CreateManagedSpuGroupOpt; +use create::process_create_managed_spu_group; + +use delete::DeleteManagedSpuGroupOpt; +use delete::process_delete_managed_spu_group; + +use list::ListManagedSpuGroupsOpt; +use list::process_list_managed_spu_groups; + +use crate::error::CliError; + +#[derive(Debug, StructOpt)] +pub enum SpuGroupOpt { + #[structopt(name = "create", author = "", template = "{about} + +{usage} + +{all-args} +", about = "Create managed SPU group")] + Create(CreateManagedSpuGroupOpt), + + #[structopt(name = "delete", author = "", template = "{about} + +{usage} + +{all-args} +", about = "Delete managed SPU group")] + Delete(DeleteManagedSpuGroupOpt), + + #[structopt(name = "list", author = "", template = "{about} + +{usage} + +{all-args} +", about = "List managed SPU groups")] + List(ListManagedSpuGroupsOpt), +} + +pub(crate) fn process_spu_group(spu_group_opt: SpuGroupOpt) -> Result<(), CliError> { + match spu_group_opt { + SpuGroupOpt::Create(spu_group_opt) => process_create_managed_spu_group(spu_group_opt), + SpuGroupOpt::Delete(spu_group_opt) => process_delete_managed_spu_group(spu_group_opt), + SpuGroupOpt::List(spu_group_opt) => process_list_managed_spu_groups(spu_group_opt), + } +}
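Taken together, create.rs, delete.rs, and list.rs give the SPU group command a symmetrical surface. Assuming the SpuGroupOpt enum above is mounted under a spu-group subcommand of the top-level binary (the mounting is outside this diff, and fluvio is used here only as a placeholder name), typical invocations would be:

fluvio spu-group create --name group3 --replicas 3 --min-id 1 --size 2Gi --sc localhost:9003
fluvio spu-group list --sc localhost:9003 -O json
fluvio spu-group delete --name group3 --profile dev-profile

The flag names and shorthands come directly from the structopt attributes above; host, group names, and sizes are illustrative.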
diff --git a/cli/src/spu/helpers/list_metadata.rs b/cli/src/spu/helpers/list_metadata.rs new file mode 100644 index 0000000000..27afd5c82f --- /dev/null +++ b/cli/src/spu/helpers/list_metadata.rs @@ -0,0 +1,157 @@ +//! +//! # Fluvio SC - List SPU Metadata +//! +//! Serializable metadata for List SPU result +//! +use serde::Serialize; + +use sc_api::spu::FlvFetchSpuResponse; +use sc_api::spu::FlvFetchSpu; +use sc_api::spu::FlvSpuResolution; +use sc_api::spu::FlvSpuType; +use sc_api::errors::FlvErrorCode; + +use crate::common::Endpoint; + +// ----------------------------------- +// ScSpuMetadata (Serializable) +// ----------------------------------- + +#[derive(Serialize, Debug)] +pub struct ScSpuMetadata { + pub name: String, + + #[serde(skip_serializing_if = "Option::is_none")] + pub error: Option<FlvErrorCode>, + + #[serde(skip_serializing_if = "Option::is_none")] + pub spu: Option<Spu>, +} + +#[derive(Serialize, Debug)] +pub struct Spu { + pub id: i32, + pub name: String, + pub spu_type: SpuType, + pub public_server: Endpoint, + pub private_server: Endpoint, + + #[serde(skip_serializing_if = "Option::is_none")] + pub rack: Option<String>, + + pub status: SpuResolution, +} + +#[derive(Serialize, Debug)] +pub enum SpuType { + Custom, + Managed, +} + +#[derive(Serialize, Debug)] +pub enum SpuResolution { + Online, + Offline, + Init, +} + +// ----------------------------------- +// Convert from FLV to SPU Metadata +// ----------------------------------- + +pub fn flv_response_to_spu_metadata(flv_spus: &Vec<FlvFetchSpuResponse>) -> Vec<ScSpuMetadata> { + let mut sc_spus: Vec<ScSpuMetadata> = vec![]; + for flv_spu in flv_spus { + sc_spus.push(ScSpuMetadata::new(flv_spu)); + } + sc_spus +} + +// ----------------------------------- +// Implementation +// ----------------------------------- +impl ScSpuMetadata { + pub fn new(fetch_spu_resp: &FlvFetchSpuResponse) -> Self { + // if spu is present, convert it + let spu = if let Some(fetched_spu) = &fetch_spu_resp.spu { + Some(Spu::new(&fetch_spu_resp.name, fetched_spu)) + } else { + None + }; + + // if error is present, convert it + let error = if fetch_spu_resp.error_code.is_error() { + Some(fetch_spu_resp.error_code) + } else { + None + }; + + // spu metadata with all parameters converted + ScSpuMetadata { + name: fetch_spu_resp.name.clone(), + error: error, + spu: spu, + } + } +} + +impl Spu { + pub fn new(spu_name: &String, fetched_spu: &FlvFetchSpu) -> Self { + let public_ep = &fetched_spu.public_ep; + let private_ep = &fetched_spu.private_ep; + + Spu { + id: fetched_spu.id, + name: spu_name.clone(), + spu_type: SpuType::new(&fetched_spu.spu_type), + + public_server: Endpoint::new(&public_ep.host, &public_ep.port), + private_server: Endpoint::new(&private_ep.host, &private_ep.port), + + rack: fetched_spu.rack.clone(), + status: SpuResolution::new(&fetched_spu.resolution), + } + } + + pub fn type_label(&self) -> &'static str { + SpuType::type_label(&self.spu_type) + } + + pub fn status_label(&self) -> &'static str { + SpuResolution::resolution_label(&self.status) + } +} + +impl SpuType { + pub fn new(flv_spu_type: &FlvSpuType) -> Self { + match flv_spu_type { + FlvSpuType::Custom => SpuType::Custom, + FlvSpuType::Managed => SpuType::Managed, + } + } + + pub fn type_label(spu_type: &SpuType) -> &'static str { + match spu_type { + SpuType::Custom => "custom", + SpuType::Managed => "managed", + } + } +} + +impl SpuResolution { + pub fn new(flv_spu_resolution: &FlvSpuResolution) -> Self { + match flv_spu_resolution { + FlvSpuResolution::Online => SpuResolution::Online, + FlvSpuResolution::Offline => SpuResolution::Offline, + FlvSpuResolution::Init => SpuResolution::Init, + } + } + + pub fn resolution_label(resolution: &SpuResolution) -> &'static str { + match resolution { + SpuResolution::Online => "online", + SpuResolution::Offline => "offline", + SpuResolution::Init => "initializing", + } + } +}
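Because ScSpuMetadata derives Serialize with skip_serializing_if on error, spu, and rack, the non-table outputs omit absent fields. Assuming Endpoint serializes as a host/port pair (its definition lives in common and is not part of this diff), one healthy rack-less custom SPU would encode roughly as:

{
  "name": "custom-spu-5001",
  "spu": {
    "id": 5001,
    "name": "custom-spu-5001",
    "spu_type": "Custom",
    "public_server": { "host": "10.0.0.1", "port": 9005 },
    "private_server": { "host": "10.0.0.1", "port": 9006 },
    "status": "Online"
  }
}

spu_type and status serialize as plain variant names since neither enum carries serde attributes; all values shown are illustrative.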
diff --git a/cli/src/spu/helpers/list_output.rs b/cli/src/spu/helpers/list_output.rs new file mode 100644 index 0000000000..9661549f7a --- /dev/null +++ b/cli/src/spu/helpers/list_output.rs @@ -0,0 +1,102 @@ +//! +//! # Fluvio SC - output processing +//! +//! Format SPU response based on output type +//! +use prettytable::Row; +use prettytable::row; +use prettytable::cell; + +use crate::error::CliError; +use crate::common::OutputType; +use crate::common::{EncoderOutputHandler, TableOutputHandler}; + +use super::list_metadata::ScSpuMetadata; + +// ----------------------------------- +// ListSpus Data Structure +// ----------------------------------- + +#[derive(Debug)] +struct ListSpus { + spus: Vec<ScSpuMetadata>, +} + +// ----------------------------------- +// Process Request +// ----------------------------------- + +/// Process server based on output type +pub fn format_spu_response_output( + spus: Vec<ScSpuMetadata>, + output_type: &OutputType, +) -> Result<(), CliError> { + let list_spus = ListSpus { spus }; + + // expecting array with one or more elements + if list_spus.spus.len() > 0 { + if output_type.is_table() { + list_spus.display_errors(); + list_spus.display_table(false); + } else { + list_spus.display_encoding(output_type)?; + } + } else { + println!("No spus found"); + } + Ok(()) +} + +// ----------------------------------- +// Output Handlers +// ----------------------------------- +impl TableOutputHandler for ListSpus { + /// table header implementation + fn header(&self) -> Row { + row!["ID", "NAME", "STATUS", "TYPE", "RACK", "PUBLIC", "PRIVATE"] + } + + /// return errors in string format + fn errors(&self) -> Vec<String> { + let mut errors = vec![]; + for spu_metadata in &self.spus { + if let Some(error) = &spu_metadata.error { + errors.push(format!( + "Spu '{}': {}", + spu_metadata.name, + error.to_sentence() + )); + } + } + errors + } + + /// table content implementation + fn content(&self) -> Vec<Row> { + let mut rows: Vec<Row> = vec![]; + for spu_metadata in &self.spus { + if let Some(spu) = &spu_metadata.spu { + rows.push(row![ + r -> spu.id, + l -> spu.name, + l -> spu.status_label(), + l -> spu.type_label(), + c -> (&spu.rack).as_ref().unwrap_or(&"-".to_string()), + l -> spu.public_server, + l -> spu.private_server, + ]); + } + } + rows + } +} + +impl EncoderOutputHandler for ListSpus { + /// serializable data type + type DataType = Vec<ScSpuMetadata>; + + /// serializable data to be encoded + fn data(&self) -> &Vec<ScSpuMetadata> { + &self.spus + } +} diff --git a/cli/src/spu/helpers/mod.rs b/cli/src/spu/helpers/mod.rs new file mode 100644 index 0000000000..4355ef7b26 --- /dev/null +++ b/cli/src/spu/helpers/mod.rs @@ -0,0 +1,7 @@ +mod list_output; +mod list_metadata; +mod proc_list; + +pub use list_output::format_spu_response_output; +pub use list_metadata::flv_response_to_spu_metadata; +pub use proc_list::query_spu_list_metadata;
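The TableOutputHandler implementations in this changeset all reduce to the same prettytable idiom: a header Row, then one Row per record with per-cell alignment. A minimal standalone sketch of that idiom, using prettytable's row!/cell! macros with made-up values (not the actual trait machinery from common, which is outside this diff):

use prettytable::{row, cell, Table};

fn main() {
    // Header matching ListSpus, plus one illustrative record.
    // The r/c/l prefixes set right/center/left alignment per cell.
    let mut table = Table::new();
    table.add_row(row!["ID", "NAME", "STATUS", "TYPE", "RACK", "PUBLIC", "PRIVATE"]);
    table.add_row(row![
        r -> 5001,
        l -> "custom-spu-5001",
        l -> "online",
        l -> "custom",
        c -> "-",
        l -> "10.0.0.1:9005",
        l -> "10.0.0.1:9006"
    ]);
    // render to stdout
    table.printstd();
}

The cell import is needed because row! expands to cell! invocations, which is also why the diff imports both macros.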
diff --git a/cli/src/spu/helpers/proc_list.rs b/cli/src/spu/helpers/proc_list.rs new file mode 100644 index 0000000000..f6079d0a34 --- /dev/null +++ b/cli/src/spu/helpers/proc_list.rs @@ -0,0 +1,61 @@ +//! +//! # Query SC for SPU metadata +//! +//! Retrieve SPUs from SC +//! +use std::net::SocketAddr; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use log::trace; +use future_helper::run_block_on; + +use sc_api::apis::ScApiKey; +use sc_api::spu::FlvFetchSpusRequest; +use sc_api::spu::FlvFetchSpuResponse; +use sc_api::spu::FlvRequestSpuType; + +use crate::error::CliError; +use crate::common::Connection; +use crate::common::sc_get_api_versions; +use crate::common::sc_lookup_version; + +/// Query Fluvio SC server for SPU and convert to SPU Metadata +pub fn query_spu_list_metadata( + server_addr: SocketAddr, + only_custom_spu: bool, +) -> Result<Vec<FlvFetchSpuResponse>, CliError> { + run_block_on(send_request_to_server(server_addr, only_custom_spu)).map_err(|err| { + CliError::IoError(IoError::new( + ErrorKind::Other, + format!("cannot retrieve spus: {}", err), + )) + }) +} + +/// Send query to server and retrieve a list of SPUs metadata or errors. +async fn send_request_to_server<'a>( + server_addr: SocketAddr, + only_custom_spu: bool, +) -> Result<Vec<FlvFetchSpuResponse>, CliError> { + // look-up version + let mut conn = Connection::new(&server_addr).await?; + let versions = sc_get_api_versions(&mut conn).await?; + let version = sc_lookup_version(ScApiKey::FlvFetchSpus, &versions); + + // generate request + let mut request = FlvFetchSpusRequest::default(); + let req_type = match only_custom_spu { + true => FlvRequestSpuType::Custom, + false => FlvRequestSpuType::All, + }; + request.req_spu_type = req_type; + + trace!("query spu req '{}': {:#?}", server_addr, request); + + let response = conn.send_request(request, version).await?; + + trace!("query spu res '{}': {:#?}", server_addr, response); + + Ok(response.spus) +} diff --git a/cli/src/spu/mod.rs b/cli/src/spu/mod.rs new file mode 100644 index 0000000000..bf3b21b16b --- /dev/null +++ b/cli/src/spu/mod.rs @@ -0,0 +1,4 @@ +pub mod custom; +pub mod group; +pub mod all; +pub mod helpers; diff --git a/cli/src/topic/create.rs b/cli/src/topic/create.rs new file mode 100644 index 0000000000..cac09aa3ee --- /dev/null +++ b/cli/src/topic/create.rs @@ -0,0 +1,177 @@ +//! +//! # Create Topics
+//! +//! CLI tree to generate Create Topics +//!
+ +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::path::PathBuf; + +use structopt::StructOpt; + +use crate::error::CliError; +use crate::profile::{ProfileConfig, TargetServer}; + +use super::helpers::Partitions; +use super::helpers::process_sc_create_topic; +use super::helpers::process_kf_create_topic; + +// ----------------------------------- +// CLI Options +// ----------------------------------- + +#[derive(Debug, StructOpt)] +pub struct CreateTopicOpt { + /// Topic name + #[structopt(short = "t", long = "topic", value_name = "string")] + topic: String, + + /// Number of partitions + #[structopt( + short = "p", + long = "partitions", + value_name = "integer", + required_unless = "replica_assignment" + )] + partitions: Option<i32>, + + /// Replication factor per partition + #[structopt( + short = "r", + long = "replication", + value_name = "integer", + required_unless = "replica_assignment" + )] + replication: Option<i16>, + + /// Ignore racks while computing replica assignment + #[structopt( + short = "i", + long = "ignore-rack-assignment", + conflicts_with = "replica_assignment" + )] + ignore_rack_assignment: bool, + + /// Replica assignment file + #[structopt( + short = "f", + long = "replica-assignment", + value_name = "file.json", + parse(from_os_str), + conflicts_with = "partitions", + conflicts_with = "replication" + )] + replica_assignment: Option<PathBuf>, + + /// Validates configuration, does not provision + #[structopt(short = "v", long = "validate-only")] + validate_only: bool, + + /// Address of Streaming Controller + #[structopt(short = "c", long = "sc", value_name = "host:port")] + sc: Option<String>, + + /// Address of Kafka Controller + #[structopt( + short = "k", + long = "kf", + value_name = "host:port", + conflicts_with = "sc" + )] + kf: Option<String>, + + /// Profile name + #[structopt(short = "P", long = "profile")] + profile: Option<String>, +} + +// ----------------------------------- +// Parsed Config +// ----------------------------------- + +#[derive(Debug)] +pub struct CreateTopicConfig { + pub name: String, + pub replica: ReplicaConfig, + pub validate_only: bool, +} + +#[derive(Debug)] +pub enum ReplicaConfig { + // replica assignment + Assigned(Partitions), + + // partitions, replication, ignore_rack_assignment + Computed(i32, i16, bool), +} + +// ----------------------------------- +// CLI Processing +// ----------------------------------- + +/// Process create topic cli request +pub fn process_create_topic(opt: CreateTopicOpt) -> Result<(), CliError> { + let (target_server, create_topic_cfg) = parse_opt(opt)?; + + match target_server { + TargetServer::Kf(server_addr) => process_kf_create_topic(server_addr, create_topic_cfg), + TargetServer::Sc(server_addr) => process_sc_create_topic(server_addr, create_topic_cfg), + _ => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("invalid target server {:?}", target_server), + ))), + } +} + +/// Validate cli options. Generate target-server and create-topic configuration. +fn parse_opt(opt: CreateTopicOpt) -> Result<(TargetServer, CreateTopicConfig), CliError> { + // profile specific configurations (target server) + let profile_config = ProfileConfig::new(&opt.sc, &opt.kf, &opt.profile)?; + let target_server = profile_config.target_server()?; + + // topic specific configurations + let replica_config = if opt.partitions.is_some() { + parse_computed_replica(&opt)? + } else { + parse_assigned_replica(&opt)?
+ }; + let create_topic_cfg = CreateTopicConfig { + name: opt.topic, + replica: replica_config, + validate_only: opt.validate_only, + }; + + // return server separately from config + Ok((target_server, create_topic_cfg)) +} + +/// Ensure all parameters are valid for computed replication +fn parse_computed_replica(opt: &CreateTopicOpt) -> Result<ReplicaConfig, CliError> { + Ok(ReplicaConfig::Computed( + opt.partitions.unwrap_or(-1), + opt.replication.unwrap_or(-1), + opt.ignore_rack_assignment, + )) +} + +/// Ensure all parameters are valid for assigned replication +fn parse_assigned_replica(opt: &CreateTopicOpt) -> Result<ReplicaConfig, CliError> { + if let Some(replica_assign_file) = &opt.replica_assignment { + match Partitions::file_decode(replica_assign_file) { + Ok(partitions) => Ok(ReplicaConfig::Assigned(partitions)), + Err(err) => Err(CliError::IoError(IoError::new( + ErrorKind::InvalidInput, + format!( + "cannot parse replica assignment file {:?}: {}", + replica_assign_file, err + ), + ))), + } + } else { + Err(CliError::IoError(IoError::new( + ErrorKind::InvalidInput, + "cannot find replica assignment file", + ))) + } +}
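CreateTopicOpt encodes two mutually exclusive modes: computed replicas (--partitions/--replication, with optional --ignore-rack-assignment) and assigned replicas (--replica-assignment), enforced by the required_unless and conflicts_with attributes. Assuming a placeholder top-level binary name of fluvio with these options mounted under topic create (the mounting is outside this diff), the two modes would be driven as:

fluvio topic create --topic topic1 --partitions 2 --replication 3 --sc localhost:9003
fluvio topic create --topic topic2 --replica-assignment ./replicas.json --validate-only --kf localhost:9092

Flag names match the structopt attributes above; topic names, hosts, and the file path are illustrative. A sketch of the assignment file's expected contents follows partition_map.rs below.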
diff --git a/cli/src/topic/delete.rs b/cli/src/topic/delete.rs new file mode 100644 index 0000000000..15aed1cd24 --- /dev/null +++ b/cli/src/topic/delete.rs @@ -0,0 +1,81 @@ +//! +//! # Delete Topics +//! +//! CLI tree to generate Delete Topics +//! +use std::io::Error as IoError; +use std::io::ErrorKind; + +use structopt::StructOpt; + +use crate::error::CliError; +use crate::profile::{ProfileConfig, TargetServer}; + +use super::helpers::process_sc_delete_topic; +use super::helpers::process_kf_delete_topic; + +// ----------------------------------- +// CLI Options +// ----------------------------------- + +#[derive(Debug, StructOpt)] +pub struct DeleteTopicOpt { + /// Topic name + #[structopt(short = "t", long = "topic", value_name = "string")] + topic: String, + + /// Address of Streaming Controller + #[structopt(short = "c", long = "sc", value_name = "host:port")] + sc: Option<String>, + + /// Address of Kafka Controller + #[structopt( + short = "k", + long = "kf", + value_name = "host:port", + conflicts_with = "sc" + )] + kf: Option<String>, + + /// Profile name + #[structopt(short = "P", long = "profile")] + profile: Option<String>, +} + +// ----------------------------------- +// Parsed Config +// ----------------------------------- + +#[derive(Debug)] +pub struct DeleteTopicConfig { + pub name: String, +} + +// ----------------------------------- +// CLI Processing +// ----------------------------------- + +/// Process delete topic cli request +pub fn process_delete_topic(opt: DeleteTopicOpt) -> Result<(), CliError> { + let (target_server, delete_topic_cfg) = parse_opt(opt)?; + + match target_server { + TargetServer::Kf(server_addr) => process_kf_delete_topic(server_addr, delete_topic_cfg), + TargetServer::Sc(server_addr) => process_sc_delete_topic(server_addr, delete_topic_cfg), + _ => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("invalid target server {:?}", target_server), + ))), + } +} + +/// Validate cli options. Generate target-server and delete-topic configuration. +fn parse_opt(opt: DeleteTopicOpt) -> Result<(TargetServer, DeleteTopicConfig), CliError> { + // profile specific configurations (target server) + let profile_config = ProfileConfig::new(&opt.sc, &opt.kf, &opt.profile)?; + let target_server = profile_config.target_server()?; + let delete_topic_cfg = DeleteTopicConfig { name: opt.topic }; + + // return server separately from config + Ok((target_server, delete_topic_cfg)) +} diff --git a/cli/src/topic/describe.rs b/cli/src/topic/describe.rs new file mode 100644 index 0000000000..dbbbe5b242 --- /dev/null +++ b/cli/src/topic/describe.rs @@ -0,0 +1,100 @@ +//! +//! # Describe Topic CLI +//! +//! CLI to describe Topics and their corresponding Partitions +//! +use std::io::Error as IoError; +use std::io::ErrorKind; + +use structopt::StructOpt; + +use crate::error::CliError; +use crate::common::OutputType; +use crate::profile::{ProfileConfig, TargetServer}; + +use super::helpers::process_sc_describe_topics; +use super::helpers::process_kf_describe_topics; + +// ----------------------------------- +// CLI Options +// ----------------------------------- + +#[derive(Debug, StructOpt)] +pub struct DescribeTopicsOpt { + /// Topic names + #[structopt(short = "t", long = "topic", value_name = "string")] + topics: Vec<String>, + + /// Address of Streaming Controller + #[structopt(short = "c", long = "sc", value_name = "host:port")] + sc: Option<String>, + + /// Address of Kafka Controller + #[structopt( + short = "k", + long = "kf", + value_name = "host:port", + conflicts_with = "sc" + )] + kf: Option<String>, + + /// Profile name + #[structopt(short = "P", long = "profile")] + pub profile: Option<String>, + + /// Output + #[structopt( + short = "O", + long = "output", + value_name = "type", + raw(possible_values = "&OutputType::variants()", case_insensitive = "true") + )] + output: Option<OutputType>, +} + +// ----------------------------------- +// Parsed Config +// ----------------------------------- + +#[derive(Debug)] +pub struct DescribeTopicsConfig { + pub topic_names: Vec<String>, + pub output: OutputType, +} + +// ----------------------------------- +// CLI Processing +// ----------------------------------- + +/// Process describe topic cli request +pub fn process_describe_topics(opt: DescribeTopicsOpt) -> Result<(), CliError> { + let (target_server, describe_topics_cfg) = parse_opt(opt)?; + + match target_server { + TargetServer::Kf(server_addr) => { + process_kf_describe_topics(server_addr, &describe_topics_cfg) + } + TargetServer::Sc(server_addr) => { + process_sc_describe_topics(server_addr, &describe_topics_cfg) + } + _ => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("invalid target server {:?}", target_server), + ))), + } +} + +/// Validate cli options and generate config +fn parse_opt(opt: DescribeTopicsOpt) -> Result<(TargetServer, DescribeTopicsConfig), CliError> { + let profile_config = ProfileConfig::new(&opt.sc, &opt.kf, &opt.profile)?; + let target_server = profile_config.target_server()?; + + // transfer config parameters + let describe_topics_cfg = DescribeTopicsConfig { + output: opt.output.unwrap_or(OutputType::default()), + topic_names: opt.topics, + }; + + // return server separately from topic result + Ok((target_server, describe_topics_cfg)) +} diff --git a/cli/src/topic/helpers/mod.rs b/cli/src/topic/helpers/mod.rs new file mode 100644 index 0000000000..d50c6c6898 --- /dev/null +++ b/cli/src/topic/helpers/mod.rs @@ -0,0 +1,29 @@ +mod proc_create_sc; +mod proc_create_kf; +mod proc_delete_sc; +mod proc_delete_kf; +mod proc_describe_sc; +mod proc_describe_kf;
+mod proc_list_sc; +mod proc_list_kf; + +mod topic_metadata_kf; +mod topic_metadata_sc; + +mod partition_map; + +pub use proc_create_sc::process_create_topic as process_sc_create_topic; +pub use proc_create_kf::process_create_topic as process_kf_create_topic; +pub use proc_delete_sc::process_delete_topic as process_sc_delete_topic; +pub use proc_delete_kf::process_delete_topic as process_kf_delete_topic; +pub use proc_describe_sc::process_sc_describe_topics; +pub use proc_describe_kf::process_kf_describe_topics; +pub use proc_list_sc::process_list_topics as process_sc_list_topics; +pub use proc_list_kf::process_list_topics as process_kf_list_topics; + +pub use topic_metadata_sc::ScTopicMetadata; +pub use topic_metadata_sc::query_sc_topic_metadata; +pub use topic_metadata_kf::KfTopicMetadata; +pub use topic_metadata_kf::query_kf_topic_metadata; + +pub use partition_map::Partitions; diff --git a/cli/src/topic/helpers/partition_map.rs b/cli/src/topic/helpers/partition_map.rs new file mode 100644 index 0000000000..386d8f47f8 --- /dev/null +++ b/cli/src/topic/helpers/partition_map.rs @@ -0,0 +1,67 @@ +//! +//! # Partition Map +//! +//! Partition map is read from file and converted to SC/KF configuration +//! + +use serde::Deserialize; +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::fs::read_to_string; +use std::path::Path; + +use kf_protocol::message::topic::CreatableReplicaAssignment; +use sc_api::topic::FlvTopicPartitionMap; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +#[derive(Debug, Deserialize, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct Partitions { + partitions: Vec<Partition>, +} + +#[derive(Debug, Deserialize, PartialEq)] +pub struct Partition { + id: i32, + replicas: Vec<i32>, +} + +// ----------------------------------- +// Partitions - Decode/Encode
+// ----------------------------------- + +impl Partitions { + /// Read and decode the json file into Replica Assignment map + pub fn file_decode<T: AsRef<Path>>(path: T) -> Result<Self, IoError> { + let file_str: String = read_to_string(path)?; + serde_json::from_str(&file_str) + .map_err(|err| IoError::new(ErrorKind::InvalidData, format!("{}", err))) + } + + // Encode Replica Assignment map into Kafka Create Replica Assignment + pub fn kf_encode(&self) -> Vec<CreatableReplicaAssignment> { + let mut assignments: Vec<CreatableReplicaAssignment> = vec![]; + for partition in &self.partitions { + assignments.push(CreatableReplicaAssignment { + partition_index: partition.id, + broker_ids: partition.replicas.clone(), + }) + } + assignments + } + + // Encode Replica Assignment map into Fluvio format + pub fn sc_encode(&self) -> Vec<FlvTopicPartitionMap> { + let mut partition_map: Vec<FlvTopicPartitionMap> = vec![]; + for partition in &self.partitions { + partition_map.push(FlvTopicPartitionMap { + id: partition.id, + replicas: partition.replicas.clone(), + }) + } + partition_map + } +}
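Partitions::file_decode parses the --replica-assignment file with serde_json, so the expected shape follows the two Deserialize structs directly: a top-level partitions array whose entries pair a partition id with its replica (SPU/broker) ids. An illustrative replicas.json (ids and replica lists are made up):

{
  "partitions": [
    { "id": 0, "replicas": [5001, 5002, 5003] },
    { "id": 1, "replicas": [5002, 5003, 5001] }
  ]
}

kf_encode maps each entry to a Kafka CreatableReplicaAssignment and sc_encode to a Fluvio FlvTopicPartitionMap, so the same file drives both targets.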
diff --git a/cli/src/topic/helpers/proc_create_kf.rs b/cli/src/topic/helpers/proc_create_kf.rs new file mode 100644 index 0000000000..fc85cbca39 --- /dev/null +++ b/cli/src/topic/helpers/proc_create_kf.rs @@ -0,0 +1,134 @@ +//! +//! # Kafka - Processing +//! +//! Sends Create Topic request to Kafka Controller +//! + +use std::net::SocketAddr; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use log::trace; +use types::defaults::KF_REQUEST_TIMEOUT_MS; + +use kf_protocol::message::topic::CreatableTopic; +use kf_protocol::message::topic::{KfCreateTopicsRequest, KfCreateTopicsResponse}; +use kf_protocol::message::KfApiVersions; +use kf_protocol::api::AllKfApiKey; +use future_helper::run_block_on; + +use crate::error::CliError; +use crate::common::Connection; +use crate::common::handle_kf_response; +use crate::common::{kf_lookup_version, kf_get_api_versions}; + +use crate::topic::create::{CreateTopicConfig, ReplicaConfig}; + +// ----------------------------------- +// Kafka - Process Request +// ----------------------------------- + +// Connect to Kafka Controller and process Create Topic Request +pub fn process_create_topic( + server_addr: SocketAddr, + topic_cfg: CreateTopicConfig, +) -> Result<(), CliError> { + let topic_name = topic_cfg.name.clone(); + let prepend_validation = if topic_cfg.validate_only { + "(validation-only) " + } else { + "" + }; + + // Run command and collect results + match run_block_on(get_version_and_create_topic(server_addr, topic_cfg)) { + Err(err) => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("send create topic '{}' to kf: {}", topic_name, err), + ))), + Ok(response) => { + if response.topics.len() > 0 { + let topic_resp = &response.topics[0]; + let response = handle_kf_response( + &topic_resp.name, + "topic", + "created", + prepend_validation, + &topic_resp.error_code, + &topic_resp.error_message, + )?; + println!("{}", response); + + Ok(()) + } else { + Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!( + "{}create topic '{}': empty response", + prepend_validation, &topic_name + ), + ))) + } + } + } +} + +// Connect to Kafka server, get version and send request +async fn get_version_and_create_topic( + server_addr: SocketAddr, + topic_cfg: CreateTopicConfig, +) -> Result<KfCreateTopicsResponse, CliError> { + let mut conn = Connection::new(&server_addr).await?; + let versions = kf_get_api_versions(&mut conn).await?; + + send_request_to_server(&mut conn, topic_cfg, &versions).await +} + +/// Send create topic request to Kafka server +async fn send_request_to_server<'a>( + conn: &'a mut Connection, + topic_cfg: CreateTopicConfig, + versions: &'a KfApiVersions, +) -> Result<KfCreateTopicsResponse, CliError> { + let request = encode_request(&topic_cfg); + let version = kf_lookup_version(AllKfApiKey::CreateTopics, versions); + + trace!("create topic req '{}': {:#?}", conn.server_addr(), request); + + let response = conn.send_request(request, version).await?; + + trace!("create topic res '{}': {:#?}", conn.server_addr(), response); + + Ok(response) +} + +/// encode CreateTopicRequest in Kafka format +fn encode_request(topic_cfg: &CreateTopicConfig) -> KfCreateTopicsRequest { + // create topic request + let topic_request = match &topic_cfg.replica { + // Computed Replicas + ReplicaConfig::Computed(partitions, replicas, _) => CreatableTopic { + name: topic_cfg.name.clone(), + num_partitions: *partitions, + replication_factor: *replicas, + assignments: vec![], + configs: vec![], + }, + + // Assigned (user defined) Replicas + ReplicaConfig::Assigned(partitions) => CreatableTopic { + name: topic_cfg.name.clone(), + num_partitions: -1, + replication_factor: -1, + assignments: partitions.kf_encode(), + configs: vec![], + }, + }; + + // encode topic request + KfCreateTopicsRequest { + topics: vec![topic_request], + timeout_ms: KF_REQUEST_TIMEOUT_MS, + validate_only: topic_cfg.validate_only, + } +}
diff --git a/cli/src/topic/helpers/proc_create_sc.rs b/cli/src/topic/helpers/proc_create_sc.rs new file mode 100644 index 0000000000..c37f734ed1 --- /dev/null +++ b/cli/src/topic/helpers/proc_create_sc.rs @@ -0,0 +1,120 @@ +//! +//! # Fluvio SC - Processing +//! +//! Sends Create Topic request to Fluvio Streaming Controller +//! + +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::net::SocketAddr; + +use log::trace; + +use future_helper::run_block_on; + +use sc_api::apis::ScApiKey; +use sc_api::topic::{FlvCreateTopicRequest, FlvCreateTopicsRequest, FlvCreateTopicsResponse}; +use sc_api::topic::FlvTopicSpecMetadata; + +use crate::error::CliError; +use crate::common::Connection; +use crate::common::sc_get_api_versions; +use crate::common::sc_lookup_version; +use crate::common::handle_sc_response; + +use crate::topic::create::{CreateTopicConfig, ReplicaConfig}; + +// ----------------------------------- +// Fluvio SC - Process Request +// ----------------------------------- + +// Connect to Fluvio Streaming Controller and process Create Topic Request +pub fn process_create_topic( + server_addr: SocketAddr, + topic_cfg: CreateTopicConfig, +) -> Result<(), CliError> { + let topic_name = topic_cfg.name.clone(); + let prepend_validation = if topic_cfg.validate_only { + "(validation-only) " + } else { + "" + }; + + // Run command and collect results + match run_block_on(send_request_to_server(server_addr, topic_cfg)) { + Err(err) => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("send create topic '{}': {}", topic_name, err), + ))), + Ok(response) => { + if response.results.len() > 0 { + let topic_resp = &response.results[0]; + let response = handle_sc_response( + &topic_resp.name, + "topic", + "created", + prepend_validation, + &topic_resp.error_code, + &topic_resp.error_message, + )?; + println!("{}", response); + + Ok(()) + } else { + Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!( + "{}cannot create topic '{}': communication error", + prepend_validation, &topic_name + ), + ))) + } + } + } +} + +/// Connect to server, get version, and send create request +async fn send_request_to_server<'a>( + server_addr: SocketAddr, + topic_cfg: CreateTopicConfig, +) -> Result<FlvCreateTopicsResponse, CliError> { + let mut conn = Connection::new(&server_addr).await?; + let request = encode_request(&topic_cfg); + let versions = sc_get_api_versions(&mut conn).await?; + let version = sc_lookup_version(ScApiKey::FlvCreateTopics, &versions); + + trace!("create topic req '{}': {:#?}", server_addr, request); + + let response = conn.send_request(request, version).await?; + + trace!("create topic res '{}': {:#?}", server_addr, response); + + Ok(response) +} + +/// encode CreateTopicRequest in Fluvio format +fn encode_request(topic_cfg: &CreateTopicConfig) -> FlvCreateTopicsRequest { + // generate Topic Create Metadata + let topic_metadata = match &topic_cfg.replica { + // Computed Replicas + ReplicaConfig::Computed(partitions, replicas, ignore_rack) => { + FlvTopicSpecMetadata::Computed((*partitions, *replicas as i32, *ignore_rack).into()) + } + + // Assigned (user defined) Replicas + ReplicaConfig::Assigned(partitions) => { + FlvTopicSpecMetadata::Assigned(partitions.sc_encode().into()) + } + }; + // generate topic request + let create_topic_req = FlvCreateTopicRequest { + name: topic_cfg.name.clone(), + topic: topic_metadata, + }; + + // encode & return create topic request + FlvCreateTopicsRequest { + topics: vec![create_topic_req], + validate_only: topic_cfg.validate_only, + } +}
diff --git a/cli/src/topic/helpers/proc_delete_kf.rs b/cli/src/topic/helpers/proc_delete_kf.rs new file mode 100644 index 0000000000..a64adc6854 --- /dev/null +++ b/cli/src/topic/helpers/proc_delete_kf.rs @@ -0,0 +1,104 @@ +//! +//! # Kafka - Delete Topic Processing +//! +//! Sends Delete Topic request to Kafka Controller +//! + +use std::net::SocketAddr; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use log::trace; + +use kf_protocol::message::topic::{KfDeleteTopicsRequest, KfDeleteTopicsResponse}; +use kf_protocol::message::KfApiVersions; +use kf_protocol::api::AllKfApiKey; +use future_helper::run_block_on; + +use types::defaults::KF_REQUEST_TIMEOUT_MS; + +use crate::error::CliError; +use crate::common::Connection; +use crate::common::handle_kf_response; +use crate::common::{kf_lookup_version, kf_get_api_versions}; + +use crate::topic::delete::DeleteTopicConfig; + +// ----------------------------------- +// Kafka - Process Request +// ----------------------------------- + +// Connect to Kafka Controller and process Delete Topic Request +pub fn process_delete_topic<'a>( + server_addr: SocketAddr, + topic_cfg: DeleteTopicConfig, +) -> Result<(), CliError> { + let topic_name = topic_cfg.name.clone(); + + // Run command and collect results + match run_block_on(get_version_and_delete_topic(server_addr, topic_cfg)) { + Err(err) => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("sending delete topic '{}': {}", topic_name, err), + ))), + Ok(response) => { + // print errors + if response.responses.len() > 0 { + let topic_resp = &response.responses[0]; + let response = handle_kf_response( + &topic_resp.name, + "topic", + "deleted", + "", + &topic_resp.error_code, + &None, + )?; + println!("{}", response); + + Ok(()) + } else { + Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("delete topic '{}': empty response", &topic_name), + ))) + } + } + } +} + +// Connect to Kafka server, get version and send request +async fn get_version_and_delete_topic( + server_addr: SocketAddr, + topic_cfg: DeleteTopicConfig, +) -> Result<KfDeleteTopicsResponse, CliError> { + let mut conn = Connection::new(&server_addr).await?; + let versions = kf_get_api_versions(&mut conn).await?; + + send_request_to_server(&mut conn, topic_cfg, &versions).await +} + +/// Send delete topic request to Kafka server +async fn send_request_to_server<'a>( + conn: &'a mut Connection, + topic_cfg: DeleteTopicConfig, + versions: &'a KfApiVersions, +) -> Result<KfDeleteTopicsResponse, CliError> { + let request = encode_request(&topic_cfg); + let version = kf_lookup_version(AllKfApiKey::DeleteTopics, versions); + + trace!("delete topic req '{}': {:#?}", conn.server_addr(), request); + + let response = conn.send_request(request, version).await?; + + trace!("delete topic res '{}': {:#?}", conn.server_addr(), response); + + Ok(response) +} + +/// encode DeleteTopicRequest in Kafka format +fn encode_request(topic_cfg: &DeleteTopicConfig) -> KfDeleteTopicsRequest { + KfDeleteTopicsRequest { + topic_names: vec![topic_cfg.name.clone()], + timeout_ms: KF_REQUEST_TIMEOUT_MS, + } +} diff --git a/cli/src/topic/helpers/proc_delete_sc.rs b/cli/src/topic/helpers/proc_delete_sc.rs new file mode 100644 index 0000000000..cd003f11de --- /dev/null +++ b/cli/src/topic/helpers/proc_delete_sc.rs @@ -0,0 +1,91 @@ +//! +//! # Fluvio SC - Delete Topic Processing +//! +//! Sends Delete Topic request to Fluvio Streaming Controller +//!
+ +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::net::SocketAddr; + +use log::trace; + +use future_helper::run_block_on; + +use sc_api::apis::ScApiKey; +use sc_api::topic::{FlvDeleteTopicsRequest, FlvDeleteTopicsResponse}; + +use crate::error::CliError; +use crate::common::Connection; +use crate::common::sc_get_api_versions; +use crate::common::sc_lookup_version; +use crate::common::handle_sc_response; + +use crate::topic::delete::DeleteTopicConfig; + +// ----------------------------------- +// Fluvio SC - Process Request +// ----------------------------------- + +// Connect to Fluvio Streaming Controller and process Delete Topic Request +pub fn process_delete_topic<'a>( + server_addr: SocketAddr, + topic_cfg: DeleteTopicConfig, +) -> Result<(), CliError> { + let topic_name = topic_cfg.name.clone(); + + // Run command and collect results + match run_block_on(send_request_to_server(server_addr, topic_cfg)) { + Err(err) => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("sending delete topic '{}': {}", topic_name, err), + ))), + Ok(response) => { + if response.results.len() > 0 { + let topic_resp = &response.results[0]; + let response = handle_sc_response( + &topic_resp.name, + "topic", + "deleted", + "", + &topic_resp.error_code, + &topic_resp.error_message, + )?; + println!("{}", response); + + Ok(()) + } else { + Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("delete topic '{}': empty response", &topic_name), + ))) + } + } + } +} + +/// Connect to server, get version, and send delete request +async fn send_request_to_server( + server_addr: SocketAddr, + topic_cfg: DeleteTopicConfig, +) -> Result<FlvDeleteTopicsResponse, CliError> { + let mut conn = Connection::new(&server_addr).await?; + let request = encode_request(&topic_cfg); + let versions = sc_get_api_versions(&mut conn).await?; + let version = sc_lookup_version(ScApiKey::FlvDeleteTopics, &versions); + + trace!("delete topic req '{}': {:#?}", server_addr, request); + + let response = conn.send_request(request, version).await?; + + trace!("delete topic res '{}': {:#?}", server_addr, response); + + Ok(response) +} + +/// encode DeleteTopicRequest in Fluvio format +fn encode_request(topic_cfg: &DeleteTopicConfig) -> FlvDeleteTopicsRequest { + FlvDeleteTopicsRequest { + topics: vec![topic_cfg.name.clone()], + } +} diff --git a/cli/src/topic/helpers/proc_describe_kf.rs b/cli/src/topic/helpers/proc_describe_kf.rs new file mode 100644 index 0000000000..074ab1222f --- /dev/null +++ b/cli/src/topic/helpers/proc_describe_kf.rs @@ -0,0 +1,190 @@ +//! +//! # Kafka - Describe Topic Processing +//! +//! Communicates with Kafka Controller to retrieve desired Topic +//!
+use std::net::SocketAddr; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use serde::Serialize; +use prettytable::Row; +use prettytable::row; +use prettytable::cell; + +use crate::error::CliError; +use crate::common::{DescribeObjects, DescribeObjectHandler}; +use crate::common::{EncoderOutputHandler, KeyValOutputHandler, TableOutputHandler}; + +use super::topic_metadata_kf::KfTopicMetadata; +use super::topic_metadata_kf::query_kf_topic_metadata; + +use crate::topic::describe::DescribeTopicsConfig; + +// ----------------------------------- +// Data Structures (Serializable) +// ----------------------------------- + +type DescribeKfTopics = DescribeObjects<DescribeKfTopic>; + +#[derive(Serialize, Debug)] +pub struct DescribeKfTopic { + pub topic_metadata: KfTopicMetadata, +} + +// ----------------------------------- +// Process Request +// ----------------------------------- + +// Connect to Kafka Controller and query server for topic +pub fn process_kf_describe_topics( + server_addr: SocketAddr, + describe_topic_cfg: &DescribeTopicsConfig, +) -> Result<(), CliError> { + // query none for empty topic_names array + let topic_names = if describe_topic_cfg.topic_names.len() > 0 { + Some(describe_topic_cfg.topic_names.clone()) + } else { + None + }; + + let topics_metadata = query_kf_topic_metadata(server_addr, topic_names)?; + let describe_topics = DescribeKfTopics::new(topics_metadata); + + // print table + let output_type = &describe_topic_cfg.output; + if output_type.is_table() { + describe_topics.print_table() + } else { + describe_topics.display_encoding(output_type) + } +} + +// ----------------------------------- +// Implement - DescribeKfTopics +// ----------------------------------- + +impl DescribeKfTopics { + /// Convert vector of KfTopicMetadata to vector of DescribeTopic{KfTopicMetadata} + fn new(mut topics_metadata: Vec<KfTopicMetadata>) -> Self { + let mut describe_topics = vec![]; + + while topics_metadata.len() > 0 { + describe_topics.push(DescribeKfTopic { + topic_metadata: topics_metadata.remove(0), + }); + } + + DescribeKfTopics { + label: "topic", + label_plural: "topics", + describe_objects: describe_topics, + } + } +} + +// ----------------------------------- +// Implement - EncoderOutputHandler +// ----------------------------------- + +impl EncoderOutputHandler for DescribeKfTopics { + /// serializable data type + type DataType = Vec<DescribeKfTopic>; + + /// serializable data to be encoded + fn data(&self) -> &Vec<DescribeKfTopic> { + &self.describe_objects + } +} + +// ----------------------------------- +// Implement - DescribeObjectHandler +// ----------------------------------- + +impl DescribeObjectHandler for DescribeKfTopic { + fn is_ok(&self) -> bool { + self.topic_metadata.topic.is_some() + } + fn is_error(&self) -> bool { + self.topic_metadata.error.is_some() + } + + /// validate topic + fn validate(&self) -> Result<(), CliError> { + let name = &self.topic_metadata.name; + + if let Some(error) = self.topic_metadata.error { + Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("topic '{}' {}", name, error.to_sentence()), + ))) + } else if self.topic_metadata.topic.is_none() { + Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("topic '{}', undefined error", name), + ))) + } else { + Ok(()) + } + } +} + +// ----------------------------------- +// Implement - TableOutputHandler +// ----------------------------------- + +impl TableOutputHandler for DescribeKfTopic { + /// table header implementation + fn header(&self) -> Row { + row!["ID", "STATUS", "LEADER", "REPLICAS", "ISR"] + }
/// return errors in string format + fn errors(&self) -> Vec<String> { + vec![] + } + /// table content implementation + fn content(&self) -> Vec<Row> { + let mut rows: Vec<Row> = vec![]; + if let Some(topic) = &self.topic_metadata.topic { + for partition in &topic.partition_map { + rows.push(row![ + r -> partition.id, + c -> partition.status, + c -> partition.leader, + l -> format!("{:?}", partition.replicas), + l -> format!("{:?}", partition.isr), + ]); + } + } + + rows + } +} + +// ----------------------------------- +// Implement - KeyValOutputHandler +// ----------------------------------- + +impl KeyValOutputHandler for DescribeKfTopic { + /// key value hash map implementation + fn key_vals(&self) -> Vec<(String, Option<String>)> { + let mut key_vals = Vec::new(); + if let Some(topic) = &self.topic_metadata.topic { + key_vals.push(("Name".to_owned(), Some(self.topic_metadata.name.clone()))); + key_vals.push(("Internal".to_owned(), Some(topic.is_internal.to_string()))); + + key_vals.push(( + "Partition Count".to_owned(), + Some(topic.partitions.to_string()), + )); + key_vals.push(( + "Replication Factor".to_owned(), + Some(topic.replication_factor.to_string()), + )); + key_vals.push(("Partition Replicas".to_owned(), None)); + key_vals.push(("-----------------".to_owned(), None)); + } + key_vals + } +} diff --git a/cli/src/topic/helpers/proc_describe_sc.rs b/cli/src/topic/helpers/proc_describe_sc.rs new file mode 100644 index 0000000000..7c94e9c81b --- /dev/null +++ b/cli/src/topic/helpers/proc_describe_sc.rs @@ -0,0 +1,206 @@ +//! +//! # Fluvio SC - Describe Topic Processing +//! +//! Communicates with Fluvio Streaming Controller to retrieve desired Topic +//! + +use std::net::SocketAddr; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use serde::Serialize; +use prettytable::Row; +use prettytable::cell; +use prettytable::row; + +use crate::error::CliError; +use crate::common::{DescribeObjects, DescribeObjectHandler}; +use crate::common::{KeyValOutputHandler, TableOutputHandler, EncoderOutputHandler}; + +use super::topic_metadata_sc::ScTopicMetadata; +use super::topic_metadata_sc::query_sc_topic_metadata; + +use crate::topic::describe::DescribeTopicsConfig; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +type DescribeScTopics = DescribeObjects<DescribeScTopic>; + +#[derive(Serialize, Debug)] +pub struct DescribeScTopic { + pub topic_metadata: ScTopicMetadata, +} + +// ----------------------------------- +// Process Request +// ----------------------------------- + +// Connect to Fluvio Streaming Controller and query server for topics +pub fn process_sc_describe_topics( + server_addr: SocketAddr, + describe_topic_cfg: &DescribeTopicsConfig, +) -> Result<(), CliError> { + // pass None (query all topics) when the topic_names array is empty + let topic_names = if describe_topic_cfg.topic_names.len() > 0 { + Some(describe_topic_cfg.topic_names.clone()) + } else { + None + }; + + let topics_metadata = query_sc_topic_metadata(server_addr, topic_names)?; + let describe_topics = DescribeScTopics::new(topics_metadata); + + // print table + let output_type = &describe_topic_cfg.output; + if output_type.is_table() { + describe_topics.print_table() + } else { + describe_topics.display_encoding(output_type) + } +} + +// ----------------------------------- +// Implement - DescribeScTopics +// ----------------------------------- + +impl DescribeScTopics { + /// Convert a vector of ScTopicMetadata to a vector of DescribeScTopic + fn new(mut topics_metadata: Vec<ScTopicMetadata>) -> Self { + let mut describe_topics = vec![]; + + while topics_metadata.len() > 0 { + describe_topics.push(DescribeScTopic { + topic_metadata: topics_metadata.remove(0), + }); + } + + DescribeScTopics { + label: "topic", + label_plural: "topics", + describe_objects: describe_topics, + } + } +} + +// ----------------------------------- +// Implement - EncoderOutputHandler +// ----------------------------------- + +impl EncoderOutputHandler for DescribeScTopics { + /// serializable data type + type DataType = Vec<DescribeScTopic>; + + /// serializable data to be encoded + fn data(&self) -> &Vec<DescribeScTopic> { + &self.describe_objects + } +} + +// ----------------------------------- +// Implement - DescribeObjectHandler +// ----------------------------------- + +impl DescribeObjectHandler for DescribeScTopic { + fn is_ok(&self) -> bool { + self.topic_metadata.topic.is_some() + } + + fn is_error(&self) -> bool { + self.topic_metadata.error.is_some() + } + + /// validate topic + fn validate(&self) -> Result<(), CliError> { + let name = &self.topic_metadata.name; + if let Some(error) = self.topic_metadata.error { + Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("topic '{}' {}", name, error.to_sentence()), + ))) + } else if self.topic_metadata.topic.is_none() { + Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("topic '{}', undefined error", name), + ))) + } else { + Ok(()) + } + } +} + +// ----------------------------------- +// Implement - TableOutputHandler +// ----------------------------------- + +impl TableOutputHandler for DescribeScTopic { + /// table header implementation + fn header(&self) -> Row { + row!["ID", "LEADER", "REPLICAS", "LIVE-REPLICAS",] + } + + /// return errors in string format + fn errors(&self) -> Vec<String> { + vec![] + } + + /// table content implementation + fn content(&self) -> Vec<Row> { + let mut rows: Vec<Row> = vec![]; + if let Some(topic) = &self.topic_metadata.topic { + if let Some(ref partitions) = topic.partition_map { + for partition in partitions { + rows.push(row![ + r -> partition.id, + c -> partition.leader, + l -> format!("{:?}", partition.replicas), + l -> format!("{:?}", partition.live_replicas), + ]); + } + } + } + + rows + } +} + +// ----------------------------------- +// Implement - KeyValOutputHandler +// ----------------------------------- + +impl KeyValOutputHandler for DescribeScTopic { + /// key value hash map implementation + fn key_vals(&self) -> Vec<(String, Option<String>)> { + let mut key_vals = Vec::new(); + if let Some(topic) = &self.topic_metadata.topic { + let reason = if topic.reason.len() > 0 { + topic.reason.clone() + } else { + "-".to_owned() + }; + key_vals.push(("Name".to_owned(), Some(self.topic_metadata.name.clone()))); + key_vals.push(("Type".to_owned(), Some(topic.type_label().to_string()))); + if topic.assigned_partitions.is_some() { + key_vals.push(( + "Assigned Partitions".to_owned(), + Some(topic.assigned_partitions.as_ref().unwrap().clone()), + )); + } + key_vals.push(("Partition Count".to_owned(), Some(topic.partitions_str()))); + key_vals.push(( + "Replication Factor".to_owned(), + Some(topic.replication_factor_str()), + )); + key_vals.push(( + "Ignore Rack Assignment".to_owned(), + Some(topic.ignore_rack_assign_str().to_string()), + )); + key_vals.push(("Status".to_owned(), Some(topic.status_label().to_string()))); + key_vals.push(("Reason".to_owned(), Some(reason))); + key_vals.push(("Partition Map".to_owned(), None)); + key_vals.push(("-----------------".to_owned(), None)); + } + key_vals + } +} diff --git a/cli/src/topic/helpers/proc_list_kf.rs b/cli/src/topic/helpers/proc_list_kf.rs new file mode 100644 index 0000000000..e2e452b609 --- /dev/null +++ b/cli/src/topic/helpers/proc_list_kf.rs @@ -0,0 +1,114 @@ +//! +//! # Kafka - List Topic Processing +//! +//! Communicates with Kafka Controller to retrieve all Topics +//! + +use std::net::SocketAddr; + +use serde::Serialize; +use prettytable::Row; +use prettytable::row; +use prettytable::cell; + +use crate::error::CliError; +use crate::common::OutputType; +use crate::common::{EncoderOutputHandler, TableOutputHandler}; + +use super::topic_metadata_kf::KfTopicMetadata; +use super::topic_metadata_kf::query_kf_topic_metadata; + +use crate::topic::list::ListTopicsConfig; + +// ----------------------------------- +// Data Structures (Serializable) +// ----------------------------------- + +#[derive(Serialize, Debug)] +struct ListTopics { + topics: Vec<KfTopicMetadata>, +} + +// ----------------------------------- +// Process Request +// ----------------------------------- + +// Retrieve and print topics in desired format +pub fn process_list_topics( + server_addr: SocketAddr, + list_topic_cfg: &ListTopicsConfig, +) -> Result<(), CliError> { + let topics = query_kf_topic_metadata(server_addr, None)?; + let list_topics = ListTopics { topics }; + process_server_response(&list_topics, &list_topic_cfg.output) +} + +/// Process server response based on output type +fn process_server_response( + list_topics: &ListTopics, + output_type: &OutputType, +) -> Result<(), CliError> { + // expecting array with one or more elements + if list_topics.topics.len() > 0 { + if output_type.is_table() { + list_topics.display_errors(); + list_topics.display_table(false); + } else { + list_topics.display_encoding(output_type)?; + } + } else { + println!("No topics found"); + } + Ok(()) +} + +// ----------------------------------- +// Output Handlers +// ----------------------------------- +impl TableOutputHandler for ListTopics { + /// table header implementation + fn header(&self) -> Row { + row!["NAME", "INTERNAL", "PARTITIONS", "REPLICAS",] + } + + /// return errors in string format + fn errors(&self) -> Vec<String> { + let mut errors = vec![]; + for topic_metadata in &self.topics { + if let Some(error) = &topic_metadata.error { + errors.push(format!( + "Topic '{}': {}", + topic_metadata.name, + error.to_sentence() + )); + } + } + errors + } + + /// table content implementation + fn content(&self) -> Vec<Row> { + let mut rows: Vec<Row> = vec![]; + for topic_metadata in &self.topics { + if let Some(topic) = &topic_metadata.topic { + rows.push(row![ + l -> topic_metadata.name, + c -> topic.is_internal.to_string(), + c -> topic.partitions.to_string(), + c -> topic.replication_factor.to_string(), + ]); + } + } + rows + } +} + +impl EncoderOutputHandler for ListTopics { + /// serializable data type + type DataType = Vec<KfTopicMetadata>; + + /// serializable data to be encoded + fn data(&self) -> &Vec<KfTopicMetadata> { + &self.topics + } +} diff --git a/cli/src/topic/helpers/proc_list_sc.rs b/cli/src/topic/helpers/proc_list_sc.rs new file mode 100644 index 0000000000..564498b740 --- /dev/null +++ b/cli/src/topic/helpers/proc_list_sc.rs @@ -0,0 +1,125 @@ +//! +//! # Fluvio SC - List Topic Processing +//! +//! Retrieve all Topics and print to screen +//!
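+// +// Flow sketch (informal, mirrors the Kafka variant above): query_sc_topic_metadata() +// -> Vec<ScTopicMetadata>, wrapped in ListTopics, then rendered as a table +// (display_errors() + display_table()) or as an encoded document per output type. +//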
+ +use std::net::SocketAddr; + +use prettytable::Row; +use prettytable::row; +use prettytable::cell; + +use crate::error::CliError; +use crate::common::OutputType; +use crate::common::{EncoderOutputHandler, TableOutputHandler}; + +use super::topic_metadata_sc::ScTopicMetadata; +use super::topic_metadata_sc::query_sc_topic_metadata; + +use crate::topic::list::ListTopicsConfig; + +// ----------------------------------- +// ListTopics Data Structure +// ----------------------------------- + +#[derive(Debug)] +struct ListTopics { + topics: Vec<ScTopicMetadata>, +} + +// ----------------------------------- +// Process Request +// ----------------------------------- + +// Retrieve and print topics in desired format +pub fn process_list_topics( + server_addr: SocketAddr, + list_topic_cfg: &ListTopicsConfig, +) -> Result<(), CliError> { + let topics = query_sc_topic_metadata(server_addr, None)?; + let list_topics = ListTopics { topics }; + + format_response_output(&list_topics, &list_topic_cfg.output) +} + +/// Format server response based on output type +fn format_response_output( + list_topics: &ListTopics, + output_type: &OutputType, +) -> Result<(), CliError> { + // expecting array with one or more elements + if list_topics.topics.len() > 0 { + if output_type.is_table() { + list_topics.display_errors(); + list_topics.display_table(false); + } else { + list_topics.display_encoding(output_type)?; + } + } else { + println!("No topics found"); + } + Ok(()) +} + +// ----------------------------------- +// Output Handlers +// ----------------------------------- +impl TableOutputHandler for ListTopics { + /// table header implementation + fn header(&self) -> Row { + row![ + "NAME", + "TYPE", + "PARTITIONS", + "REPLICAS", + "IGNORE-RACK", + "STATUS", + "REASON" + ] + } + + /// return errors in string format + fn errors(&self) -> Vec<String> { + let mut errors = vec![]; + for topic_metadata in &self.topics { + if let Some(error) = &topic_metadata.error { + errors.push(format!( + "Topic '{}': {}", + topic_metadata.name, + error.to_sentence() + )); + } + } + errors + } + + /// table content implementation + fn content(&self) -> Vec<Row> { + let mut rows: Vec<Row> = vec![]; + for topic_metadata in &self.topics { + if let Some(topic) = &topic_metadata.topic { + rows.push(row![ + l -> topic_metadata.name, + c -> topic.type_label(), + c -> topic.partitions_str(), + c -> topic.replication_factor_str(), + c -> topic.ignore_rack_assign_str(), + c -> topic.status_label(), + l -> topic.reason, + ]); + } + } + rows + } +} + +impl EncoderOutputHandler for ListTopics { + /// serializable data type + type DataType = Vec<ScTopicMetadata>; + + /// serializable data to be encoded + fn data(&self) -> &Vec<ScTopicMetadata> { + &self.topics + } +} diff --git a/cli/src/topic/helpers/topic_metadata_kf.rs b/cli/src/topic/helpers/topic_metadata_kf.rs new file mode 100644 index 0000000000..1511adda28 --- /dev/null +++ b/cli/src/topic/helpers/topic_metadata_kf.rs @@ -0,0 +1,155 @@ +//! +//! # Kafka -- Query Topics +//! +//! Retrieves Kafka Metadata and converts Topics to KfTopicMetadata +//!
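+// +// Serialized shape (informal sketch): with skip_serializing_if, at most one of +// the optional fields appears per entry - e.g. {"name":"t1","topic":{...}} for +// a healthy topic, or {"name":"t1","error":...} when the lookup failed. +//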
+ +use std::net::SocketAddr; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use serde::Serialize; + +use kf_protocol::message::metadata::MetadataResponseTopic; +use kf_protocol::message::metadata::MetadataResponsePartition; +use kf_protocol::message::metadata::KfMetadataResponse; +use kf_protocol::api::ErrorCode as KfErrorCode; +use future_helper::run_block_on; + +use crate::error::CliError; +use crate::common::Connection; +use crate::common::query_kf_metadata; +use crate::common::kf_get_api_versions; + +// ----------------------------------- +// Data Structures (Serializable) +// ----------------------------------- + +#[derive(Serialize, Debug)] +pub struct KfTopicMetadata { + pub name: String, + + #[serde(skip_serializing_if = "Option::is_none")] + pub error: Option<KfErrorCode>, + + #[serde(skip_serializing_if = "Option::is_none")] + pub topic: Option<Topic>, +} + +#[derive(Serialize, Debug)] +pub struct Topic { + pub is_internal: bool, + pub partitions: i32, + pub replication_factor: i32, + pub partition_map: Vec<PartitionReplica>, +} + +#[derive(Serialize, Debug)] +pub struct PartitionReplica { + pub id: i32, + pub leader: i32, + pub replicas: Vec<i32>, + pub isr: Vec<i32>, + + pub status: String, +} + +// ----------------------------------- +// Implementation +// ----------------------------------- +impl KfTopicMetadata { + pub fn new(response_topic: &MetadataResponseTopic) -> Self { + // if error is present, convert it + let error = if response_topic.error_code.is_error() { + Some(response_topic.error_code) + } else { + None + }; + + // convert topic + let topic = if error.is_none() { + Some(Topic::new(&response_topic)) + } else { + None + }; + + // build topic metadata + KfTopicMetadata { + name: response_topic.name.clone(), + error: error, + topic: topic, + } + } +} + +impl Topic { + pub fn new(response_topic: &MetadataResponseTopic) -> Self { + // convert partition replicas + let mut partitions: Vec<PartitionReplica> = vec![]; + for response_partition in &response_topic.partitions { + partitions.push(PartitionReplica::new(response_partition)); + } + + // compute partitions & replication factor + let partition_cnt = response_topic.partitions.len() as i32; + let replication_factor_cnt = if partition_cnt > 0 { + response_topic.partitions[0].replica_nodes.len() as i32 + } else { + 0 + }; + + Topic { + is_internal: response_topic.is_internal, + partitions: partition_cnt, + replication_factor: replication_factor_cnt, + partition_map: partitions, + } + } +} + +impl PartitionReplica { + pub fn new(response_partition: &MetadataResponsePartition) -> Self { + PartitionReplica { + id: response_partition.partition_index, + leader: response_partition.leader_id, + replicas: response_partition.replica_nodes.clone(), + isr: response_partition.isr_nodes.clone(), + status: response_partition.error_code.to_string(), + } + } +} + +// ----------------------------------- +// Process Request +// ----------------------------------- + +/// Query Kafka server for Topics and convert to Topic Metadata +pub fn query_kf_topic_metadata( + server_addr: SocketAddr, + names: Option<Vec<String>>, +) -> Result<Vec<KfTopicMetadata>, CliError> { + match run_block_on(get_version_and_query_metadata(server_addr, names)) { + Err(err) => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("cannot retrieve metadata: {}", err), + ))), + Ok(metadata) => { + let mut topic_metadata_list: Vec<KfTopicMetadata> = vec![]; + for topic in &metadata.topics { + topic_metadata_list.push(KfTopicMetadata::new(topic)); + } + Ok(topic_metadata_list) + } + } +} + +// Connect to Kafka Controller, get version and query metadata
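+// (The synchronous query_kf_topic_metadata() above drives this future with +// run_block_on; a hedged call-site sketch, where addr is a SocketAddr: +// let metadata = run_block_on(get_version_and_query_metadata(addr, None))?; )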
+async fn get_version_and_query_metadata( + server_addr: SocketAddr, + names: Option<Vec<String>>, +) -> Result<KfMetadataResponse, CliError> { + let mut conn = Connection::new(&server_addr).await?; + let versions = kf_get_api_versions(&mut conn).await?; + + query_kf_metadata(&mut conn, names, &versions).await +} diff --git a/cli/src/topic/helpers/topic_metadata_sc.rs b/cli/src/topic/helpers/topic_metadata_sc.rs new file mode 100644 index 0000000000..e5e536a515 --- /dev/null +++ b/cli/src/topic/helpers/topic_metadata_sc.rs @@ -0,0 +1,238 @@ +//! +//! # Fluvio SC - Query Topics +//! +//! Communicates with Fluvio Streaming Controller to retrieve Topics and convert +//! them to ScTopicMetadata +//! + +use std::net::SocketAddr; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use serde::Serialize; +use log::trace; + +use future_helper::run_block_on; + +use sc_api::apis::ScApiKey; +use sc_api::topic::FlvFetchTopicsRequest; +use sc_api::topic::FlvFetchTopicResponse; +use sc_api::topic::FlvFetchTopic; +use sc_api::topic::FlvTopicResolution; +use sc_api::topic::FlvTopicSpecMetadata; +use sc_api::topic::FlvPartitionReplica; +use sc_api::errors::FlvErrorCode; + +use crate::error::CliError; +use crate::common::Connection; +use crate::common::sc_get_api_versions; +use crate::common::sc_lookup_version; + +// ----------------------------------- +// ScTopicMetadata (Serializable) +// ----------------------------------- + +#[derive(Serialize, Debug)] +pub struct ScTopicMetadata { + pub name: String, + + #[serde(skip_serializing_if = "Option::is_none")] + pub error: Option<FlvErrorCode>, + + #[serde(skip_serializing_if = "Option::is_none")] + pub topic: Option<Topic>, +} + +#[derive(Serialize, Debug)] +pub struct Topic { + pub type_computed: bool, + + #[serde(skip_serializing_if = "Option::is_none")] + pub assigned_partitions: Option<String>, + + #[serde(skip_serializing_if = "Option::is_none")] + pub partitions: Option<i32>, + + #[serde(skip_serializing_if = "Option::is_none")] + pub replication_factor: Option<i32>, + + pub ignore_rack_assignment: bool, + pub status: TopicResolution, + pub reason: String, + + #[serde(skip_serializing_if = "Option::is_none")] + pub partition_map: Option<Vec<PartitionReplica>>, +} + +#[derive(Serialize, Debug)] +pub struct PartitionReplica { + pub id: i32, + pub leader: i32, + pub replicas: Vec<i32>, + pub live_replicas: Vec<i32>, +} + +#[derive(Serialize, Debug)] +pub enum TopicResolution { + Provisioned, + Init, + Pending, + InsufficientResources, + InvalidConfig, +} + +// ----------------------------------- +// Implementation +// ----------------------------------- +impl ScTopicMetadata { + pub fn new(fetched_topic_metadata: &FlvFetchTopicResponse) -> Self { + // if topic is present, convert it + let topic = if let Some(fetched_topic) = &fetched_topic_metadata.topic { + Some(Topic::new(fetched_topic)) + } else { + None + }; + + // if error is present, convert it + let error = if fetched_topic_metadata.error_code.is_error() { + Some(fetched_topic_metadata.error_code) + } else { + None + }; + + // topic metadata with all parameters converted + ScTopicMetadata { + name: fetched_topic_metadata.name.clone(), + error: error, + topic: topic, + } + } +} + +impl Topic { + pub fn new(fetched_topic: &FlvFetchTopic) -> Self { + let topic_resolution = TopicResolution::new(&fetched_topic.status.resolution); + + // convert partition replicas + let partition_replicas = + if let Some(flv_partition_replicas) = &fetched_topic.partition_replicas { + let mut partition_replicas: Vec<PartitionReplica> = vec![]; + for flv_partition_replica in flv_partition_replicas {
partition_replicas.push(PartitionReplica::new(flv_partition_replica)); + } + Some(partition_replicas) + } else { + None + }; + + // create Topic + Topic { + type_computed: fetched_topic.spec.is_computed(), + assigned_partitions: fetched_topic.spec.partition_map_str(), + partitions: fetched_topic.spec.partitions(), + replication_factor: fetched_topic.spec.replication_factor(), + ignore_rack_assignment: fetched_topic.spec.ignore_rack_assignment(), + status: topic_resolution, + reason: fetched_topic.status.reason_str().clone(), + partition_map: partition_replicas, + } + } + pub fn status_label(&self) -> &'static str { + TopicResolution::resolution_label(&self.status) + } + + pub fn type_label(&self) -> &'static str { + FlvTopicSpecMetadata::type_label(&self.type_computed) + } + + pub fn ignore_rack_assign_str(&self) -> &'static str { + FlvTopicSpecMetadata::ignore_rack_assign_str(&self.ignore_rack_assignment) + } + + pub fn partitions_str(&self) -> String { + FlvTopicSpecMetadata::partitions_str(&self.partitions) + } + + pub fn replication_factor_str(&self) -> String { + FlvTopicSpecMetadata::replication_factor_str(&self.replication_factor) + } +} + +impl PartitionReplica { + pub fn new(flv_partition_replica: &FlvPartitionReplica) -> Self { + PartitionReplica { + id: flv_partition_replica.id, + leader: flv_partition_replica.leader, + replicas: flv_partition_replica.replicas.clone(), + live_replicas: flv_partition_replica.live_replicas.clone(), + } + } +} + +impl TopicResolution { + pub fn new(flv_topic_resolution: &FlvTopicResolution) -> Self { + match flv_topic_resolution { + FlvTopicResolution::Provisioned => TopicResolution::Provisioned, + FlvTopicResolution::Init => TopicResolution::Init, + FlvTopicResolution::Pending => TopicResolution::Pending, + FlvTopicResolution::InsufficientResources => TopicResolution::InsufficientResources, + FlvTopicResolution::InvalidConfig => TopicResolution::InvalidConfig, + } + } + + pub fn resolution_label(resolution: &TopicResolution) -> &'static str { + match resolution { + TopicResolution::Provisioned => "provisioned", + TopicResolution::Init => "initializing", + TopicResolution::Pending => "pending", + TopicResolution::InsufficientResources => "no-resource-for-replica-map", + TopicResolution::InvalidConfig => "invalid-config", + } + } +} + +// ----------------------------------- +// Query Server & Convert to Metadata +// ----------------------------------- + +/// Query Fluvio SC server for Topics and convert to Topic Metadata +pub fn query_sc_topic_metadata( + server_addr: SocketAddr, + names: Option<Vec<String>>, +) -> Result<Vec<ScTopicMetadata>, CliError> { + match run_block_on(send_request_to_server(server_addr, names)) { + Err(err) => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + format!("cannot retrieve topics: {}", err), + ))), + Ok(fetched_topics) => { + let mut topic_metadata_list: Vec<ScTopicMetadata> = vec![]; + for fetched_topic in &fetched_topics { + topic_metadata_list.push(ScTopicMetadata::new(fetched_topic)); + } + Ok(topic_metadata_list) + } + } +} + +async fn send_request_to_server<'a>( + server_addr: SocketAddr, + names: Option<Vec<String>>, +) -> Result<Vec<FlvFetchTopicResponse>, CliError> { + // look-up version + let mut conn = Connection::new(&server_addr).await?; + let versions = sc_get_api_versions(&mut conn).await?; + let version = sc_lookup_version(ScApiKey::FlvFetchTopics, &versions); + + // generate request + let mut request = FlvFetchTopicsRequest::default(); + request.names = names; + + trace!("query topic req '{}': {:#?}", server_addr, request); + + let response = conn.send_request(request, version).await?; + + trace!("query topic res '{}': {:#?}", server_addr, response); + + Ok(response.topics) +} diff --git a/cli/src/topic/list.rs b/cli/src/topic/list.rs new file mode 100644 index 0000000000..026d885d24 --- /dev/null +++ b/cli/src/topic/list.rs @@ -0,0 +1,90 @@ +//! +//! # List Topics CLI +//! +//! CLI tree and processing to list Topics +//! +use std::io::Error as IoError; +use std::io::ErrorKind; + +use structopt::StructOpt; + +use crate::error::CliError; +use crate::common::OutputType; +use crate::profile::{ProfileConfig, TargetServer}; + +use super::helpers::process_sc_list_topics; +use super::helpers::process_kf_list_topics; + +// ----------------------------------- +// CLI Options +// ----------------------------------- + +#[derive(Debug, StructOpt)] +pub struct ListTopicsOpt { + /// Address of Streaming Controller + #[structopt(short = "c", long = "sc", value_name = "host:port")] + sc: Option<String>, + + /// Address of Kafka Controller + #[structopt( + short = "k", + long = "kf", + value_name = "host:port", + conflicts_with = "sc" + )] + kf: Option<String>, + + /// Profile name + #[structopt(short = "P", long = "profile")] + pub profile: Option<String>, + + /// Output + #[structopt( + short = "O", + long = "output", + value_name = "type", + raw(possible_values = "&OutputType::variants()", case_insensitive = "true") + )] + output: Option<OutputType>, +} + +// ----------------------------------- +// Parsed Config +// ----------------------------------- + +#[derive(Debug)] +pub struct ListTopicsConfig { + pub output: OutputType, +} + +// ----------------------------------- +// CLI Processing +// ----------------------------------- + +/// Process list topics cli request +pub fn process_list_topics(opt: ListTopicsOpt) -> Result<(), CliError> { + let (target_server, list_topic_cfg) = parse_opt(opt)?; + + match target_server { + TargetServer::Kf(server_addr) => process_kf_list_topics(server_addr, &list_topic_cfg), + TargetServer::Sc(server_addr) => process_sc_list_topics(server_addr, &list_topic_cfg), + TargetServer::Spu(_) => Err(CliError::IoError(IoError::new( + ErrorKind::Other, + "SPU does not implement list topics", + ))), + } +} + +/// Validate cli options and generate config +fn parse_opt(opt: ListTopicsOpt) -> Result<(TargetServer, ListTopicsConfig), CliError> { + let profile_config = ProfileConfig::new(&opt.sc, &opt.kf, &opt.profile)?; + let target_server = profile_config.target_server()?; + + // transfer config parameters + let list_topics_cfg = ListTopicsConfig { + output: opt.output.unwrap_or(OutputType::default()), + }; + + // return server separately from topic result + Ok((target_server, list_topics_cfg)) +} diff --git a/cli/src/topic/mod.rs b/cli/src/topic/mod.rs new file mode 100644 index 0000000000..a84fc08fac --- /dev/null +++ b/cli/src/topic/mod.rs @@ -0,0 +1,64 @@ +mod helpers; +mod create; +mod delete; +mod describe; +mod list; + +use structopt::StructOpt; + +use create::CreateTopicOpt; +use delete::DeleteTopicOpt; +use describe::DescribeTopicsOpt; +use list::ListTopicsOpt; + +use create::process_create_topic; +use delete::process_delete_topic; +use describe::process_describe_topics; +use list::process_list_topics; + +use super::CliError; + +#[derive(Debug, StructOpt)] +#[structopt(name = "topic", author = "", about = "Topic operations")] +pub enum TopicOpt { + #[structopt(name = "create", author = "", template = "{about} + +{usage} + +{all-args} +",about = "Create a topic")] + Create(CreateTopicOpt), + + #[structopt(name = "delete", author = "", template = "{about} + +{usage} +
+{all-args} +",about = "Delete a topic")] + Delete(DeleteTopicOpt), + + #[structopt(name = "describe", author = "", template = "{about} + +{usage} + +{all-args} +",about = "Show details of a topic")] + Describe(DescribeTopicsOpt), + + #[structopt(name = "list", author = "", template = "{about} + +{usage} + +{all-args} +",about = "Show all topics")] + List(ListTopicsOpt), +} + +pub(crate) fn process_topic(topic_opt: TopicOpt) -> Result<(), CliError> { + match topic_opt { + TopicOpt::Create(create_topic_opt) => process_create_topic(create_topic_opt), + TopicOpt::Delete(delete_topic_opt) => process_delete_topic(delete_topic_opt), + TopicOpt::Describe(describe_topics_opt) => process_describe_topics(describe_topics_opt), + TopicOpt::List(list_topics_opt) => process_list_topics(list_topics_opt), + } +} diff --git a/cli/test-data/group-cfg/valid_config.json b/cli/test-data/group-cfg/valid_config.json new file mode 100644 index 0000000000..bdf0c65c1b --- /dev/null +++ b/cli/test-data/group-cfg/valid_config.json @@ -0,0 +1,19 @@ +{ + "storage": { + "logDir": "/tmp/fluvio", + "size": "2Gi" + }, + "replication": { + "inSyncReplicaMin ": 5 + }, + "env": [ + { + "name": "ENV-VAR1", + "value": "one" + }, + { + "name": "ENV-VAR2", + "value": "two" + } + ] +} \ No newline at end of file diff --git a/cli/test-data/input-files/icon1.png b/cli/test-data/input-files/icon1.png new file mode 100644 index 0000000000..0bc6b41696 Binary files /dev/null and b/cli/test-data/input-files/icon1.png differ diff --git a/cli/test-data/input-files/icon2.png b/cli/test-data/input-files/icon2.png new file mode 100644 index 0000000000..c396a0e1cc Binary files /dev/null and b/cli/test-data/input-files/icon2.png differ diff --git a/cli/test-data/input-files/json-file.json b/cli/test-data/input-files/json-file.json new file mode 100644 index 0000000000..de82c8cc51 --- /dev/null +++ b/cli/test-data/input-files/json-file.json @@ -0,0 +1,12 @@ +{ + "partitions": [ + { + "id": 0, + "replicas": [ + 5001, + 5002, + 5003 + ] + } + ] +} \ No newline at end of file diff --git a/cli/test-data/input-files/text-file.txt b/cli/test-data/input-files/text-file.txt new file mode 100644 index 0000000000..f833565907 --- /dev/null +++ b/cli/test-data/input-files/text-file.txt @@ -0,0 +1,4 @@ +Lorem Ipsum is simply dummy text. +Lorem Ipsum has been the industry's standard since 1500's. +It has survived over five centuries. +Still here today. 
\ No newline at end of file diff --git a/cli/test-data/profiles/default.toml b/cli/test-data/profiles/default.toml new file mode 100644 index 0000000000..7c06625e94 --- /dev/null +++ b/cli/test-data/profiles/default.toml @@ -0,0 +1,13 @@ +version = "1.0" + +[sc] +host = "127.0.0.1" +port = 9033 + +[spu] +host = "127.0.0.1" +port = 9034 + +[kf] +host = "127.0.0.1" +port = 9093 diff --git a/cli/test-data/profiles/invalid.toml b/cli/test-data/profiles/invalid.toml new file mode 100644 index 0000000000..de0971ff1b --- /dev/null +++ b/cli/test-data/profiles/invalid.toml @@ -0,0 +1,9 @@ +version = "1.0" + +[sc] +host = "127.0.0.1" + + + + + diff --git a/cli/test-data/topics/out_of_order.json b/cli/test-data/topics/out_of_order.json new file mode 100644 index 0000000000..0ed06e404f --- /dev/null +++ b/cli/test-data/topics/out_of_order.json @@ -0,0 +1,28 @@ +{ + "partitions": [ + { + "id": 0, + "replicas": [ + 5001, + 5002, + 5003 + ] + }, + { + "id": 2, + "replicas": [ + 5003, + 5001, + 5002 + ] + }, + { + "id": 1, + "replicas": [ + 5002, + 5003, + 5001 + ] + } + ] +} \ No newline at end of file diff --git a/cli/test-data/topics/valid_assignment.json b/cli/test-data/topics/valid_assignment.json new file mode 100644 index 0000000000..e2a7460615 --- /dev/null +++ b/cli/test-data/topics/valid_assignment.json @@ -0,0 +1,28 @@ +{ + "partitions": [ + { + "id": 0, + "replicas": [ + 5001, + 5002, + 5003 + ] + }, + { + "id": 1, + "replicas": [ + 5002, + 5003, + 5001 + ] + }, + { + "id": 2, + "replicas": [ + 5003, + 5001, + 5002 + ] + } + ] +} \ No newline at end of file diff --git a/cli/test-data/topics/valid_assignment1.json b/cli/test-data/topics/valid_assignment1.json new file mode 100644 index 0000000000..4de46c72bd --- /dev/null +++ b/cli/test-data/topics/valid_assignment1.json @@ -0,0 +1,10 @@ +{ + "partitions": [ + { + "id": 0, + "replicas": [ + 5001 + ] + } + ] +} \ No newline at end of file diff --git a/cli/test-data/topics/valid_kf_assignment.json b/cli/test-data/topics/valid_kf_assignment.json new file mode 100644 index 0000000000..13fe61754f --- /dev/null +++ b/cli/test-data/topics/valid_kf_assignment.json @@ -0,0 +1,25 @@ +{ + "partitions": [ + { + "id": 0, + "replicas": [ + 1, + 2 + ] + }, + { + "id": 1, + "replicas": [ + 2, + 3 + ] + }, + { + "id": 2, + "replicas": [ + 3, + 1 + ] + } + ] +} \ No newline at end of file diff --git a/dev-tools/log/debug-sc-all b/dev-tools/log/debug-sc-all new file mode 100755 index 0000000000..99c64c750d --- /dev/null +++ b/dev-tools/log/debug-sc-all @@ -0,0 +1,5 @@ +#!/bin/sh +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +RELATIVE_PATH="$DIR"/../../target/debug + +RUST_LOG=sc_server=trace,cluster_cache=debug,k8_client=error,tokio_reactor=error "$RELATIVE_PATH"/sc-server diff --git a/dev-tools/log/debug-sc-client b/dev-tools/log/debug-sc-client new file mode 100755 index 0000000000..c387905410 --- /dev/null +++ b/dev-tools/log/debug-sc-client @@ -0,0 +1 @@ +RUST_LOG=sc_server=debug,cluster_cache=debug,k8_client=trace,tokio_reactor=error target/debug/sc-server diff --git a/dev-tools/log/debug-sc-min b/dev-tools/log/debug-sc-min new file mode 100755 index 0000000000..054a0d5593 --- /dev/null +++ b/dev-tools/log/debug-sc-min @@ -0,0 +1,5 @@ +#!/bin/sh +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +RELATIVE_PATH="$DIR"/../../target/debug + +RUST_LOG=sc_server=debug,cluster_cache=debug,k8_client=error,tokio_reactor=error "$RELATIVE_PATH"/sc-server $@ diff --git a/dev-tools/log/debug-sc-server 
b/dev-tools/log/debug-sc-server new file mode 100755 index 0000000000..adfb47f06e --- /dev/null +++ b/dev-tools/log/debug-sc-server @@ -0,0 +1 @@ +RUST_LOG=sc_server=trace,k8_config=debug,k8_client=debug,tokio_reactor=error target/debug/sc-server diff --git a/dev-tools/log/debug-spu-all b/dev-tools/log/debug-spu-all new file mode 100755 index 0000000000..079394190f --- /dev/null +++ b/dev-tools/log/debug-spu-all @@ -0,0 +1,9 @@ +#!/bin/sh +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +RELATIVE_PATH="$DIR"/../../target/debug + +ARG1=${1} +ARG2=${2} +ARG3=${3} + +RUST_LOG=spu_server=trace,future_helper=trace,tokio_reactor=error "$RELATIVE_PATH"/spu-server -i "$ARG1" -p 0.0.0.0:"$ARG2" -v 0.0.0.0:"$ARG3" diff --git a/dev-tools/log/debug-spu-min b/dev-tools/log/debug-spu-min new file mode 100755 index 0000000000..ef88d199bd --- /dev/null +++ b/dev-tools/log/debug-spu-min @@ -0,0 +1,9 @@ +#!/bin/sh +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +RELATIVE_PATH="$DIR"/../../target/debug + +ARG1=${1} +ARG2=${2} +ARG3=${3} + +RUST_LOG=spu_server=debug,tokio_reactor=error "$RELATIVE_PATH"/spu-server -i "$ARG1" -p 0.0.0.0:"$ARG2" -v 0.0.0.0:"$ARG3" diff --git a/dev-tools/log/test-sc-min b/dev-tools/log/test-sc-min new file mode 100755 index 0000000000..86746c59e2 --- /dev/null +++ b/dev-tools/log/test-sc-min @@ -0,0 +1,3 @@ +#!/bin/sh +ARG1=${1} +RUST_LOG=sc_server=trace,future_helper=error,tokio_reactor=error cargo test "$ARG1" diff --git a/dev-tools/log/test-spu-min b/dev-tools/log/test-spu-min new file mode 100755 index 0000000000..c5bec6d651 --- /dev/null +++ b/dev-tools/log/test-spu-min @@ -0,0 +1,3 @@ +#!/bin/sh +ARG1=${1} +RUST_LOG=spu_server=trace,future_helper=error,tokio_reactor=error cargo test "$ARG1" diff --git a/dev-tools/minikube-mycube.sh b/dev-tools/minikube-mycube.sh new file mode 100755 index 0000000000..0000b8ed74 --- /dev/null +++ b/dev-tools/minikube-mycube.sh @@ -0,0 +1,4 @@ +#!/bin/bash +kubectl config set-cluster mycube --server=https://minikubeCA:8443 --certificate-authority=.minikube/ca.crt +kubectl config set-context mycube --user=minikube --cluster=mycube +kubectl config use-context mycube \ No newline at end of file diff --git a/dev-tools/minikube-tunnel.sh b/dev-tools/minikube-tunnel.sh new file mode 100755 index 0000000000..ca84343271 --- /dev/null +++ b/dev-tools/minikube-tunnel.sh @@ -0,0 +1,2 @@ +#!/bin/bash +nohup minikube tunnel > /tmp/tunnel.out 2> /tmp/tunnel.out & diff --git a/future-aio/Cargo.toml b/future-aio/Cargo.toml new file mode 100644 index 0000000000..aba2775fac --- /dev/null +++ b/future-aio/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "future-aio" +version = "0.1.0-alpha.1" +edition = "2018" +authors = ["fluvio.io"] + + +[dependencies] +log = "0.4.8" +nix = "0.15.0" +bytes = "0.4.12" +tokio_1 = { version = "0.1.18", package = "tokio" } +tokio-threadpool-1 = { version = "0.1.13", package = "tokio-threadpool" } +memmap = "0.7.0" +futures-preview = { version = "0.3.0-alpha.17" } +future-helper = { path = "../future-helper"} +futures_1 = { version = "0.1.25", package = "futures" } +pin-utils = "0.1.0-alpha.4" + + +[dev-dependencies] +future-helper = { path = "../future-helper", features=["fixture"]} +num_cpus = { version = "1.10.1" } +utils = { path= "../utils", features= ["fixture"]} \ No newline at end of file diff --git a/future-aio/rust-toolchain b/future-aio/rust-toolchain new file mode 120000 index 0000000000..9327ba4034 --- /dev/null +++ b/future-aio/rust-toolchain @@ -0,0 +1 @@ 
+../rust-toolchain \ No newline at end of file diff --git a/future-aio/src/compat/mod.rs b/future-aio/src/compat/mod.rs new file mode 100644 index 0000000000..f8df26b5ec --- /dev/null +++ b/future-aio/src/compat/mod.rs @@ -0,0 +1,3 @@ +mod sink; + +pub use sink::Compat01As03Sink; diff --git a/future-aio/src/compat/sink.rs b/future-aio/src/compat/sink.rs new file mode 100644 index 0000000000..be2854f61c --- /dev/null +++ b/future-aio/src/compat/sink.rs @@ -0,0 +1,215 @@ +use std::pin::Pin; +use std::marker::Unpin; +use std::task::Context; + +use futures_1::executor::{ + spawn as spawn01, Notify as Notify01, NotifyHandle as NotifyHandle01, + Spawn as Spawn01, UnsafeNotify as UnsafeNotify01, +}; +use futures_1::{ + Async as Async01, AsyncSink as AsyncSink01, + Sink as Sink01, Stream as Stream01, +}; +use futures::{task as task03, Stream as Stream03}; +use futures::sink::Sink as Sink03; + + +struct NotifyWaker(task03::Waker); + +// from futures source +#[derive(Clone)] +struct WakerToHandle<'a>(&'a task03::Waker); + +impl<'a> From<WakerToHandle<'a>> for NotifyHandle01 { + fn from(handle: WakerToHandle<'a>) -> NotifyHandle01 { + let ptr = Box::new(NotifyWaker(handle.0.clone())); + + unsafe { NotifyHandle01::new(Box::into_raw(ptr)) } + } +} + +impl Notify01 for NotifyWaker { + fn notify(&self, _: usize) { + self.0.wake_by_ref(); + } +} + +unsafe impl UnsafeNotify01 for NotifyWaker { + unsafe fn clone_raw(&self) -> NotifyHandle01 { + WakerToHandle(&self.0).into() + } + + unsafe fn drop_raw(&self) { + let ptr: *const dyn UnsafeNotify01 = self; + drop(Box::from_raw(ptr as *mut dyn UnsafeNotify01)); + } +} + + + +#[derive(Debug)] +#[must_use = "futures do nothing unless polled"] +pub struct Compat01As03Sink<S, SinkItem> { + pub(crate) inner: Spawn01<S>, + pub(crate) buffer: Option<SinkItem>, + pub(crate) close_started: bool, +} + +impl<S, SinkItem> Unpin for Compat01As03Sink<S, SinkItem> {} + +impl<S, SinkItem> Compat01As03Sink<S, SinkItem> { + /// Wraps a futures 0.1 Sink object in a futures 0.3-compatible wrapper.
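+ // Bridge sketch: a futures 0.1 Sink exposes a single start_send() that may +// refuse an item (AsyncSink::NotReady), while a 0.3 Sink splits readiness into +// poll_ready() + start_send(). The one-slot buffer above papers over that +// difference: the 0.3 start_send() parks the item, and poll_ready()/poll_flush() +// retry it against the wrapped 0.1 sink.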
+ pub fn new(inner: S) -> Compat01As03Sink<S, SinkItem> { + Compat01As03Sink { + inner: spawn01(inner), + buffer: None, + close_started: false + } + } + + fn in_notify<R>( + &mut self, + cx: &mut Context, + f: impl FnOnce(&mut S) -> R, + ) -> R { + let notify = &WakerToHandle(cx.waker()); + self.inner.poll_fn_notify(notify, 0, f) + } + + #[allow(dead_code)] + pub(crate) fn get_inner(&self) -> &S { + self.inner.get_ref() + } + + +} + +impl<S, SinkItem> Stream03 for Compat01As03Sink<S, SinkItem> +where + S: Stream01, +{ + type Item = Result<S::Item, S::Error>; + + fn poll_next( + mut self: Pin<&mut Self>, + cx: &mut Context, + ) -> task03::Poll<Option<Self::Item>> { + match self.in_notify(cx, |f| f.poll()) { + Ok(Async01::Ready(Some(t))) => task03::Poll::Ready(Some(Ok(t))), + Ok(Async01::Ready(None)) => task03::Poll::Ready(None), + Ok(Async01::NotReady) => task03::Poll::Pending, + Err(e) => task03::Poll::Ready(Some(Err(e))), + } + } +} + +impl<S, SinkItem> Sink03<SinkItem> for Compat01As03Sink<S, SinkItem> +where + S: Sink01<SinkItem = SinkItem>, +{ + type Error = S::SinkError; + + fn start_send( + mut self: Pin<&mut Self>, + item: SinkItem + ) -> Result<(), Self::Error> { + debug_assert!(self.buffer.is_none()); + self.buffer = Some(item); + Ok(()) + } + + fn poll_ready( + mut self: Pin<&mut Self>, + cx: &mut Context, + ) -> task03::Poll<Result<(), Self::Error>> { + match self.buffer.take() { + Some(item) => match self.in_notify(cx, |f| f.start_send(item)) { + Ok(AsyncSink01::Ready) => task03::Poll::Ready(Ok(())), + Ok(AsyncSink01::NotReady(i)) => { + self.buffer = Some(i); + task03::Poll::Pending + } + Err(e) => task03::Poll::Ready(Err(e)), + }, + None => task03::Poll::Ready(Ok(())), + } + } + + fn poll_flush( + mut self: Pin<&mut Self>, + cx: &mut Context, + ) -> task03::Poll<Result<(), Self::Error>> { + let item = self.buffer.take(); + match self.in_notify(cx, |f| match item { + Some(i) => match f.start_send(i) { + Ok(AsyncSink01::Ready) => f.poll_complete().map(|i| (i, None)), + Ok(AsyncSink01::NotReady(t)) => { + Ok((Async01::NotReady, Some(t))) + } + Err(e) => Err(e), + }, + None => f.poll_complete().map(|i| (i, None)), + }) { + Ok((Async01::Ready(_), _)) => task03::Poll::Ready(Ok(())), + Ok((Async01::NotReady, item)) => { + self.buffer = item; + task03::Poll::Pending + } + Err(e) => task03::Poll::Ready(Err(e)), + } + } + + fn poll_close( + mut self: Pin<&mut Self>, + cx: &mut Context, + ) -> task03::Poll<Result<(), Self::Error>> { + let item = self.buffer.take(); + let close_started = self.close_started; + + match self.in_notify(cx, |f| match item { + Some(i) => match f.start_send(i) { + Ok(AsyncSink01::Ready) => { + match f.poll_complete() { + Ok(Async01::Ready(_)) => { + match <S as Sink01>::close(f) { + Ok(i) => Ok((i, None, true)), + Err(e) => Err(e) + } + }, + Ok(Async01::NotReady) => Ok((Async01::NotReady, None, false)), + Err(e) => Err(e) + } + }, + Ok(AsyncSink01::NotReady(t)) => { + Ok((Async01::NotReady, Some(t), close_started)) + } + Err(e) => Err(e), + }, + None => if close_started { + match <S as Sink01>::close(f) { + Ok(i) => Ok((i, None, true)), + Err(e) => Err(e) + } + } else { + match f.poll_complete() { + Ok(Async01::Ready(_)) => { + match <S as Sink01>::close(f) { + Ok(i) => Ok((i, None, true)), + Err(e) => Err(e) + } + }, + Ok(Async01::NotReady) => Ok((Async01::NotReady, None, close_started)), + Err(e) => Err(e) + } + }, + }) { + Ok((Async01::Ready(_), _, _)) => task03::Poll::Ready(Ok(())), + Ok((Async01::NotReady, item, close_started)) => { + self.buffer = item; + self.close_started = close_started; + task03::Poll::Pending + } + Err(e) => task03::Poll::Ready(Err(e)), + } + } +} \ No newline at end of file diff --git a/future-aio/src/fs/async_file.rs b/future-aio/src/fs/async_file.rs new file mode 100644
index 0000000000..60e4735e46 --- /dev/null +++ b/future-aio/src/fs/async_file.rs @@ -0,0 +1,380 @@ +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::io::Write; +use std::io::Read; +use std::fs::File as SyncFile; +use std::fs::Metadata as SyncMetadata; +use std::fs::metadata as sync_metadata_fn; +use std::io::SeekFrom; +use std::io::Seek; +use std::path::Path; +use std::path::PathBuf; +use std::pin::Pin; +use std::task::Poll; +use std::task::Context; + + +#[cfg(unix)] +use std::os::unix::io::AsRawFd; +use std::os::unix::io::RawFd; + +use log::trace; +use futures::io::AsyncWrite; +use futures::io::AsyncRead; +use futures::Future; +use pin_utils::pin_mut; + + +use crate::asyncify; +use crate::AsyncWrite2; +use super::AsyncFileSlice; + + +#[derive(Debug)] +pub struct AsyncFile{ + file: SyncFile, + path: PathBuf +} + + +impl Drop for AsyncFile { + fn drop(&mut self) { + trace!("dropping file: {:#?}",self.path); + } +} + + + +impl std::fmt::Display for AsyncFile { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + + write!(f, "path = {:#?},fd = {:#?}", self.path,self.file.as_raw_fd()) + } +} + +impl AsyncFile { + + fn new
<P>(file: SyncFile,path: P) -> Self where P: AsRef<Path> { + AsyncFile { + file, + path: path.as_ref().to_path_buf() + } + } + + /// open for write only + pub async fn create<P>(path: P) -> Result<Self, IoError> where P: AsRef<Path> + { + let file_path = path.as_ref(); + asyncify( move || SyncFile::create(file_path)).await.map(|sf| AsyncFile::new(sf,file_path)) + + } + + /// open for only read + pub async fn open<P>(path: P) -> Result<Self, IoError> + where P: AsRef<Path> + { + let file_path = path.as_ref(); + let sf = asyncify( || SyncFile::open(file_path)).await?; + Ok(AsyncFile::new(sf,file_path)) + } + + /// open for read and write + pub async fn open_read_write<P>(path: P) -> Result<Self, IoError> + where P: AsRef<Path> + { + let file_path = path.as_ref(); + let mut option = std::fs::OpenOptions::new(); + option.read(true) + .write(true) + .create(true) + .append(false); + + let sf = asyncify( || option.open(file_path)).await?; + Ok(AsyncFile::new(sf,file_path)) + } + + pub async fn open_read_append<P>(path: P) -> Result<Self, IoError> + where P: AsRef<Path> + { + let file_path = path.as_ref(); + let mut option = std::fs::OpenOptions::new(); + option.read(true) + .create(true) + .append(true); + + let sf = asyncify( || option.open(file_path)).await?; + Ok(AsyncFile::new(sf,file_path)) + } + + + pub fn get_metadata<P>(path: P) -> impl Future<Output = Result<SyncMetadata, IoError>> + where P: AsRef<Path> + { + asyncify( move || sync_metadata_fn(path)) + } + + + pub fn from_std(file: SyncFile,path: PathBuf) -> Self { + AsyncFile::new(file,path) + } + + pub fn get_path(&self) -> &Path { + self.path.as_path() + } + + + pub fn set_len(&mut self,size: u64) -> impl Future<Output = Result<(), IoError>> + '_{ + asyncify( move || self.file.set_len(size)) + } + + pub fn sync_all(&mut self) -> impl Future<Output = Result<(), IoError>> + '_ { + asyncify( move || self.file.sync_all()) + } + + pub fn metadata(&self) -> impl Future<Output = Result<SyncMetadata, IoError>> + '_ { + asyncify( move || self.file.metadata()) + } + + pub fn write<'a>(&'a mut self, buf: &'a [u8]) -> impl Future<Output = Result<usize, IoError>> + 'a { + asyncify( move || self.file.write(buf)) + } + + + pub fn read<'a>(&'a mut self,buf: &'a mut [u8]) -> impl Future<Output = Result<usize, IoError>> + 'a { + asyncify( move || self.file.read(buf)) + } + + pub fn seek(&mut self, pos: SeekFrom) -> impl Future<Output = Result<u64, IoError>> + '_ { + asyncify( move || self.file.seek(pos)) + } + + + + pub async fn try_clone(&self) -> Result<Self, IoError> { + let sf = asyncify(|| self.file.try_clone()).await?; + Ok(AsyncFile::from_std(sf,self.path.clone())) + } + + + // create a separate read-only handle by re-opening the path + pub async fn read_clone(&self) -> Result<Self, IoError> { + trace!("creating clone for path: {:#?}",self.path); + Self::open(&self.path).await + } + + pub async fn reset_to_beggining(&mut self) -> Result<(),IoError> { + self.seek(SeekFrom::Start(0)).await.map(|_| ()) + } + + /// return a raw slice with the file descriptor; this does not check bounds + pub fn raw_slice(&self, position: u64, len: u64) -> AsyncFileSlice { + AsyncFileSlice::new( + self.as_raw_fd(), + position, + len + ) + } + + /// Extract slice of file using file descriptor + pub async fn as_slice(&self, position: u64,desired_len_opt: Option<u64>) -> Result<AsyncFileSlice, IoError> { + + let metadata = self.metadata().await?; + let len = metadata.len(); + + if position >= len { + return Err(IoError::new( + ErrorKind::UnexpectedEof, + "position is greater than available len", + )); + } + let slice_len = if let Some(desired_len) = desired_len_opt { + if position + desired_len >= len { + return Err(IoError::new( + ErrorKind::UnexpectedEof, + "not enough bytes available", + )); + } + desired_len + } else { + len - position + }; + + trace!("file trace: position: {}, len: {}", position,len); + + Ok(self.raw_slice(position,slice_len)) + } + +} + + +impl AsyncWrite for AsyncFile { + + fn poll_write(mut self: Pin<&mut Self>, ctx: &mut Context, buf: &[u8]) -> Poll<Result<usize, IoError>> { + trace!("writing: {} bytes",buf.len()); + let ft = self.write(buf); + pin_mut!(ft); + ft.poll(ctx) + } + + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), IoError>> { + let ft = self.sync_all(); + pin_mut!(ft); + ft.poll(cx) + } + + + fn poll_close(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Result<(), IoError>> { + Poll::Ready(Ok(())) + } + +} + + +impl AsyncWrite2 for AsyncFile {} + + +impl AsyncRead for AsyncFile { + + fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8]) -> Poll<Result<usize, IoError>> { + trace!("reading bytes"); + let ft = self.read(buf); + pin_mut!(ft); + ft.poll(cx) + } + +} + + +impl AsRawFd for AsyncFile { + fn as_raw_fd(&self) -> RawFd { + self.file.as_raw_fd() + } +} + + + + +#[cfg(test)] +mod tests { + + + + use std::env::temp_dir; + use std::fs::File; + use std::io::Error as IoError; + use std::io::Write; + use std::io::Seek; + use std::io::SeekFrom; + use std::io::Read; + + use futures::io::AsyncReadExt; + use futures::io::AsyncWriteExt; + + use future_helper::test_async; + use utils::fixture::ensure_clean_file; + use super::AsyncFile; + + + // sync seek write and read + // this is used for implementing the async version + #[test] + fn
test_sync_seek_write() -> Result<(),std::io::Error> { + + let mut option = std::fs::OpenOptions::new(); + option.read(true) + .write(true) + .create(true) + .append(false); + + let mut file = option.open("/tmp/x1")?; + file.seek(SeekFrom::Start(0))?; + file.write_all(b"test")?; + // file.write_all(b"kkk")?; + file.sync_all()?; + + let mut f2 = File::open("/tmp/x1")?; + let mut contents = String::new(); + f2.read_to_string(&mut contents)?; + assert_eq!(contents,"test"); + Ok(()) + + } + + + + #[test_async] + async fn async_file_write_read_multiple() -> Result<(), IoError> { + + let test_file_path = temp_dir().join("file_write_test"); + ensure_clean_file(&test_file_path); + + + let mut file = AsyncFile::create(&test_file_path).await?; + file.seek(SeekFrom::Start(0)).await?; + file.write_all(b"test").await?; + + let mut f2 = AsyncFile::create(&test_file_path).await?; + f2.seek(SeekFrom::Start(0)).await?; + f2.write_all(b"xyzt").await?; + + let mut output = Vec::new(); + let mut rfile = AsyncFile::open(&test_file_path).await?; + rfile.read_to_end(&mut output).await?; + assert_eq!(output.len(),4); + let contents = String::from_utf8(output).expect("conversion"); + assert_eq!(contents,"xyzt"); + + Ok(()) + + } + + + #[test_async] + async fn async_file_write_read_same() -> Result<(), IoError> { + + let test_file_path = temp_dir().join("read_write_test"); + ensure_clean_file(&test_file_path); + + let mut output = Vec::new(); + let mut file = AsyncFile::open_read_write(&test_file_path).await?; + file.write_all(b"test").await?; + file.seek(SeekFrom::Start(0)).await?; + file.read_to_end(&mut output).await?; + assert_eq!(output.len(),4); + let contents = String::from_utf8(output).expect("conversion"); + assert_eq!(contents,"test"); + + Ok(()) + + } + + #[test_async] + async fn async_file_write_append_same() -> Result<(), IoError> { + + let test_file_path = temp_dir().join("read_append_test"); + ensure_clean_file(&test_file_path); + + let mut output = Vec::new(); + let mut file = AsyncFile::open_read_append(&test_file_path).await?; + file.write_all(b"test").await?; + file.seek(SeekFrom::Start(0)).await?; + file.write_all(b"xyz").await?; + file.seek(SeekFrom::Start(0)).await?; + file.read_to_end(&mut output).await?; + assert_eq!(output.len(),7); + let contents = String::from_utf8(output).expect("conversion"); + assert_eq!(contents,"testxyz"); + + Ok(()) + + } + + + + + + + +} + diff --git a/future-aio/src/fs/file_01.rs b/future-aio/src/fs/file_01.rs new file mode 100644 index 0000000000..2339a0c227 --- /dev/null +++ b/future-aio/src/fs/file_01.rs @@ -0,0 +1,79 @@ +use std::io; +use futures_01::Future; +use futures_01::future::poll_fn; +use tokio_fs::file::File; + +#[allow(dead_code)] +pub fn file_clone(mut file: File) -> impl Future<Item = File, Error = io::Error> { + poll_fn( move || file.poll_try_clone()) +} + + +pub fn file_sync(mut file: File) -> impl Future<Item = (), Error = io::Error> { + poll_fn( move || file.poll_sync_all()) +} + + +#[cfg(test)] +mod tests { + + use std::sync::Arc; + use std::sync::atomic::{AtomicUsize, Ordering}; + use future_helper::Runtime; + use futures_01::Poll; + use futures_01::Async; + use futures_01::Future; + use log::debug; + + + struct TestFuture { + counter: i32 + } + + impl Future for TestFuture { + + type Item = i32; + type Error = (); + + fn poll(&mut self) -> Poll<Self::Item, Self::Error> { + + //let old_count = self.counter.fetch_add(1, Ordering::SeqCst); + self.counter = self.counter+1; + debug!("live count: {}", self.counter); + + Ok(Async::Ready(self.counter)) + } + } + + + #[test] + fn test_future() { + + let mut rt = Runtime::new().unwrap();
+ + let counter = Arc::new(AtomicUsize::new(0)); + let counter1 = counter.clone(); + let counter2 = counter.clone(); + let tf2 = TestFuture{ counter: 0}; + let stf2 = tf2.shared(); + let c1 = stf2.clone(); + rt.spawn(stf2.map( move | count | { + debug!("count: {}",*count); + counter1.clone().fetch_add(*count as usize, Ordering::SeqCst); + () + } ).map_err(|_| ())); + rt.spawn(c1.map( move | count | { + debug!("second count: {}",*count); + counter2.clone().fetch_add(*count as usize, Ordering::SeqCst); + () + } ).map_err(|_| ())); + + + rt.shutdown_on_idle().wait().unwrap(); + + let value = Arc::try_unwrap(counter).unwrap(); + assert_eq!(value.into_inner(),2); + + } + +} diff --git a/future-aio/src/fs/file_slice.rs b/future-aio/src/fs/file_slice.rs new file mode 100644 index 0000000000..f862e1fe43 --- /dev/null +++ b/future-aio/src/fs/file_slice.rs @@ -0,0 +1,44 @@ +#[cfg(unix)] +use std::os::unix::io::AsRawFd; +use std::os::unix::io::RawFd; + + + +/// Slice of the file +/// This works only on raw fd +#[derive(Default,Debug)] +pub struct AsyncFileSlice { + fd: RawFd, + position: u64, + len: u64 +} + +impl AsyncFileSlice { + + pub fn new(fd: RawFd,position: u64,len: u64) -> Self { + Self { + fd, + position, + len + } + } + + pub fn position(&self) -> u64 { + self.position + } + + pub fn len(&self) -> u64 { + self.len + } + + +} + + +impl AsRawFd for AsyncFileSlice { + + fn as_raw_fd(&self) -> RawFd { + self.fd + } +} + diff --git a/future-aio/src/fs/mmap.rs b/future-aio/src/fs/mmap.rs new file mode 100644 index 0000000000..223391585f --- /dev/null +++ b/future-aio/src/fs/mmap.rs @@ -0,0 +1,241 @@ +// memory mapped file + +use std::fs::OpenOptions; +use std::io::Error as IoError; +use std::path::Path; + +use memmap::Mmap; +use memmap::MmapMut; + +use crate::asyncify; +use crate::fs::AsyncFile; + +/// memory mapped file for writing +pub struct MemoryMappedMutFile(pub MmapMut); + +impl MemoryMappedMutFile { + + + pub async fn create<'a>(m_path: &'a Path, len: u64) -> Result<(Self, AsyncFile), IoError> + { + let (m_map, mfile) = asyncify (move || { + let mfile = OpenOptions::new() + .read(true) + .write(true) + .create(true) + .open(m_path) + .unwrap(); + + mfile.set_len(len)?; + + unsafe { MmapMut::map_mut(&mfile) }.map(|mm_file| (mm_file, mfile)) + }).await?; + + Ok(( + MemoryMappedMutFile::from_mmap(m_map), + AsyncFile::from_std(mfile,m_path.to_owned()), + )) + } + + + fn from_mmap(mmap: MmapMut) -> MemoryMappedMutFile { + MemoryMappedMutFile(mmap) + } + + // get memory file + pub fn get_mut_mem_file(&mut self) -> &mut MmapMut { + &mut self.0 + } + + /// write bytes at the given location + pub fn write_bytes(&mut self, pos: usize, bytes: &Vec<u8>) { + let m_file = self.get_mut_mem_file(); + let m_array = &mut m_file[..]; + for i in 0..bytes.len() { + m_array[i + pos] = bytes[i]; + } + } + + pub async fn flush_ft(&self) -> Result<(),IoError> { + asyncify(move || self.0.flush()).await + } + + pub async fn flush_async_ft(&self) -> Result<(), IoError> { + asyncify(move || self.0.flush_async()).await + } + + pub async fn flush_range_ft( + &self, + offset: usize, + len: usize, + ) -> Result<(), IoError> { + asyncify(move || self.0.flush_range(offset, len)).await + } +} + +/// memory mapped file for reading +pub struct MemoryMappedFile(pub Mmap); + +impl MemoryMappedFile { + + /// open memory file, specify minimum size + pub async fn open
<P>(path: P,min_len: u64) -> Result<(Self, AsyncFile), IoError> + where + P: AsRef<Path> + Send + 'static, + { + let m_path = path.as_ref(); + let (m_map, mfile) = asyncify(|| { + let mfile = OpenOptions::new().read(true).open(m_path).unwrap(); + let meta = mfile.metadata().unwrap(); + if meta.len() == 0 { + mfile.set_len(min_len)?; + } + + unsafe { Mmap::map(&mfile) }.map(|mm_file| (mm_file, mfile)) + }).await?; + + Ok(( + MemoryMappedFile::from_mmap(m_map), + AsyncFile::from_std(mfile,m_path.to_owned()), + )) + } + + fn from_mmap(mmap: Mmap) -> MemoryMappedFile { + MemoryMappedFile(mmap) + } + + // get memory file + pub fn get_mem_file(&mut self) -> &mut Mmap { + &mut self.0 + } +} + +#[cfg(test)] +mod tests { + + use std::env::temp_dir; + use std::fs::File; + use std::io::Error as IoError; + use std::io::Read; + + use future_helper::test_async; + + use super::MemoryMappedMutFile; + use utils::fixture::ensure_clean_file; + + #[test_async] + async fn test_mmap_write_slice() -> Result<(),IoError> { + + let index_path = temp_dir().join("test.index"); + ensure_clean_file(&index_path.clone()); + + let result = MemoryMappedMutFile::create(&index_path,3).await; + assert!(result.is_ok()); + + let (mut mm_file, _) = result.unwrap(); + + let mm = mm_file.get_mut_mem_file(); + let src = [0x01, 0x02, 0x03]; + mm.copy_from_slice(&src); + + mm_file.flush_ft().await?; + + let mut f = File::open(&index_path)?; + let mut buffer = vec![0; 3]; + f.read(&mut buffer)?; + assert_eq!(buffer[0], 0x01); + assert_eq!(buffer[1], 0x02); + assert_eq!(buffer[2], 0x03); + + Ok(()) + } + + #[test_async] + async fn test_mmap_write_pair_slice() -> Result<(),IoError> { + + let index_path = temp_dir().join("pairslice.index"); + ensure_clean_file(&index_path.clone()); + + let result = MemoryMappedMutFile::create(&index_path, 24).await; + assert!(result.is_ok()); + + let (mut mm_file, _) = result.unwrap(); + + let mm = mm_file.get_mut_mem_file(); + let src: [(u32, u32); 3] = [(5, 10), (11, 22), (50, 100)]; + let (_, bytes, _) = unsafe { src.align_to::<u8>() }; + assert_eq!(bytes.len(), 24); + + mm.copy_from_slice(&bytes); + + mm_file.flush_ft().await?; + + let (mut mm_file2, _) = + MemoryMappedMutFile::create(&index_path, 24).await?; + let mm2 = mm_file2.get_mut_mem_file(); + let (_, pairs, _) = unsafe { mm2.align_to::<(u32, u32)>() }; + assert_eq!(pairs.len(), 3); + assert_eq!(pairs[0].0, 5); + assert_eq!(pairs[2].1, 100); + + Ok(()) + + } + + #[test_async] + async fn test_mmap_write_with_pos() -> Result<(),IoError> { + + let index_path = temp_dir().join("testpos.index"); + ensure_clean_file(&index_path.clone()); + + let (mut mm_file, _) = MemoryMappedMutFile::create(&index_path, 10).await?; + + let src = vec![0x05, 0x10, 0x44]; + mm_file.write_bytes(5, &src); + + mm_file.flush_ft().await?; + + let mut f = File::open(&index_path)?; + let mut buffer = vec![0; 10]; + f.read(&mut buffer)?; + assert_eq!(buffer[5], 0x05); + assert_eq!(buffer[6], 0x10); + assert_eq!(buffer[7], 0x44); + + Ok(()) + } + + /* + use std::fs::OpenOptions; + use std::path::PathBuf; + use memmap::MmapMut; + + + #[test] + fn debug_kafka_inspect() -> io::Result<()> { + + let path = "/tmp/kafka-logs/test-0/00000000000000000000.index"; + let file = OpenOptions::new() + .read(true) + .write(true) + .open(path)?; + + let mut mmap = unsafe { MmapMut::map_mut(&file)?
}; + println!("file size: {}",mmap.len()); + Ok(()) + } + + #[test] + fn debug_file_inspect() -> io::Result<()> { + + let path = "/tmp/kafka-logs/test-0/00000000000000000000.index"; + let file = File::open(path)?; + let metadata = file.metadata()?; + + println!("file len: {:#?}",metadata.len()); + Ok(()) + } + */ + +} diff --git a/future-aio/src/fs/mod.rs b/future-aio/src/fs/mod.rs new file mode 100644 index 0000000000..476e81f1d4 --- /dev/null +++ b/future-aio/src/fs/mod.rs @@ -0,0 +1,33 @@ +mod async_file; +mod sink; +mod mmap; +mod file_slice; + +pub use self::async_file::AsyncFile; +pub use self::file_slice::AsyncFileSlice; +pub use self::sink::FileSink; +pub use self::sink::FileSinkError; +pub use self::sink::FileSinkOption; +pub use self::mmap::MemoryMappedFile; +pub use self::mmap::MemoryMappedMutFile; + + +use std::io; +use std::path::Path; + + +#[cfg(feature = "tokio2")] +pub async fn create_dir_all>(path: P) -> Result<(), io::Error> { + + tokio_2::fs::create_dir_all(path).await +} + +#[cfg(not(feature = "tokio2"))] +use futures::Future; + +#[cfg(not(feature = "tokio2"))] +pub fn create_dir_all>(path: P) -> impl Future> { + + use futures::compat::Future01CompatExt; + tokio_1::fs::create_dir_all(path).compat() +} diff --git a/future-aio/src/fs/shared_file.rs b/future-aio/src/fs/shared_file.rs new file mode 100644 index 0000000000..9959199d75 --- /dev/null +++ b/future-aio/src/fs/shared_file.rs @@ -0,0 +1,189 @@ +/// async file that can be shared +use std::io::Error as IoError; +use std::pin::Pin; +use std::sync::Arc; +use std::sync::RwLock; +use std::task::Context; +use std::io::SeekFrom; + +use log::trace; + +use futures::io::AsyncRead; + +use futures::Poll; +use futures::Future; +use pin_utils::unsafe_unpinned; +use pin_utils::pin_mut; + +use super::AsyncFile; + +type InnerFile = Arc>; + +#[derive(Clone)] +pub struct SharedAsyncFile { + inner: InnerFile +} + + +impl SharedAsyncFile { + + fn new(file: AsyncFile) -> Self { + SharedAsyncFile { + inner: Arc::new(RwLock::new(file)) + } + } + + + fn read<'a>(&self, buf: &'a mut [u8]) -> SharedAsyncFileRead<'a> { + SharedAsyncFileRead { + inner: self.inner.clone(), + buf + } + } + + pub fn seek(&self, pos: SeekFrom) -> SharedSeekFuture { + SharedSeekFuture::new(self.inner.clone(),pos) + } + +} + + +impl AsyncRead for SharedAsyncFile { + + fn poll_read(self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8]) -> Poll> { + self.inner.write().unwrap().poll_read(cx,buf) + } + +} + +impl From for SharedAsyncFile { + fn from(file: AsyncFile) -> Self { + SharedAsyncFile::new(file) + } +} + + +pub struct SharedSeekFuture { + inner: InnerFile, + seek: SeekFrom +} + + +impl Unpin for SharedSeekFuture {} + +impl SharedSeekFuture { + fn new(file: InnerFile, seek: SeekFrom) -> Self { + SharedSeekFuture { + inner: file, + seek + } + } +} + +impl Future for SharedSeekFuture { + + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + trace!("reading bytes"); + let this = &mut *self; + let mut inner = this.inner.write().unwrap(); + let ft = inner.seek(this.seek.clone()); + pin_mut!(ft); + ft.poll(cx) + } +} + + + + +/// Based on Futures Read struct +/// Read future on shared file +/// Only allow one read at time using lock +pub struct SharedAsyncFileRead<'a> { + inner: InnerFile, + buf: &'a mut [u8], +} + + +impl<'a> SharedAsyncFileRead<'a> { + unsafe_unpinned!(buf: &'a mut [u8]); + + fn new<'b: 'a>(inner: InnerFile, buf: &'b mut [u8]) -> Self { + Self { + inner: inner.clone(), + buf + } + } +} + +impl Unpin for 
SharedAsyncFileRead<'_> {} + +impl Future for SharedAsyncFileRead<'_> { + + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + trace!("reading bytes"); + let this = &mut *self; + let mut inner = this.inner.write().unwrap(); + inner.poll_read(cx,this.buf) + } +} + + + + +#[cfg(test)] +mod tests { + + + + use std::env::temp_dir; + use std::io::Error as IoError; + use std::io::SeekFrom; + + use futures::io::AsyncWriteExt; + use futures::io::AsyncReadExt; + + use future_helper::test_async; + use utils::fixture::ensure_clean_file; + use crate::fs::AsyncFile; + use super::SharedAsyncFile; + + + #[test_async] + async fn test_shared_read() -> Result<(), IoError> { + + let test_file_path = temp_dir().join("shared_read"); + ensure_clean_file(&test_file_path); + + + let mut file = AsyncFile::create(&test_file_path).await?; + file.write_all(b"test").await?; + + let mut buffer = [0; 4]; + let read_file = AsyncFile::open(&test_file_path).await?; + let shared_file = SharedAsyncFile::new(read_file); + let read_len = shared_file.read(&mut buffer).await?; + assert_eq!(read_len,4); + let contents = String::from_utf8(buffer.to_vec()).expect("conversion"); + assert_eq!(contents,"test"); + + let mut output = Vec::new(); + let mut file2 = shared_file.clone(); + file2.seek(SeekFrom::Start(0)).await?; + file2.read_to_end(&mut output).await?; + let contents = String::from_utf8(output).expect("conversion"); + assert_eq!(contents,"test"); + + + Ok(()) + + } + +} + + + + diff --git a/future-aio/src/fs/sink.rs b/future-aio/src/fs/sink.rs new file mode 100644 index 0000000000..e77fcf5a62 --- /dev/null +++ b/future-aio/src/fs/sink.rs @@ -0,0 +1,343 @@ +// sink +use std::io; +use std::path::Path; +use std::pin::Pin; +use std::mem; +use std::io::Error as IoError; +use std::task::Context; +use std::fmt; + +use log::trace; +use log::debug; +use futures::io::AsyncWrite; +use futures::sink::Sink; +use futures::ready; +use futures::Future; +use futures::Poll; +use pin_utils::unsafe_pinned; +use pin_utils::unsafe_unpinned; + +use crate::fs::AsyncFile; +use crate::fs::AsyncFileSlice; +use crate::AsyncWrite2; + +#[derive(Debug)] +pub enum FileSinkError { + IoError(io::Error), + MaxLenReached, // exceed max limit +} + + +impl fmt::Display for FileSinkError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::IoError(err) => write!(f, "{}", err), + Self::MaxLenReached => write!(f,"max len reached") + } + } +} + + + +impl From for FileSinkError { + fn from(error: io::Error) -> Self { + FileSinkError::IoError(error) + } +} + +#[derive(Default)] +pub struct FileSinkOption { + pub max_len: Option, +} + +enum WriteState { + Ready, + Received(B), + Writing, + Flush +} + +impl WriteState { + + fn buffer(self) -> B { + match self { + WriteState::Received(item) => item, + _ => panic!("should only called when it is item") + } + } +} + + +/// File Sink +/// When starts, it calls write to produce future which write to file +/// future is stored in the write_store so it can be poll by consumer of the Sink +/// Since WriteAll is created in the function, it need to have different lifetime than struct itself +/// lifetime b can moved to method since it's no longer needed at Impl Sink Trait +/// parameter T is added so we can pass reference to [u8] +pub struct FileSink { + option: FileSinkOption, + current_len: u64, // file size + pending_len: u64, + writer: AsyncFile, + write_state: WriteState, +} + +impl FileSink { + + pub async fn create
<P>
(path: P, option: FileSinkOption) -> Result<FileSink<B>, io::Error>
+ where P: AsRef<Path>
+ {
+ let file = AsyncFile::create(path).await?;
+ Ok(FileSink {
+ writer: file,
+ write_state: WriteState::Ready,
+ current_len: 0,
+ pending_len: 0,
+ option,
+ })
+ }
+
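+ // usage sketch (illustrative only; the path and option values are made up,
+ // and the sink is driven through futures::sink::SinkExt as in the tests
+ // at the bottom of this file):
+ //
+ //     use futures::sink::SinkExt;
+ //
+ //     let mut f_sink: FileSink<Vec<u8>> =
+ //         FileSink::create("/tmp/data.log", FileSinkOption::default()).await?;
+ //     f_sink.send(vec![0x01, 0x02, 0x03]).await?;
+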
+ pub async fn open_write<P>
(path: P, option: FileSinkOption) -> Result<FileSink<B>, io::Error>
+ where P: AsRef<Path>
+ {
+ let file_path = path.as_ref();
+ let file = AsyncFile::open(file_path).await?;
+ let metadata = AsyncFile::get_metadata(file_path).await?;
+ let len = metadata.len();
+
+ Ok(FileSink {
+ writer: file,
+ write_state: WriteState::Ready,
+ current_len: len,
+ pending_len: 0,
+ option,
+ })
+ }
+
+ pub async fn open_append
<P>
(path: P, option: FileSinkOption) -> Result<FileSink<B>, io::Error>
+ where P: AsRef<Path>
+ {
+ let file_path = path.as_ref();
+ let file = AsyncFile::open_read_append(file_path).await?;
+ let metadata = AsyncFile::get_metadata(file_path).await?;
+ let len = metadata.len();
+
+ Ok(FileSink {
+ writer: file,
+ write_state: WriteState::Ready,
+ current_len: len,
+ pending_len: 0,
+ option,
+ })
+ }
+
+
+ pub fn get_mut_writer(&mut self) -> &mut AsyncFile {
+ &mut self.writer
+ }
+
+ pub fn get_writer(&self) -> &AsyncFile {
+ &self.writer
+ }
+
+ pub async fn create_reader(&self) -> Result<AsyncFile, IoError> {
+ self.writer.read_clone().await
+ }
+
+ pub fn get_current_len(&self) -> u64 {
+ self.current_len
+ }
+
+ pub fn get_pending_len(&self) -> u64 {
+ self.pending_len
+ }
+
+ pub async fn clone_writer(&self) -> Result<AsyncFile, IoError> {
+ self.writer.try_clone().await
+ }
+
+ pub fn slice_from(&self, position: u64, len: u64) -> Result<AsyncFileSlice, IoError> {
+
+ Ok(self.writer.raw_slice(position, len))
+
+ }
+
+
+}
+
+impl<B> Unpin for FileSink<B> where B: AsRef<[u8]> {}
+
+
+impl<B> FileSink<B> where B: AsRef<[u8]> {
+ unsafe_pinned!(writer: AsyncFile);
+ unsafe_unpinned!(write_state: WriteState<B>);
+ unsafe_unpinned!(pending_len: u64);
+ unsafe_unpinned!(current_len: u64);
+
+
+ // write the buffered item if one is available
+ fn try_empty_write(mut self: Pin<&mut Self>, cx: &mut Context,
+ ) -> Poll<Result<(), IoError>> {
+ trace!(
+ "write buf current len: {}, pending len: {}",
+ self.current_len,
+ self.pending_len
+ );
+ match self.as_mut().write_state() {
+ WriteState::Received(_) => {
+ let item = mem::replace(self.as_mut().write_state(), WriteState::Writing);
+ trace!("pending write available, polling write");
+ let mut writer = self.as_mut().writer();
+ let mut write_state = writer.write_buf_all(item.buffer());
+ let pin_state = unsafe { Pin::new_unchecked(&mut write_state) };
+ ready!(pin_state.poll(cx))?;
+ mem::replace(self.as_mut().write_state(), WriteState::Flush);
+ Poll::Ready(Ok(()))
+ },
+ _ => panic!("sink is not in received mode")
+ }
+ }
+
+ fn send_private(mut self: Pin<&mut Self>, item: B) -> Result<(), FileSinkError> where B: AsRef<[u8]> {
+ assert_eq!(
+ self.pending_len, 0,
+ "pending len should always be 0 when starting to send"
+ );
+ let item_ref = item.as_ref();
+ let len = item_ref.len() as u64;
+ trace!("start writing bytes len: {}, file len: {}", len, self.current_len);
+ if let Some(max_len) = self.option.max_len {
+ if self.current_len + len > max_len {
+ debug!("pending will exceed max: {}", max_len);
+ return Err(FileSinkError::MaxLenReached);
+ }
+ }
+
+
+ *self.as_mut().pending_len() = len;
+ trace!("set to received mode");
+ mem::replace(self.as_mut().write_state(), WriteState::Received(item));
+ Ok(())
+
+
+ }
+
+
+}
+
+impl<B> Sink<B> for FileSink<B> where B: AsRef<[u8]> + Sync + Send {
+
+ type Error = FileSinkError;
+
+ fn poll_ready(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<Result<(), Self::Error>> {
+ match self.write_state {
+ WriteState::Ready => Poll::Ready(Ok(())),
+ _ => Poll::Pending
+ }
+ }
+
+ fn start_send(self: Pin<&mut Self>, item: B) -> Result<(), Self::Error> {
+ self.send_private(item)
+ }
+
+ fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
+ // do lots of conv between pinned self and regular pinned
+ // maybe this could be solved with unpin?
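+ // sketch of the unpin alternative (illustrative comment only):
+ // FileSink<B> is declared Unpin above, so if AsyncFile were also known
+ // to be Unpin, the unsafe projections could be replaced with safe
+ // re-pinning:
+ //
+ //     let this = self.get_mut();            // allowed because Self: Unpin
+ //     Pin::new(&mut this.writer).poll_flush(cx)
+ //
+ // the code below keeps the original projection approach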
+ if let Poll::Ready(Err(e)) = self.as_mut().try_empty_write(cx) { + return Poll::Ready(Err(e.into())); + } + let result = self.as_mut().writer().poll_flush(cx).map_err(|err| err.into()); + match result { + Poll::Ready(Ok(_)) => { + let current_len = self.current_len + self.pending_len; + *(self.as_mut().current_len())= current_len; + *(self.as_mut().pending_len()) = 0; + mem::replace(self.as_mut().write_state(),WriteState::Ready); + trace!("flush, new len: {}",self.current_len); + Poll::Ready(Ok(())) + } + _ => result, + } + } + + fn poll_close(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } +} + +#[cfg(test)] +mod tests { + + use futures::sink::SinkExt; + use std::env::temp_dir; + use std::fs::remove_file; + use std::fs::File; + use log::debug; + use std::io::Read; + use std::path::PathBuf; + use future_helper::test_async; + + use super::FileSink; + use super::FileSinkError; + use super::FileSinkOption; + + + const TEST_FILE_NAME: &str = "file_test_01"; + const MAX_TEST_FILE_NAME: &str = "file_test_max"; + + fn ensure_clean_file(log_path: &PathBuf) { + debug!("removing log: {}", log_path.display()); + // delete message log if it exists + if let Ok(_) = remove_file(log_path) { + debug!("remove existing log file"); + } else { + debug!("no existing log file"); + } + } + + #[test_async] + async fn test_sink_file_write_happy_path() -> Result<(),FileSinkError> { + let test_file = temp_dir().join(TEST_FILE_NAME); + ensure_clean_file(&test_file); + + let mut f_sink = FileSink::create(&test_file, FileSinkOption::default()).await?; + + let bytes = vec![0x01, 0x02, 0x03]; + f_sink.send(bytes).await?; + + let test_file = temp_dir().join(TEST_FILE_NAME); + let mut f = File::open(test_file)?; + let mut buffer = vec![0; 3]; + f.read(&mut buffer)?; + assert_eq!(buffer[0], 0x01); + assert_eq!(buffer[1], 0x02); + assert_eq!(buffer[2], 0x03); + Ok(()) + } + + /// example of async test + #[test_async] + async fn test_sink_file_max_reached() -> Result<(), FileSinkError> { + + let test_file = temp_dir().join(MAX_TEST_FILE_NAME); + ensure_clean_file(&test_file); + + let option = FileSinkOption { max_len: Some(10) }; + + let mut f_sink = FileSink::create(&test_file, option).await?; + + let bytes = vec![0x01; 8]; + // first send let should be 8 + debug!("====> first write ====="); + f_sink.send(bytes.clone()).await?; + assert_eq!(f_sink.current_len, 8); + assert_eq!(f_sink.pending_len, 0); + debug!("=====> second write ===="); + // should fail at this point because there isn't enough capacity + let res = f_sink.send(bytes).await; + assert!(res.is_err(), "should reached max"); + Ok(()) + + } + +} diff --git a/future-aio/src/fs/stream.rs b/future-aio/src/fs/stream.rs new file mode 100644 index 0000000000..5b47277e7d --- /dev/null +++ b/future-aio/src/fs/stream.rs @@ -0,0 +1,25 @@ + +use futures::Stream; + +use crate::fs::AsyncFile; + +pub struct AsyncFileStream { + file: AsyncFile +} + +impl AsyncFileStream { + pub fn new(file: AsyncFile) -> Self { + Self { + file + } + } +} + + +impl Stream for AsyncFileStream { + + fn poll_next(self: Pin<&mut Self>, lw: &Waker) -> Poll> { + + } + +} \ No newline at end of file diff --git a/future-aio/src/io_util_1.rs b/future-aio/src/io_util_1.rs new file mode 100644 index 0000000000..e9fc347c68 --- /dev/null +++ b/future-aio/src/io_util_1.rs @@ -0,0 +1,95 @@ +use std::marker::Unpin; +use std::pin::Pin; +use std::task::Context; +use std::task::Poll; + +use futures::future::Future; +use futures_1::Async as Async_01; +use std::fmt::Debug; +use 
std::io; + +pub struct BlockIO { + f: Option, +} + +impl Unpin for BlockIO {} + +pub fn asyncify(f: F) -> BlockIO +where + F: FnOnce() -> Result, +{ + BlockIO { f: Some(f) } +} + +impl Future for BlockIO +where + F: FnOnce() -> Result, + R: Debug, + E: From +{ + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { + let result = tokio_threadpool_1::blocking(self.f.take().unwrap()); + match result { + Ok(Async_01::Ready(Ok(v))) => Poll::Ready(Ok(v.into())), + Ok(Async_01::Ready(Err(err))) => Poll::Ready(Err(err)), + Ok(Async_01::NotReady) => Poll::Pending, + Err(_) => Poll::Ready(Err(blocking_err().into())), + } + } +} + +use std::io::ErrorKind::Other; +pub fn blocking_err() -> io::Error { + io::Error::new( + Other, + "`blocking` annotated I/O must be called \ + from the context of the Tokio runtime.", + ) +} + + + +#[cfg(test)] +mod test { + + use std::io; + use std::io::Error as IoError; + use std::{thread, time}; + use future_helper::test_async; + + use super::asyncify; + + #[test_async] + async fn test_block_io_ok() -> Result<(), ()> { + + let bk = asyncify(|| { + thread::sleep(time::Duration::from_millis(2000)); + Ok::(2) + }); + + let result = bk.await; + + assert!(result.is_ok()); + assert_eq!(result.unwrap(), 2); + Ok(()) + } + + #[test_async] + async fn test_block_io_err() -> Result<(),()> { + + let bk = asyncify(|| { + thread::sleep(time::Duration::from_millis(100)); + let result: io::Result<()> = Result::Err(io::Error::new( + io::ErrorKind::UnexpectedEof, + "server has terminated connection", + )); + result + }); + + let result = bk.await; + assert!(result.is_err()); + Ok(()) + } +} \ No newline at end of file diff --git a/future-aio/src/io_util_3.rs b/future-aio/src/io_util_3.rs new file mode 100644 index 0000000000..c0122bad47 --- /dev/null +++ b/future-aio/src/io_util_3.rs @@ -0,0 +1,40 @@ + +use std::io; +use std::io::ErrorKind::Other; +use std::task::Poll; +use std::task::Poll::Ready; +use std::task::Poll::Pending; + +use futures::future::poll_fn; + +/// Borrowed from Tokio io utils but generalize to return any error + +fn blocking_io(f: F) -> Poll> +where + F: FnOnce() -> Result, + E: From +{ + match tokio_threadpool_2::blocking(f) { + Ready(Ok(v)) => Ready(v), + Ready(Err(_)) => Ready(Err(blocking_err().into())), + Pending => Pending, + } +} + +pub async fn asyncify(f: F) -> Result +where + F: FnOnce() -> Result, + E: From +{ + let mut f = Some(f); + poll_fn(move |_| blocking_io(|| f.take().unwrap()())).await +} + +fn blocking_err() -> io::Error { + io::Error::new( + Other, + "`blocking` annotated I/O must be called \ + from the context of the Tokio runtime.", + ) +} + diff --git a/future-aio/src/lib.rs b/future-aio/src/lib.rs new file mode 100644 index 0000000000..350a6e0da3 --- /dev/null +++ b/future-aio/src/lib.rs @@ -0,0 +1,29 @@ +#![feature(generators)] + +#[cfg(not(feature = "tokio2"))] +mod compat; +pub mod fs; + +#[cfg(not(feature = "tokio2"))] +mod io_util_1; +#[cfg(feature = "tokio2")] +mod io_util_3; + + +mod write; +mod zero_copy; +pub mod net; + +#[cfg(feature = "tokio2")] +use self::io_util_3::asyncify; +#[cfg(not(feature = "tokio2"))] +use self::io_util_1::asyncify; + +pub use self::write::AsyncWrite2; +pub use self::write::WriteBufAll; +pub use self::zero_copy::ZeroCopyWrite; +pub use self::zero_copy::SendFileError; + +pub use bytes::Bytes; +pub use bytes::BytesMut; +pub use bytes::BufMut; \ No newline at end of file diff --git a/future-aio/src/net/mod.rs b/future-aio/src/net/mod.rs new file mode 100644 index 
0000000000..ce7c5b0fd9 --- /dev/null +++ b/future-aio/src/net/mod.rs @@ -0,0 +1,21 @@ + +#[cfg(feature = "tokio2")] +mod tcp_stream_3; + +#[cfg(not(feature = "tokio2"))] +mod tcp_stream_1; + +#[cfg(feature = "tokio2")] +use tcp_stream_3 as common_tcp_stream; + +#[cfg(not(feature = "tokio2"))] +use tcp_stream_1 as common_tcp_stream; + +pub use self::common_tcp_stream::AsyncTcpListener; +pub use self::common_tcp_stream::AsyncTcpStream; +pub use self::common_tcp_stream::TcpStreamSplitStream; +pub use self::common_tcp_stream::TcpStreamSplitSink; +pub use self::common_tcp_stream::TcpStreamSplit; + + + diff --git a/future-aio/src/net/tcp_stream_1.rs b/future-aio/src/net/tcp_stream_1.rs new file mode 100644 index 0000000000..60cc755ea1 --- /dev/null +++ b/future-aio/src/net/tcp_stream_1.rs @@ -0,0 +1,382 @@ +use std::io::Error as IoError; +/// wrapper over tokio stream +/// should be compatible with romio tcp stream but +/// wrapper over tokio tcp to make it usable now +use std::net::SocketAddr; +use std::fmt::Display; +use std::fmt::Formatter; +use std::fmt::Result as FmtResult; + +use futures::compat::Compat01As03; +use futures::compat::Future01CompatExt; +use futures::compat::Stream01CompatExt; +use futures::Stream; + +use bytes::Bytes; +use bytes::BytesMut; +use bytes::BufMut; + +use futures_1::stream::SplitSink as SplitSink01; +use futures_1::stream::SplitStream as SplitStream01; +use futures_1::Stream as Stream01; +use tokio_1::codec::Decoder; +use tokio_1::codec::Encoder; +use tokio_1::codec::Framed; +use tokio_1::net::TcpListener as TokioTcpListner; +use tokio_1::net::TcpStream as TokioTcpStream; + + +#[cfg(unix)] +use std::os::unix::io::AsRawFd; +use std::os::unix::io::RawFd; + +use crate::compat::Compat01As03Sink; +use crate::ZeroCopyWrite; + + + +pub struct AsyncTcpListener(TokioTcpListner); + +impl AsyncTcpListener { + pub fn bind(addr: &SocketAddr) -> Result { + let listener = TokioTcpListner::bind(addr)?; + Ok(AsyncTcpListener(listener)) + } + + pub fn incoming(self) -> impl Stream> { + self.0 + .incoming() + .map(|tcp_stream01| tcp_stream01.into()) + .compat() + } +} + + +/// This should be same as Future TcpStream like Romeo +/// but use tokio io for compatibility +pub struct AsyncTcpStream(TokioTcpStream); + + +impl Display for AsyncTcpStream { + + + fn fmt(&self, f: &mut Formatter) -> FmtResult { + + if let Ok(local_addr) = self.local_addr() { + write!(f, "local: {} ",local_addr)?; + } + if let Ok(peer_addr) = self.peer_addr() { + write!(f, "peer: {} ",peer_addr)?; + } + + write!(f,"fd: {}",self.as_raw_fd()) + + } +} + + +impl From for AsyncTcpStream { + fn from(tcp: TokioTcpStream) -> Self { + AsyncTcpStream(tcp) + } +} + +impl AsyncTcpStream { + pub async fn connect(addr: &SocketAddr) -> Result { + let inner_tcp = TokioTcpStream::connect(addr).compat().await?; + Ok(inner_tcp.into()) + } + + pub fn local_addr(&self) -> Result { + self.0.local_addr() + } + + pub fn peer_addr(&self) -> Result { + self.0.peer_addr() + } + + + pub fn split(self) -> TcpStreamSplit + where Self: Into> + { + self.into() + } + +} + + + +impl AsRawFd for AsyncTcpStream { + + fn as_raw_fd(&self) -> RawFd { + self.0.as_raw_fd() + } + +} + +impl ZeroCopyWrite for AsyncTcpStream {} + + + +pub type TcpStreamSplitSink = Compat01As03Sink>, Bytes>; +pub type TcpStreamSplitStream = Compat01As03>>; + +unsafe impl Sync for TcpStreamSplitSink{} + + +#[derive(Debug)] +pub struct TcpStreamSplit { + sink: TcpStreamSplitSink, + stream: TcpStreamSplitStream +} + +impl Into> for AsyncTcpStream where C: Default + Decoder 
+ Encoder { + + fn into(self) -> TcpStreamSplit { + + let (sink, stream) = C::default().framed(self.0).split(); + TcpStreamSplit { + sink: Compat01As03Sink::new(sink), + stream: Compat01As03::new(stream) + } + } +} + +impl TcpStreamSplit { + + pub fn get_sink(&self) -> &TcpStreamSplitSink{ + &self.sink + } + + + pub fn get_mut_sink(&mut self) -> &mut TcpStreamSplitSink{ + &mut self.sink + } + + pub fn get_mut_stream(&mut self) -> &mut TcpStreamSplitStream { + &mut self.stream + } + + pub fn sink(self) -> TcpStreamSplitSink{ + self.sink + } + + + pub fn stream(self) -> TcpStreamSplitStream { + self.stream + } + + + // convert into tutple + pub fn as_tuple(self) -> (TcpStreamSplitSink,TcpStreamSplitStream) { + (self.sink,self.stream) + } +} + +// borrowed from bytes codec but implemented default +#[derive(Default)] +pub struct SimpleCodec(()); + +impl Decoder for SimpleCodec { + type Item = BytesMut; + type Error = IoError; + + fn decode(&mut self, buf: &mut BytesMut) -> Result,IoError> { + if buf.len() > 0 { + let len = buf.len(); + Ok(Some(buf.split_to(len))) + } else { + Ok(None) + } + } +} + +impl Encoder for SimpleCodec { + type Item = Bytes; + type Error = IoError; + + fn encode(&mut self, data: Bytes, buf: &mut BytesMut) -> Result<(),IoError> { + buf.reserve(data.len()); + buf.put(data); + Ok(()) + } +} + + + +#[cfg(test)] +mod tests { + + use std::io::Error; + use std::net::SocketAddr; + use std::thread; + use std::time; + + use bytes::BufMut; + use bytes::Bytes; + use bytes::BytesMut; + use futures::sink::SinkExt; + use futures::stream::StreamExt; + use futures::future::join; + use future_helper::sleep; + use log::debug; + + use future_helper::test_async; + use future_helper::spawn; + + use super::AsyncTcpListener; + use super::AsyncTcpStream; + use super::TcpStreamSplit; + use super::SimpleCodec; + + fn to_bytes(bytes: Vec) -> Bytes { + let mut buf = BytesMut::with_capacity(bytes.len()); + buf.put_slice(&bytes); + buf.freeze() + } + + #[test_async] + async fn future_join() -> Result<(), Error> { + + // with join, futures are dispatched on same thread + // since ft1 starts first and + // blocks on thread, it will block future2 + // should see ft1,ft1,ft2,ft2 + + //let mut ft_id = 0; + + let ft1 = async { + + debug!("ft1: starting sleeping for 1000ms"); + // this will block ft2. 
both ft1 and ft2 share same thread + thread::sleep(time::Duration::from_millis(1000)); + debug!("ft1: woke from sleep"); + + }; + + let ft2 = async { + debug!("ft2: starting sleeping for 500ms"); + thread::sleep(time::Duration::from_millis(500)); + debug!("ft2: woke up"); + + }; + + let core_threads = num_cpus::get().max(1); + debug!("num threads: {}",core_threads); + let _rt = join(ft1,ft2).await; + assert!(true); + Ok(()) + } + + + + #[test_async] + async fn future_spawn() -> Result<(), Error> { + + // with spawn, futures are dispatched on separate thread + // in this case, thread sleep on ft1 won't block + // should see ft1, ft2, ft2, ft1 + + let ft1 = async { + + debug!("ft1: starting sleeping for 1000ms"); + thread::sleep(time::Duration::from_millis(1000)); // give time for server to come up + debug!("ft1: woke from sleep"); + + }; + + let ft2 = async { + + debug!("ft2: starting sleeping for 500ms"); + thread::sleep(time::Duration::from_millis(500)); + debug!("ft2: woke up"); + + }; + + let core_threads = num_cpus::get().max(1); + debug!("num threads: {}",core_threads); + + spawn(ft1); + spawn(ft2); + // wait for all futures complete + thread::sleep(time::Duration::from_millis(2000)); + + assert!(true); + + + Ok(()) + } + + + + #[test_async] + async fn test_async_tcp() -> Result<(), Error> { + let addr = "127.0.0.1:9998".parse::().expect("parse"); + + let server_ft = async { + + debug!("server: binding"); + let listener = AsyncTcpListener::bind(&addr)?; + debug!("server: successfully binding. waiting for incoming"); + let mut incoming = listener.incoming(); + while let Some(stream) = incoming.next().await { + debug!("server: got connection from client"); + let tcp_stream = stream?; + let split: TcpStreamSplit = tcp_stream.split(); + let mut sink = split.sink(); + debug!("server: seding values to client"); + let data = vec![0x05, 0x0a, 0x63]; + sink.send(to_bytes(data)).await?; + sleep(time::Duration::from_micros(1)).await; + debug!("server: sending 2nd value to client"); + let data2 = vec![0x20,0x11]; + sink.send(to_bytes(data2)).await?; + return Ok(()) as Result<(),Error> + + } + + Ok(()) as Result<(), Error> + }; + + let client_ft = async { + + debug!("client: sleep to give server chance to come up"); + sleep(time::Duration::from_millis(100)).await; + debug!("client: trying to connect"); + let tcp_stream = AsyncTcpStream::connect(&addr).await?; + debug!("client: got connection. 
waiting"); + let split: TcpStreamSplit = tcp_stream.split(); + let mut stream = split.stream(); + if let Some(value) = stream.next().await { + debug!("client :received first value from server"); + let mut bytes = value?; + let values = bytes.take(); + assert_eq!(values[0],0x05); + assert_eq!(values[1],0x0a); + assert_eq!(values[2],0x63); + assert_eq!(values.len(),3); + } else { + assert!(false,"no value received"); + } + + if let Some(value) = stream.next().await { + debug!("client: received 2nd value from server"); + let mut bytes = value?; + let values = bytes.take(); + assert_eq!(values.len(),2); + + } else { + assert!(false,"no value received"); + } + + + Ok(()) as Result<(), Error> + }; + + + let _rt = join(client_ft,server_ft).await; + + Ok(()) + } + +} \ No newline at end of file diff --git a/future-aio/src/net/tcp_stream_3.rs b/future-aio/src/net/tcp_stream_3.rs new file mode 100644 index 0000000000..59dda70427 --- /dev/null +++ b/future-aio/src/net/tcp_stream_3.rs @@ -0,0 +1,377 @@ +use std::io::Error as IoError; +/// wrapper over tokio stream +/// should be compatible with romio tcp stream but +/// wrapper over tokio tcp to make it usable now +use std::net::SocketAddr; +use std::fmt::Display; +use std::fmt::Formatter; +use std::fmt::Result as FmtResult; + +use futures::stream::StreamExt; +use futures::stream::SplitSink; +use futures::Stream; +use futures::stream::SplitStream; + +use bytes::Bytes; +use bytes::BytesMut; +use bytes::BufMut; +use tokio_2::codec::Framed as TkFramed; +use tokio_2::codec::Encoder as TkEncoder; +use tokio_2::codec::Decoder as TkDecoder; +use tokio_2::net::TcpStream as TkTcpStream; +use tokio_2::net::TcpListener as TkTcpListner; + +use crate::ZeroCopyWrite; + + +#[cfg(unix)] +use std::os::unix::io::AsRawFd; +use std::os::unix::io::RawFd; + + + +pub struct AsyncTcpListener(TkTcpListner); + +impl AsyncTcpListener { + pub fn bind(addr: &SocketAddr) -> Result { + let listener = TkTcpListner::bind(addr)?; + Ok(AsyncTcpListener(listener)) + } + + pub fn incoming(self) -> impl Stream> { + self.0 + .incoming() + .map(|tcp_stream | tcp_stream.map(|inner| inner.into())) + } +} + + +/// This should be same as Future TcpStream like Romeo +/// but use tokio io for compatibility +pub struct AsyncTcpStream(TkTcpStream); + + +impl Display for AsyncTcpStream { + + + fn fmt(&self, f: &mut Formatter) -> FmtResult { + + if let Ok(local_addr) = self.local_addr() { + write!(f, "local: {} ",local_addr)?; + } + if let Ok(peer_addr) = self.peer_addr() { + write!(f, "peer: {} ",peer_addr)?; + } + + write!(f,"fd: {}",self.as_raw_fd()) + + } +} + + +impl From for AsyncTcpStream { + fn from(tcp: TkTcpStream) -> Self { + AsyncTcpStream(tcp) + } +} + +impl AsyncTcpStream { + pub async fn connect(addr: &SocketAddr) -> Result { + let inner_tcp = TkTcpStream::connect(addr).await?; + Ok(inner_tcp.into()) + } + + pub fn local_addr(&self) -> Result { + self.0.local_addr() + } + + pub fn peer_addr(&self) -> Result { + self.0.peer_addr() + } + + + pub fn split(self) -> TcpStreamSplit + where Self: Into>, C: Unpin + TkEncoder + { + self.into() + } + +} + + + +impl AsRawFd for AsyncTcpStream { + + fn as_raw_fd(&self) -> RawFd { + self.0.as_raw_fd() + } + +} + +impl ZeroCopyWrite for AsyncTcpStream {} + + +pub type TcpStreamSplitStream = SplitStream>; +pub type TcpStreamSplitSink = SplitSink,::Item>; +//unsafe impl Sync for TcpStreamSplitSink{} + + +pub struct TcpStreamSplit + where C: TkEncoder + Unpin, +{ + sink: TcpStreamSplitSink, + stream: TcpStreamSplitStream +} + +impl Into> for 
AsyncTcpStream where C: Default + TkDecoder + TkEncoder + Unpin { + + fn into(self) -> TcpStreamSplit { + + let (sink, stream) = C::default().framed(self.0).split(); + TcpStreamSplit { + sink, + stream + } + } +} + +impl TcpStreamSplit + where C: TkEncoder + Unpin +{ + + pub fn get_sink(&self) -> &SplitSink,C::Item>{ + &self.sink + } + + + pub fn get_mut_sink(&mut self) -> &mut SplitSink,C::Item>{ + &mut self.sink + } + + pub fn get_mut_stream(&mut self) -> &mut TcpStreamSplitStream { + &mut self.stream + } + + pub fn sink(self) -> SplitSink,C::Item> { + self.sink + } + + + pub fn stream(self) -> TcpStreamSplitStream { + self.stream + } + + + // convert into tutple + pub fn as_tuple(self) -> ( SplitSink,C::Item>,TcpStreamSplitStream) { + (self.sink,self.stream) + } +} + +// borrowed from bytes codec but implemented default +#[derive(Default)] +pub struct SimpleCodec(()); + +impl TkDecoder for SimpleCodec { + type Item = BytesMut; + type Error = IoError; + + fn decode(&mut self, buf: &mut BytesMut) -> Result,IoError> { + if buf.len() > 0 { + let len = buf.len(); + Ok(Some(buf.split_to(len))) + } else { + Ok(None) + } + } +} + +impl TkEncoder for SimpleCodec { + type Item = Bytes; + type Error = IoError; + + fn encode(&mut self, data: Bytes, buf: &mut BytesMut) -> Result<(),IoError> { + buf.reserve(data.len()); + buf.put(data); + Ok(()) + } +} + + + +#[cfg(test)] +mod tests { + + use std::io::Error; + use std::net::SocketAddr; + use std::thread; + use std::time; + + use bytes::BufMut; + use bytes::Bytes; + use bytes::BytesMut; + use futures::sink::SinkExt; + use futures::stream::StreamExt; + use futures::future::join; + use future_helper::sleep; + use log::debug; + + use future_helper::test_async; + use future_helper::spawn; + + use super::AsyncTcpListener; + use super::AsyncTcpStream; + use super::TcpStreamSplit; + use super::SimpleCodec; + + fn to_bytes(bytes: Vec) -> Bytes { + let mut buf = BytesMut::with_capacity(bytes.len()); + buf.put_slice(&bytes); + buf.freeze() + } + + #[test_async] + async fn future_join() -> Result<(), Error> { + + // with join, futures are dispatched on same thread + // since ft1 starts first and + // blocks on thread, it will block future2 + // should see ft1,ft1,ft2,ft2 + + //let mut ft_id = 0; + + let ft1 = async { + + debug!("ft1: starting sleeping for 1000ms"); + // this will block ft2. 
both ft1 and ft2 share same thread + thread::sleep(time::Duration::from_millis(1000)); + debug!("ft1: woke from sleep"); + // ft_id = 1; + Ok(()) as Result<(),()> + }; + + let ft2 = async { + debug!("ft2: starting sleeping for 500ms"); + thread::sleep(time::Duration::from_millis(500)); + debug!("ft2: woke up"); + // ft_id = 2; + Ok(()) as Result<(), ()> + }; + + let core_threads = num_cpus::get().max(1); + debug!("num threads: {}",core_threads); + let _rt = join(ft1,ft2).await; + assert!(true); + Ok(()) + } + + + + #[test_async] + async fn future_spawn() -> Result<(), Error> { + + // with spawn, futures are dispatched on separate thread + // in this case, thread sleep on ft1 won't block + // should see ft1, ft2, ft2, ft1 + + let ft1 = async { + + debug!("ft1: starting sleeping for 1000ms"); + thread::sleep(time::Duration::from_millis(1000)); // give time for server to come up + debug!("ft1: woke from sleep"); + }; + + let ft2 = async { + + debug!("ft2: starting sleeping for 500ms"); + thread::sleep(time::Duration::from_millis(500)); + debug!("ft2: woke up"); + }; + + let core_threads = num_cpus::get().max(1); + debug!("num threads: {}",core_threads); + + spawn(ft1); + spawn(ft2); + // wait for all futures complete + thread::sleep(time::Duration::from_millis(2000)); + + assert!(true); + + + Ok(()) + } + + + + #[test_async] + async fn test_async_tcp() -> Result<(), Error> { + let addr = "127.0.0.1:9998".parse::().expect("parse"); + + let server_ft = async { + + debug!("server: binding"); + let listener = AsyncTcpListener::bind(&addr)?; + debug!("server: successfully binding. waiting for incoming"); + let mut incoming = listener.incoming(); + while let Some(stream) = incoming.next().await { + debug!("server: got connection from client"); + let tcp_stream = stream?; + let split: TcpStreamSplit = tcp_stream.split(); + let mut sink = split.sink(); + debug!("server: seding values to client"); + let data = vec![0x05, 0x0a, 0x63]; + sink.send(to_bytes(data)).await?; + sleep(time::Duration::from_micros(1)).await; + debug!("server: sending 2nd value to client"); + let data2 = vec![0x20,0x11]; + sink.send(to_bytes(data2)).await?; + return Ok(()) as Result<(),Error> + + } + + Ok(()) as Result<(), Error> + }; + + let client_ft = async { + + debug!("client: sleep to give server chance to come up"); + sleep(time::Duration::from_millis(100)).await; + debug!("client: trying to connect"); + let tcp_stream = AsyncTcpStream::connect(&addr).await?; + debug!("client: got connection. 
waiting"); + let split: TcpStreamSplit = tcp_stream.split(); + let mut stream = split.stream(); + if let Some(value) = stream.next().await { + debug!("client :received first value from server"); + let mut bytes = value?; + let values = bytes.take(); + assert_eq!(values[0],0x05); + assert_eq!(values[1],0x0a); + assert_eq!(values[2],0x63); + assert_eq!(values.len(),3); + } else { + assert!(false,"no value received"); + } + + if let Some(value) = stream.next().await { + debug!("client: received 2nd value from server"); + let mut bytes = value?; + let values = bytes.take(); + assert_eq!(values.len(),2); + + } else { + assert!(false,"no value received"); + } + + + Ok(()) as Result<(), Error> + }; + + + let _rt = join(client_ft,server_ft).await; + + Ok(()) + } + +} diff --git a/future-aio/src/write.rs b/future-aio/src/write.rs new file mode 100644 index 0000000000..de414dacca --- /dev/null +++ b/future-aio/src/write.rs @@ -0,0 +1,57 @@ + +use std::io; +use std::pin::Pin; +use std::mem::replace; +use std::task::Context; + +use futures::io::AsyncWrite; +use futures::Poll; +use futures::ready; +use futures::future::Future; + + + +/// Derived from future io Writeall, +/// Instead of buf restricted to[u8], it supports asref +#[derive(Debug)] +pub struct WriteBufAll<'a, W: ?Sized + 'a + Unpin,B> { + writer: &'a mut W, + buf: B +} + +// Pinning is never projected to fields +impl Unpin for WriteBufAll<'_, W,B> {} + +impl<'a, W: AsyncWrite + ?Sized + Unpin,B> WriteBufAll<'a, W,B > { + pub(super) fn new(writer: &'a mut W, buf: B) -> Self { + WriteBufAll { writer, buf } + } +} + +impl Future for WriteBufAll<'_, W,B> where B: AsRef<[u8]> { + type Output = io::Result<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let this = &mut *self; + let mut buf = this.buf.as_ref(); + while !buf.is_empty() { + let n = ready!(Pin::new(&mut this.writer).poll_write(cx, buf))?; + { + let (_, rest) = replace(&mut buf, &[]).split_at(n); + buf = rest; + } + if n == 0 { + return Poll::Ready(Err(io::ErrorKind::WriteZero.into())) + } + } + + Poll::Ready(Ok(())) + } +} + +pub trait AsyncWrite2: AsyncWrite + Unpin { + + fn write_buf_all<'a,B>(&'a mut self, buf: B) -> WriteBufAll<'a, Self,B> where B: AsRef<[u8]> { + WriteBufAll::new(self, buf) + } +} diff --git a/future-aio/src/zero_copy.rs b/future-aio/src/zero_copy.rs new file mode 100644 index 0000000000..bb1518e70b --- /dev/null +++ b/future-aio/src/zero_copy.rs @@ -0,0 +1,181 @@ + +use std::io::Error as IoError; +use std::fmt; + +#[cfg(unix)] +use std::os::unix::io::AsRawFd; + +use std::pin::Pin; +use std::task::Context; + +use futures::Future; +use futures::Poll; +use pin_utils::pin_mut; +use nix::sys::sendfile::sendfile; +use nix::Error as NixError; + +use crate::fs::AsyncFileSlice; +use crate::asyncify; + +/// zero copy write +pub trait ZeroCopyWrite { + + + fn zero_copy_write<'a>(&'a mut self, source: &'a AsyncFileSlice) -> ZeroCopyFuture<'a,Self> { + ZeroCopyFuture { + source, + writer: self + } + } + +} + + + +#[derive(Debug)] +pub enum SendFileError { + IoError(IoError), + NixError(NixError) +} + + +impl fmt::Display for SendFileError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::IoError(err) => write!(f, "{}", err), + Self::NixError(err) => write!(f,"{}",err), + } + } +} + + + +impl From for SendFileError { + fn from(error: IoError) -> Self { + SendFileError::IoError(error) + } +} + +impl From for SendFileError { + fn from(error: NixError) -> Self { + SendFileError::NixError(error) + } +} + +/// similar to 
WriteAll
+#[allow(dead_code)]
+pub struct ZeroCopyFuture<'a, W: ?Sized + 'a> {
+ writer: &'a mut W,
+ source: &'a AsyncFileSlice,
+}
+
+impl<'a, W> ZeroCopyFuture<'a, W> {
+
+ #[allow(dead_code)]
+ pub fn new(writer: &'a mut W, source: &'a AsyncFileSlice) -> ZeroCopyFuture<'a, W> {
+ ZeroCopyFuture {
+ writer,
+ source
+ }
+ }
+}
+
+impl<W> Future for ZeroCopyFuture<'_, W> where W: ?Sized + AsRawFd {
+
+ type Output = Result<usize, SendFileError>;
+
+ fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
+
+ let size = self.source.len();
+ let target_fd = self.writer.as_raw_fd();
+ let source_fd = self.source.as_raw_fd();
+
+ #[cfg(target_os="linux")]
+ let ft = asyncify( move || {
+ let mut offset = self.source.position() as i64;
+ sendfile(target_fd, source_fd, Some(&mut offset), size as usize).map_err(|err| err.into())
+ });
+
+
+ #[cfg(target_os="macos")]
+ let ft = asyncify( move || {
+ let offset = self.source.position();
+ log::trace!("mac zero copy source fd: {} offset: {} len: {}, target: fd{}",source_fd,offset,size,target_fd);
+ let (res,len) = sendfile(source_fd,target_fd,offset as i64,Some(size as i64),None,None);
+ match res {
+ Ok(_) => {
+ log::trace!("mac zero copy bytes transferred: {}",len);
+ Ok(len as usize)
+ },
+ Err(err) => Err(err.into())
+ }
+ });
+
+ pin_mut!(ft);
+ ft.poll(cx)
+
+ }
+
+}
+
+
+#[cfg(test)]
+mod tests {
+
+ use std::net::TcpListener;
+ use std::net::SocketAddr;
+ use std::io::Error;
+ use std::thread;
+ use std::time;
+ use std::io::Read;
+
+
+ use log::debug;
+ use future_helper::test_async;
+
+ use crate::fs::AsyncFile;
+ use crate::net::AsyncTcpStream;
+ use super::ZeroCopyWrite;
+ use super::SendFileError;
+
+
+ #[test_async]
+ async fn test_copy() -> Result<(),SendFileError> {
+
+ let handle = thread::spawn(move || {
+ let listener = TcpListener::bind("127.0.0.1:9999")?;
+
+ for st_res in listener.incoming() {
+ let mut stream = st_res?;
+ debug!("server: got connection. 
waiting"); + let mut buf = [0; 30]; + let len = stream.read(&mut buf)?; + assert_eq!(len,30); + return Ok(()) as Result<(),Error> + + } + Ok(()) as Result<(),Error> + }); + + // test data + let file = AsyncFile::open("test-data/apirequest.bin").await?; + thread::sleep(time::Duration::from_millis(100)); // give time for server to come up + let addr = "127.0.0.1:9999".parse::().expect("parse"); + + let mut stream = AsyncTcpStream::connect(&addr).await?; + + let fslice = file.as_slice(0,None).await?; + stream.zero_copy_write(&fslice).await?; + match handle.join() { + Err(_) => assert!(false,"thread not finished"), + _ => () + } + + Ok(()) + } + +} + + + + diff --git a/future-aio/test-data/apirequest.bin b/future-aio/test-data/apirequest.bin new file mode 100644 index 0000000000..dfe2683fba Binary files /dev/null and b/future-aio/test-data/apirequest.bin differ diff --git a/future-helper-03/Cargo.toml b/future-helper-03/Cargo.toml new file mode 100644 index 0000000000..ae1c4b2faf --- /dev/null +++ b/future-helper-03/Cargo.toml @@ -0,0 +1,28 @@ +[package] +edition = "2018" +name = "future-helper" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] +description = "friendly wrapper around rust future and tokio" + +[features] +default = ["tokio1"] +fixture = ["async-test-derive"] +tokio2 = ["tokio_2"] +tokio1 = ["tokio_1","futures_1"] + +[dependencies] +log = "0.4.8" +tokio_2 = { version = "0.2.0-alpha.1", package = "tokio", optional = true } +tokio_1 = { version = "0.1.18", package = "tokio", optional = true } +bytes = "0.4.12" +futures-preview = { version = "0.3.0-alpha.17", features = ["compat"] } +futures_1 = { version = "0.1.25", package = "futures", optional = true } +pin-utils = "0.1.0-alpha.4" +async-test-derive = { path = "async-test-derive", optional = true } + + + +[dev-dependencies] +lazy_static = "1.2.0" +utils = { path= "../utils"} diff --git a/future-helper-03/async-test-derive/Cargo.toml b/future-helper-03/async-test-derive/Cargo.toml new file mode 100644 index 0000000000..763c53c335 --- /dev/null +++ b/future-helper-03/async-test-derive/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "async-test-derive" +version = "0.1.0-alpha.1" +edition = "2018" +authors = ["fluvio.io"] + +[lib] +proc-macro = true + +[dependencies] +proc-macro2 = "0.4.24" +quote = "0.6.10" +syn = { version = "0.15.23", features = ["full"] } +log = "0.4.6" diff --git a/future-helper-03/async-test-derive/src/lib.rs b/future-helper-03/async-test-derive/src/lib.rs new file mode 100644 index 0000000000..80fe467528 --- /dev/null +++ b/future-helper-03/async-test-derive/src/lib.rs @@ -0,0 +1,42 @@ +extern crate proc_macro; + +use proc_macro::TokenStream; +use quote::quote; +use syn::ItemFn; +use syn::Ident; +use proc_macro2::Span; + + +#[proc_macro_attribute] +pub fn test_async(_attr: TokenStream, item: TokenStream) -> TokenStream { + + let input = syn::parse_macro_input!(item as ItemFn); + let name = &input.ident; + let sync_name = format!("{}_sync",name); + let out_fn_iden = Ident::new(&sync_name, Span::call_site()); + + let expression = quote! 
{ + + #[test] + fn #out_fn_iden() { + + use future_helper::FutureHelper; + + utils::init_logger(); + + #input + + let ft = async { + #name().await + }; + + if let Err(err) = future_helper::run_block_on(ft) { + assert!(false,"error: {:?}",err); + } + + } + }; + + expression.into() + +} \ No newline at end of file diff --git a/future-helper-03/rust-toolchain b/future-helper-03/rust-toolchain new file mode 100644 index 0000000000..9ea1223ad3 --- /dev/null +++ b/future-helper-03/rust-toolchain @@ -0,0 +1 @@ +nightly-2019-08-27 \ No newline at end of file diff --git a/future-helper-03/src/lib.rs b/future-helper-03/src/lib.rs new file mode 100644 index 0000000000..5c6192559a --- /dev/null +++ b/future-helper-03/src/lib.rs @@ -0,0 +1,178 @@ +#![feature(trace_macros)] + +#[cfg(feature = "fixture")] +mod test_util; +mod util; + +#[cfg(feature = "fixture")] +pub use async_test_derive::test_async; + +pub use util::sleep; + +#[cfg(feature = "tokio2")] +pub mod tk { + pub use tokio_2::runtime::Runtime; + pub use tokio_2::spawn; + pub use tokio_2::net::TcpStream as TkTcpStream; + pub use tokio_2::net::TcpListener as TkTcpListner; +} + +#[cfg(not(feature = "tokio2"))] +pub mod tk { + pub use tokio_1::runtime::Runtime; + pub use tokio_1::spawn; + pub use tokio_1::net::TcpStream as TkTcpStream; + pub use tokio_1::net::TcpListener as TkTcpListner; +} + +#[cfg(not(feature = "tokio2"))] +use futures_1::Future as Future01; +use futures::future::Future; +use futures::future::FutureExt; +use futures::future::TryFutureExt; +use log::trace; +use log::error; + + +/// run tokio loop, pass runtime as parameter to closure +/// this differ from tokio run which uses global spawn +pub fn run(spawn_closure: F) +where + F: Future + Send + 'static +{ + #[cfg(feature = "tokio2")] { + match tk::Runtime::new() { + Ok(rt) => { + rt.spawn(spawn_closure); + rt.shutdown_on_idle(); + }, + Err(err) => error!("can't create runtime: {}",err) + } + } + + #[cfg(not(feature = "tokio2"))] + match tk::Runtime::new() { + Ok(mut rt) => { + rt.spawn(futures_1::lazy(|| { + spawn(spawn_closure); + Ok(()) + })); + rt.shutdown_on_idle().wait().unwrap(); + }, + Err(err) => error!("can't create runtime: {}",err) + } +} + +/// use new future API +pub trait FutureHelper { + /// block until closure is completed + fn block_on_ft3(&mut self, f: F) -> Result + where + R: Send + 'static, + E: Send + 'static, + F: Send + 'static + Future>; + + fn block_on_all_ft3(self, f: F) -> Result + where + R: Send + 'static, + E: Send + 'static, + F: Send + 'static + Future>; + + + /// spawn closure + fn spawn3(&mut self, future: F) -> &mut Self + where + F: Future> + Send + 'static; +} + + +#[cfg(not(feature = "tokio2"))] +pub fn spawn1(future: F) +where + F: Future> + Send + 'static, +{ + tk::spawn(future.boxed().compat()); +} + + +pub fn spawn(future: F) +where + F: Future + 'static + Send, +{ + trace!("spawning future"); + + #[cfg(feature = "tokio2")] + tk::spawn(future); + + #[cfg(not(feature = "tokio2"))] + spawn1(async { + future.await; + Ok(()) as Result<(),()> + }); + +} + + +/// create new executor and block until futures is completed +#[cfg(not(feature = "tokio2"))] +pub fn run_block_on(f:F) -> Result + where + R: Send + 'static, + E: Send + 'static, + F: Send + 'static + Future> +{ + tk::Runtime::new().unwrap().block_on(Box::pin(f).compat()) +} + + +/// run block for i/o bounded futures +/// this is work around tokio runtime issue + #[cfg(feature = "tokio2")] +pub fn run_block_on(f:F) -> F::Output + where + F: Send + 'static + Future, + F::Output: Send + 
std::fmt::Debug +{ + let (tx, rx) = tokio_2::sync::oneshot::channel(); + let rt = tokio_2::runtime::Runtime::new().unwrap(); + rt.spawn(async move { + tx.send(f.await).unwrap(); + }); + rt.block_on(rx).unwrap() +} + + + +#[cfg(test)] +mod test { + + use lazy_static::lazy_static; + use std::sync::Arc; + use std::sync::Mutex; + use std::{thread, time}; + + use super::run; + use super::spawn; + + + #[test] + fn test_spawn3() { + lazy_static! { + static ref COUNTER: Arc> = Arc::new(Mutex::new(0)); + } + + assert_eq!(*COUNTER.lock().unwrap(), 0); + + let ft = async { + thread::sleep(time::Duration::from_millis(100)); + *COUNTER.lock().unwrap() = 10; + }; + + run(async { + spawn(ft); + }); + + assert_eq!(*COUNTER.lock().unwrap(), 10); + } + +} diff --git a/future-helper-03/src/test_util.rs b/future-helper-03/src/test_util.rs new file mode 100644 index 0000000000..dce0dcecfe --- /dev/null +++ b/future-helper-03/src/test_util.rs @@ -0,0 +1,104 @@ +/// run async expression and assert based on result value +#[macro_export] +macro_rules! assert_async_block { + ($ft_exp:expr) => {{ + + let ft = $ft_exp; + match future_helper::run_block_on(ft) { + Ok(_) => log::debug!("finished run"), + Err(err) => assert!(false,"error {:?}",err) + } + + }}; +} + + + +#[cfg(test)] +mod test { + + use std::io::Error; + use std::pin::Pin; + use std::task::Context; + use std::task::Poll; + + use futures::Future; + use futures::future::poll_fn; + + + use crate::test_async; + + // fake future helper so that this can be resolved in this crate + mod future_helper { + pub use crate::run_block_on; + pub use crate::FutureHelper; + pub use crate::assert_async_block; + } + + + // actual test run + + #[test_async] + async fn async_derive_test() -> Result<(),Error> { + assert!(true,"I am live"); + Ok(()) + } + + + + #[test] + fn test_1_sync_example () { + + async fn test_1() -> Result<(),Error>{ + assert!(true,"works"); + Ok(()) + } + + let ft = async { + test_1().await + }; + + assert_async_block!(ft); + } + + + struct TestFuture { + + } + + impl Future for TestFuture { + + type Output = u16; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context) -> Poll { + Poll::Ready(2) + } + + } + + + #[test_async] + async fn test_future() -> Result<(),Error> { + + let t = TestFuture{}; + let v: u16 = t.await; + assert_eq!(v,2); + Ok(()) + } + + + fn test_poll(_cx: &mut Context) -> Poll { + Poll::Ready(4) + } + + #[test_async] + async fn test_future_with_poll() -> Result<(),Error> { + + assert_eq!(poll_fn(test_poll).await,4); + Ok(()) + } + + + + +} \ No newline at end of file diff --git a/future-helper-03/src/util.rs b/future-helper-03/src/util.rs new file mode 100644 index 0000000000..69c1aaeeaf --- /dev/null +++ b/future-helper-03/src/util.rs @@ -0,0 +1,89 @@ + +pub use fusable::FusableFuture; + +#[cfg(feature = "tokio2")] +pub use sleep_03::sleep; +#[cfg(not(feature = "tokio2"))] +pub use sleep_01::sleep; + + +#[cfg(feature = "tokio2")] +mod sleep_03 { + + use std::time::Duration; + use std::time::Instant; + + use tokio_2::timer::Delay; + use super::FusableFuture; + + pub type FuseableDelay = FusableFuture; + + pub fn sleep(duration: Duration) -> FuseableDelay { + let delay = Delay::new(Instant::now() + duration); + FuseableDelay::new(delay) + } +} + +#[cfg(not(feature = "tokio2"))] +mod sleep_01 { + + use std::time::Duration; + use std::time::Instant; + + use tokio_1::timer::Delay; + use futures::compat::Future01CompatExt; + use futures::future::FutureExt; + use futures::future::Future; + use super::FusableFuture; + + pub fn sleep(duration: 
Duration) -> impl Future { + let delay = Delay::new(Instant::now() + duration).compat(); + FusableFuture::new(delay.map(|_| ())) + } + + +} + +mod fusable { + + use std::task::Context; + use std::task::Poll; + use std::pin::Pin; + + + use futures::Future; + + + /// add unpin to the arbitrary future which can be potentially unpinned + pub struct FusableFuture { + inner: F + } + + impl Unpin for FusableFuture{} + + impl FusableFuture { + pin_utils::unsafe_pinned!(inner: F); + } + + impl FusableFuture { + + #[allow(unused)] + pub fn new(inner: F) -> Self { + Self { + inner + } + } + } + + impl Future for FusableFuture where F: Future { + + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + + self.inner().poll(cx) + } + + } + +} diff --git a/future-helper/Cargo.toml b/future-helper/Cargo.toml new file mode 100644 index 0000000000..07ea50f7a6 --- /dev/null +++ b/future-helper/Cargo.toml @@ -0,0 +1,23 @@ +[package] +edition = "2018" +name = "future-helper" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] +description = "friendly wrapper around rust future and tokio" + +[features] +fixture = ["async-test-derive"] + +[dependencies] +log = "0.4.8" +tokio_1 = { version = "0.1.18", package = "tokio"} +bytes = "0.4.12" +futures-preview = { version = "0.3.0-alpha.17", features = ["compat"] } +futures_1 = { version = "0.1.25", package = "futures" } +pin-utils = "0.1.0-alpha.4" +async-test-derive = { path = "async-test-derive", optional = true } + + +[dev-dependencies] +lazy_static = "1.2.0" +utils = { path= "../utils"} diff --git a/future-helper/async-test-derive/Cargo.toml b/future-helper/async-test-derive/Cargo.toml new file mode 100644 index 0000000000..763c53c335 --- /dev/null +++ b/future-helper/async-test-derive/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "async-test-derive" +version = "0.1.0-alpha.1" +edition = "2018" +authors = ["fluvio.io"] + +[lib] +proc-macro = true + +[dependencies] +proc-macro2 = "0.4.24" +quote = "0.6.10" +syn = { version = "0.15.23", features = ["full"] } +log = "0.4.6" diff --git a/future-helper/async-test-derive/src/lib.rs b/future-helper/async-test-derive/src/lib.rs new file mode 100644 index 0000000000..80fe467528 --- /dev/null +++ b/future-helper/async-test-derive/src/lib.rs @@ -0,0 +1,42 @@ +extern crate proc_macro; + +use proc_macro::TokenStream; +use quote::quote; +use syn::ItemFn; +use syn::Ident; +use proc_macro2::Span; + + +#[proc_macro_attribute] +pub fn test_async(_attr: TokenStream, item: TokenStream) -> TokenStream { + + let input = syn::parse_macro_input!(item as ItemFn); + let name = &input.ident; + let sync_name = format!("{}_sync",name); + let out_fn_iden = Ident::new(&sync_name, Span::call_site()); + + let expression = quote! 
{ + + #[test] + fn #out_fn_iden() { + + use future_helper::FutureHelper; + + utils::init_logger(); + + #input + + let ft = async { + #name().await + }; + + if let Err(err) = future_helper::run_block_on(ft) { + assert!(false,"error: {:?}",err); + } + + } + }; + + expression.into() + +} \ No newline at end of file diff --git a/future-helper/rust-toolchain b/future-helper/rust-toolchain new file mode 120000 index 0000000000..9327ba4034 --- /dev/null +++ b/future-helper/rust-toolchain @@ -0,0 +1 @@ +../rust-toolchain \ No newline at end of file diff --git a/future-helper/src/lib.rs b/future-helper/src/lib.rs new file mode 100644 index 0000000000..8f76a796cc --- /dev/null +++ b/future-helper/src/lib.rs @@ -0,0 +1,169 @@ +#![feature(trace_macros)] + +#[cfg(feature = "fixture")] +mod test_util; +mod util; + +#[cfg(feature = "fixture")] +pub use async_test_derive::test_async; + +pub use util::sleep; + +pub mod tk { + pub use tokio_1::runtime::Runtime; + pub use tokio_1::spawn; + pub use tokio_1::net::TcpStream as TkTcpStream; + pub use tokio_1::net::TcpListener as TkTcpListner; +} + + +use futures_1::Future as Future01; +use futures::future::Future; +use futures::future::FutureExt; +use futures::future::TryFutureExt; +use log::trace; +use log::error; + + +/// run tokio loop, pass runtime as parameter to closure +/// this differ from tokio run which uses global spawn +pub fn run(spawn_closure: F) +where + F: Future + Send + 'static +{ + #[cfg(feature = "tokio2")] { + match tk::Runtime::new() { + Ok(rt) => { + rt.spawn(spawn_closure); + rt.shutdown_on_idle(); + }, + Err(err) => error!("can't create runtime: {}",err) + } + } + + #[cfg(not(feature = "tokio2"))] + match tk::Runtime::new() { + Ok(mut rt) => { + rt.spawn(futures_1::lazy(|| { + spawn(spawn_closure); + Ok(()) + })); + rt.shutdown_on_idle().wait().unwrap(); + }, + Err(err) => error!("can't create runtime: {}",err) + } +} + +/// use new future API +pub trait FutureHelper { + /// block until closure is completed + fn block_on_ft3(&mut self, f: F) -> Result + where + R: Send + 'static, + E: Send + 'static, + F: Send + 'static + Future>; + + fn block_on_all_ft3(self, f: F) -> Result + where + R: Send + 'static, + E: Send + 'static, + F: Send + 'static + Future>; + + + /// spawn closure + fn spawn3(&mut self, future: F) -> &mut Self + where + F: Future> + Send + 'static; +} + + +#[cfg(not(feature = "tokio2"))] +pub fn spawn1(future: F) +where + F: Future> + Send + 'static, +{ + tk::spawn(future.boxed().compat()); +} + + +pub fn spawn(future: F) +where + F: Future + 'static + Send, +{ + trace!("spawning future"); + + #[cfg(feature = "tokio2")] + tk::spawn(future); + + #[cfg(not(feature = "tokio2"))] + spawn1(async { + future.await; + Ok(()) as Result<(),()> + }); + +} + + +/// create new executor and block until futures is completed +#[cfg(not(feature = "tokio2"))] +pub fn run_block_on(f:F) -> Result + where + R: Send + 'static, + E: Send + 'static, + F: Send + 'static + Future> +{ + tk::Runtime::new().unwrap().block_on(Box::pin(f).compat()) +} + + +/// run block for i/o bounded futures +/// this is work around tokio runtime issue + #[cfg(feature = "tokio2")] +pub fn run_block_on(f:F) -> F::Output + where + F: Send + 'static + Future, + F::Output: Send + std::fmt::Debug +{ + let (tx, rx) = tokio_2::sync::oneshot::channel(); + let rt = tokio_2::runtime::Runtime::new().unwrap(); + rt.spawn(async move { + tx.send(f.await).unwrap(); + }); + rt.block_on(rx).unwrap() +} + + + +#[cfg(test)] +mod test { + + use lazy_static::lazy_static; + use 
std::sync::Arc; + use std::sync::Mutex; + use std::{thread, time}; + + use super::run; + use super::spawn; + + + #[test] + fn test_spawn3() { + lazy_static! { + static ref COUNTER: Arc> = Arc::new(Mutex::new(0)); + } + + assert_eq!(*COUNTER.lock().unwrap(), 0); + + let ft = async { + thread::sleep(time::Duration::from_millis(100)); + *COUNTER.lock().unwrap() = 10; + }; + + run(async { + spawn(ft); + }); + + assert_eq!(*COUNTER.lock().unwrap(), 10); + } + +} diff --git a/future-helper/src/test_util.rs b/future-helper/src/test_util.rs new file mode 100644 index 0000000000..dce0dcecfe --- /dev/null +++ b/future-helper/src/test_util.rs @@ -0,0 +1,104 @@ +/// run async expression and assert based on result value +#[macro_export] +macro_rules! assert_async_block { + ($ft_exp:expr) => {{ + + let ft = $ft_exp; + match future_helper::run_block_on(ft) { + Ok(_) => log::debug!("finished run"), + Err(err) => assert!(false,"error {:?}",err) + } + + }}; +} + + + +#[cfg(test)] +mod test { + + use std::io::Error; + use std::pin::Pin; + use std::task::Context; + use std::task::Poll; + + use futures::Future; + use futures::future::poll_fn; + + + use crate::test_async; + + // fake future helper so that this can be resolved in this crate + mod future_helper { + pub use crate::run_block_on; + pub use crate::FutureHelper; + pub use crate::assert_async_block; + } + + + // actual test run + + #[test_async] + async fn async_derive_test() -> Result<(),Error> { + assert!(true,"I am live"); + Ok(()) + } + + + + #[test] + fn test_1_sync_example () { + + async fn test_1() -> Result<(),Error>{ + assert!(true,"works"); + Ok(()) + } + + let ft = async { + test_1().await + }; + + assert_async_block!(ft); + } + + + struct TestFuture { + + } + + impl Future for TestFuture { + + type Output = u16; + + fn poll(self: Pin<&mut Self>, _cx: &mut Context) -> Poll { + Poll::Ready(2) + } + + } + + + #[test_async] + async fn test_future() -> Result<(),Error> { + + let t = TestFuture{}; + let v: u16 = t.await; + assert_eq!(v,2); + Ok(()) + } + + + fn test_poll(_cx: &mut Context) -> Poll { + Poll::Ready(4) + } + + #[test_async] + async fn test_future_with_poll() -> Result<(),Error> { + + assert_eq!(poll_fn(test_poll).await,4); + Ok(()) + } + + + + +} \ No newline at end of file diff --git a/future-helper/src/util.rs b/future-helper/src/util.rs new file mode 100644 index 0000000000..69c1aaeeaf --- /dev/null +++ b/future-helper/src/util.rs @@ -0,0 +1,89 @@ + +pub use fusable::FusableFuture; + +#[cfg(feature = "tokio2")] +pub use sleep_03::sleep; +#[cfg(not(feature = "tokio2"))] +pub use sleep_01::sleep; + + +#[cfg(feature = "tokio2")] +mod sleep_03 { + + use std::time::Duration; + use std::time::Instant; + + use tokio_2::timer::Delay; + use super::FusableFuture; + + pub type FuseableDelay = FusableFuture; + + pub fn sleep(duration: Duration) -> FuseableDelay { + let delay = Delay::new(Instant::now() + duration); + FuseableDelay::new(delay) + } +} + +#[cfg(not(feature = "tokio2"))] +mod sleep_01 { + + use std::time::Duration; + use std::time::Instant; + + use tokio_1::timer::Delay; + use futures::compat::Future01CompatExt; + use futures::future::FutureExt; + use futures::future::Future; + use super::FusableFuture; + + pub fn sleep(duration: Duration) -> impl Future { + let delay = Delay::new(Instant::now() + duration).compat(); + FusableFuture::new(delay.map(|_| ())) + } + + +} + +mod fusable { + + use std::task::Context; + use std::task::Poll; + use std::pin::Pin; + + + use futures::Future; + + + /// add unpin to the arbitrary future 
which can potentially be unpinned + pub struct FusableFuture<F> { + inner: F + } + + impl<F> Unpin for FusableFuture<F>{} + + impl<F> FusableFuture<F> { + pin_utils::unsafe_pinned!(inner: F); + } + + impl<F> FusableFuture<F> { + + #[allow(unused)] + pub fn new(inner: F) -> Self { + Self { + inner + } + } + } + + impl<F> Future for FusableFuture<F> where F: Future { + + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + + self.inner().poll(cx) + } + + } + +} diff --git a/k8-client/Cargo.toml b/k8-client/Cargo.toml new file mode 100644 index 0000000000..aa4609ca34 --- /dev/null +++ b/k8-client/Cargo.toml @@ -0,0 +1,36 @@ +[package] +edition = "2018" +name = "k8-client" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + +[features] +k8 = [] +k8_stream = ["k8"] + + +[dependencies] +log = "0.4.6" +bytes = "0.4.12" +http = "0.1.16" +hyper = "0.12.33" +futures-preview = { version = "0.3.0-alpha.17", features = ["compat"]} +pin-utils = "0.1.0-alpha.4" +rustls = { version = "0.15.2", features = ["dangerous_configuration"] } +webpki = "0.19" +hyper-rustls = "0.16.1" +serde = { version ="1.0.98", features = ['derive'] } +serde_json = "1.0.40" +serde_qs = "0.5.0" +k8-metadata = { path = "../k8-metadata"} +future-helper = { path = "../future-helper" } +k8-diff = { path = "../k8-diff"} +k8-config = { path = "../k8-config"} +types = { path = "../types"} + +[dev-dependencies] +rand = "0.7.0" +k8-fixtures = { path = "k8-fixtures" } +lazy_static = "1.3.0" +future-helper = { path = "../future-helper", features=["fixture"]} +utils = { path = "../utils", features=["fixture"]} \ No newline at end of file diff --git a/k8-client/Makefile b/k8-client/Makefile new file mode 100644 index 0000000000..79cf411f85 --- /dev/null +++ b/k8-client/Makefile @@ -0,0 +1,6 @@ +run-integration-test: + cargo test --features=k8 + +# set up cluster roles +set-anonymous: + kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous \ No newline at end of file diff --git a/k8-client/README.md b/k8-client/README.md new file mode 100644 index 0000000000..32c7502b02 --- /dev/null +++ b/k8-client/README.md @@ -0,0 +1,28 @@ +# Kubernetes Rust Client + +This crate is used to get, list, update and delete Kubernetes objects. +It is similar to the Kubernetes Go client: https://github.com/kubernetes/client-go + +Make sure you have set up access to a Kubernetes cluster. + +## Create a simple nginx pod +```kubectl create -f https://k8s.io/examples/pods/simple-pod.yaml``` + +## Get pods using curl +```curl --header "Authorization: Bearer $TOKEN" --insecure $APISERVER/api/v1/namespaces/default/pods``` + +## Get topics +```curl --header "Authorization: Bearer $TOKEN" --insecure $APISERVER/apis/fluvio.infinyon.com/v1/namespaces/default/topics``` + + +## Update topic status + +```curl -X PUT -H "Content-Type: application/json" -d '{"apiVersion":"fluvio.infinyon.com/v1","kind":"Topic","metadata":{"name":"test","namespace":"default","resourceVersion":"3958"},"status":{"partitions":3}}' $APISERVER/apis/fluvio.infinyon.com/v1/namespaces/default/topics/test/status --header "Authorization: Bearer $TOKEN" --insecure``` + + +## Running K8 integration tests + +By default, K8-dependent features are turned off. To run the K8 integration tests, you must first apply the CRD objects in the ```kub-rs``` crate. + +The tests must be run from within the k8-client crate, since Rust does not yet support workspace-level features. 
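Before running the tests, it can help to smoke-test cluster access with a short program. The sketch below is illustrative only: the `main` wrapper is hypothetical, `K8Config::default()` is assumed to resolve your local kubeconfig, and the single type parameter on `retrieve_items` is an assumption about this crate's API.

```rust
use k8_client::{ClientError, K8Client, PodSpec};
use k8_config::K8Config;

fn main() -> Result<(), ClientError> {
    // assumption: the default config picks up ~/.kube/config (or the pod environment)
    let client = K8Client::new(K8Config::default())?;
    future_helper::run_block_on(async move {
        // list pods in the default namespace and print the count
        let pods = client.retrieve_items::<PodSpec>("default").await?;
        println!("found {} pods", pods.items.len());
        Ok(())
    })
}
```

Once the cluster is reachable, run: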
+```cargo test --features k8``` diff --git a/k8-client/k8-fixtures/Cargo.toml b/k8-client/k8-fixtures/Cargo.toml new file mode 100644 index 0000000000..bb84772455 --- /dev/null +++ b/k8-client/k8-fixtures/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "k8-fixtures" +edition = "2018" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + +[dependencies] +rand = "0.6.0" +serde = "1.0.76" +serde_derive = "1.0.76" +serde_json = "1.0.27" +k8-client = { path = "../../k8-client"} +k8-metadata = { path = "../../k8-metadata"} + diff --git a/k8-client/k8-fixtures/src/lib.rs b/k8-client/k8-fixtures/src/lib.rs new file mode 100644 index 0000000000..2a2e018ebe --- /dev/null +++ b/k8-client/k8-fixtures/src/lib.rs @@ -0,0 +1,5 @@ +mod test_fixtures; + +pub use self::test_fixtures::create_topic_stream_result; +pub use self::test_fixtures::TestTopicWatch; +pub use self::test_fixtures::TestTopicWatchList; diff --git a/k8-client/k8-fixtures/src/test_fixtures.rs b/k8-client/k8-fixtures/src/test_fixtures.rs new file mode 100644 index 0000000000..4ad818cc93 --- /dev/null +++ b/k8-client/k8-fixtures/src/test_fixtures.rs @@ -0,0 +1,88 @@ +use rand::prelude::*; +use std::env; +use std::ffi::OsStr; +use std::fs::File; +use std::io::Read; +use std::path::{Path, PathBuf}; + +use k8_metadata::core::metadata::K8Watch; +use k8_client::TokenStreamResult; +use k8_metadata::topic::{TopicSpec, TopicStatus}; + +// +// Topic Watch Fixtures +// + +pub type TestTopicWatchList = Vec; + +pub struct TestTopicWatch { + pub operation: String, + pub name: String, + pub partitions: i32, + pub replication: i32, + pub ignore_rack_assignment: Option, +} + +pub fn create_topic_watch(ttw: &TestTopicWatch) -> K8Watch { + let target_dir = get_target_dir(); + let path = get_top_dir(&target_dir); + let mut contents = String::new(); + let (filename, file_has_options) = if ttw.ignore_rack_assignment.is_none() { + ( + String::from("k8-client/k8-fixtures/data/topic_no_options.tmpl"), + false, + ) + } else { + ( + String::from("k8-client/k8-fixtures/data/topic_all.tmpl"), + true, + ) + }; + let f = File::open(path.join(filename)); + f.unwrap().read_to_string(&mut contents).unwrap(); + + contents = contents.replace("{type}", &*ttw.operation); + contents = contents.replace("{name}", &*ttw.name); + contents = contents.replace("{partitions}", &*ttw.partitions.to_string()); + contents = contents.replace("{replication}", &*ttw.replication.to_string()); + contents = contents.replace( + "{12_digit_rand}", + &*format!("{:012}", thread_rng().gen_range(0, 999999)), + ); + if file_has_options { + contents = contents.replace( + "{rack_assignment}", + &*ttw.ignore_rack_assignment.unwrap().to_string(), + ); + } + serde_json::from_str(&contents).unwrap() +} + +pub fn create_topic_stream_result( + ttw_list: &TestTopicWatchList, +) -> TokenStreamResult { + let mut topic_watch_list = vec![]; + for ttw in ttw_list { + topic_watch_list.push(Ok(create_topic_watch(&ttw))); + } + Ok(topic_watch_list) +} + +// +// Utility APIs +// + +// Get absolute path to the "target" directory ("build" dir) +fn get_target_dir() -> PathBuf { + let bin = env::current_exe().expect("exe path"); + let mut target_dir = PathBuf::from(bin.parent().expect("bin parent")); + while target_dir.file_name() != Some(OsStr::new("target")) { + target_dir.pop(); + } + target_dir +} + +// Get absolute path to the project's top dir, given target dir +fn get_top_dir<'a>(target_dir: &'a Path) -> &'a Path { + target_dir.parent().expect("target parent") +} diff --git a/k8-client/rust-toolchain 
b/k8-client/rust-toolchain new file mode 120000 index 0000000000..9327ba4034 --- /dev/null +++ b/k8-client/rust-toolchain @@ -0,0 +1 @@ +../rust-toolchain \ No newline at end of file diff --git a/k8-client/src/client.rs b/k8-client/src/client.rs new file mode 100644 index 0000000000..8a79605559 --- /dev/null +++ b/k8-client/src/client.rs @@ -0,0 +1,545 @@ +use std::fmt::Debug; +use std::fmt::Display; + +use futures::compat::Future01CompatExt; +use futures::compat::Stream01CompatExt; +use futures::future::ready; +use futures::future::FutureExt; +use futures::stream::once; +use futures::stream::Stream; +use futures::stream::StreamExt; +use hyper; +use hyper::client::connect::HttpConnector; +use hyper::header::HeaderValue; +use hyper::header::ACCEPT; +use hyper::header::CONTENT_TYPE; +use hyper::http::request::Builder; +use hyper::rt::Stream as Stream01; +use hyper::Body; +use hyper::Client; +use hyper::Method; +use hyper::Request; +use hyper::StatusCode; +use hyper::Uri; +use log::debug; +use log::error; +use log::trace; +use serde::de::DeserializeOwned; +use serde::Serialize; +use serde_json; +use serde_json::Value; +use hyper_rustls::HttpsConnector; + +use k8_diff::Changes; +use k8_diff::Diff; +use k8_metadata::core::Crd; +use k8_metadata::core::Spec; +use k8_metadata::core::metadata::item_uri; +use k8_metadata::core::metadata::K8Meta; +use k8_metadata::core::metadata::items_uri; +use k8_metadata::core::metadata::InputK8Obj; +use k8_metadata::core::metadata::UpdateK8ObjStatus; +use k8_metadata::core::metadata::K8List; +use k8_metadata::core::metadata::K8Obj; +use k8_metadata::core::metadata::K8Status; +use k8_metadata::core::metadata::K8Watch; +use k8_metadata::core::options::ListOptions; +use k8_config::K8Config; + +use crate::wstream::WatchStream; +use crate::ClientError; +use crate::K8AuthHelper; + +// For error mapping: see: https://doc.rust-lang.org/nightly/core/convert/trait.From.html + +pub type TokenStreamResult = Result, ClientError>>, ClientError>; + +pub fn as_token_stream_result(events: Vec>) -> TokenStreamResult where S: Spec { + + Ok(events.into_iter().map(|event| Ok(event)).collect()) +} + +#[derive(Debug)] +pub enum ApplyResult +{ + None, + Created(K8Obj), + Patched(K8Obj) +} + +#[allow(dead_code)] +pub enum PatchMergeType { + Json, + JsonMerge, + StrategicMerge, // for aggegration API +} + +impl PatchMergeType { + fn for_spec(crd: &Crd) -> Self { + match crd.group { + "core" => PatchMergeType::StrategicMerge, + "apps" => PatchMergeType::StrategicMerge, + _ => PatchMergeType::JsonMerge, + } + } + + fn content_type(&self) -> &'static str { + match self { + PatchMergeType::Json => "application/json-patch+json", + PatchMergeType::JsonMerge => "application/merge-patch+json", + PatchMergeType::StrategicMerge => "application/strategic-merge-patch+json", + } + } +} + +/// used for comparing spec, +#[derive(Serialize, Debug, Clone)] +struct DiffSpec { + spec: S, +} + +impl DiffSpec +where + S: Serialize, +{ + fn from(spec: S) -> Self { + DiffSpec { spec } + } +} + +/// K8 Cluster accessible thru API +#[derive(Debug)] +pub struct K8Client { + client: Client>, + auth_helper: K8AuthHelper, + host: String +} + +impl K8Client { + pub fn new(config: K8Config) -> Result { + + let helper = K8AuthHelper::new(config); + match helper.build_https_connector() { + Ok(https) => { + let hyper_client = Client::builder().build::<_, (Body)>(https); + let host = helper.config.api_path().to_owned(); + + Ok(Self { + auth_helper: helper, + client: hyper_client, + host + }) + } + Err(err) => { + 
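+ // NOTE: a failed TLS connector build is treated as fatal: the error is logged and the process exits instead of returning Err to the caller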
error!("error getting k8 client: {}", err); + std::process::exit(-1); + } + } + } + + /// handle request. this is async function + async fn handle_request( + &self, + req_result: Result, ClientError>, + ) -> Result + where + T: DeserializeOwned, + { + let req = req_result?; + + let resp = self.client.request(req).compat().await?; + let status = resp.status(); + debug!("response status: {:#?}", status); + + if status == StatusCode::NOT_FOUND { + return Err(ClientError::NotFound); + } + + let body = resp.into_body().concat2().compat().await?; + serde_json::from_slice(&body).map_err(|err| { + error!("parser error: {}", err); + let v = body.to_vec(); + let raw = String::from_utf8_lossy(&v).to_string(); + error!("raw: {}", raw); + let v: serde_json::Value = serde_json::from_slice(&body).expect("this shoud parse"); + trace!("json struct: {:#?}", v); + err.into() + }) + } + + // build default request for uri + fn default_req(&self, uri: Uri) -> Builder { + let mut builder = Request::builder(); + builder.uri(uri); + + match &self.auth_helper.config { + K8Config::Pod(pod_config) => { + trace!("setting bearer token from pod config"); + builder.header("Authorization",pod_config.token.to_owned()); + }, + _ => {} + } + + builder + } + + /// return stream based on uri + fn stream(&self, uri: Uri) -> impl Stream> + where + K8Watch: DeserializeOwned, + S: Spec + Debug, + S::Status: Debug, + { + debug!("streaming: {}", uri); + let req = self + .default_req(uri) + .method(Method::GET) + .body(Body::empty()) + .unwrap(); + + let req_ft = self.client.request(req).compat(); + + let ft = async move { + let resp = req_ft.await.unwrap(); + trace!("res status: {}", resp.status()); + trace!("res header: {:#?}", resp.headers()); + resp.into_body().compat() + }; + + WatchStream::new(ft.flatten_stream()).map(|chunks_result| { + chunks_result + .map(|chunk_list| { + chunk_list + .into_iter() + .map(|chunk_result| match chunk_result { + Ok(chunk) => { + let slice = chunk.as_slice(); + let result: Result, serde_json::Error> = + serde_json::from_slice(slice).map_err(|err| { + error!("parsing error: {}", err); + error!( + "line: {}", + String::from_utf8_lossy(slice).to_string() + ); + err + }); + match result { + Ok(obj) => { + trace!("deserialized: {:#?}", obj); + Ok(obj) + } + Err(err) => Err(err.into()), + } + } + Err(err) => Err(err.into()), + }) + .collect() + }) + .map_err(|err| err.into()) + }) + } + + fn hostname(&self) -> &str { + &self.host + } + + /// retrieval a single item + pub async fn retrieve_item( + &self, + metadata: &M + ) -> Result, ClientError> + where + K8Obj: DeserializeOwned, + S: Spec, + M: K8Meta + { + let uri = metadata.item_uri(self.hostname()); + debug!("retrieving item: {}", uri); + + let req = self + .default_req(uri) + .method(Method::GET) + .body(Body::empty()) + .map_err(|e| e.into()); + + self.handle_request(req).await + } + + pub async fn retrieve_items( + &self, + namespace: &str, + ) -> Result, ClientError> + where + K8List: DeserializeOwned, + S: Spec, + { + let uri = items_uri::(self.hostname(), namespace, None); + + debug!("retrieving items: {}", uri); + + let req = self + .default_req(uri) + .method(Method::GET) + .body(Body::empty()) + .map_err(|e| e.into()); + + self.handle_request(req).await + } + + pub async fn delete_item( + &self, + metadata: &M, + ) -> Result + where + S: Spec, + M: K8Meta + { + let uri = metadata.item_uri(self.hostname()); + debug!("delete item wiht url: {}", uri); + + let req = self + .default_req(uri) + .method(Method::DELETE) + .body(Body::empty()) + 
.map_err(|e| e.into()); + + self.handle_request(req).await + } + + pub async fn create_item( + &self, + value: InputK8Obj, + ) -> Result, ClientError> + where + InputK8Obj: Serialize + Debug, + K8Obj: DeserializeOwned, + S: Spec, + { + let uri = items_uri::(self.hostname(), &value.metadata.namespace, None); + debug!("creating '{}'", uri); + trace!("creating RUST {:#?}", &value); + + let req = || -> Result<_, ClientError> { + let bytes = serde_json::to_vec(&value)?; + + trace!( + "create raw: {}", + String::from_utf8_lossy(&bytes).to_string() + ); + + self.default_req(uri) + .method(Method::POST) + .header(CONTENT_TYPE, HeaderValue::from_static("application/json")) + .body(Body::from(bytes)) + .map_err(|e| e.into()) + }(); + + self.handle_request(req).await + } + + /// apply object, this is similar to ```kubectl apply``` + /// for now, this doesn't do any optimization + /// if object doesn't exist, it will be created + /// if object exist, it will be patched by using strategic merge diff + pub async fn apply( + &self, + value: InputK8Obj, + ) -> Result, ClientError> + where + InputK8Obj: Serialize + Debug, + K8Obj: DeserializeOwned + Debug, + S: Spec + Serialize + Debug + Clone, + { + debug!("applying '{}' changes", value.metadata.name); + trace!("applying {:#?}", value); + + match self.retrieve_item(&value.metadata).await { + Ok(item) => { + let mut old_spec = item.spec; + old_spec.make_same(&value.spec); + // we don't care about status + let new_spec = serde_json::to_value(DiffSpec::from(value.spec.clone()))?; + let old_spec = serde_json::to_value(DiffSpec::from(old_spec))?; + let diff = old_spec.diff(&new_spec)?; + match diff { + Diff::None => { + debug!("no diff detected, doing nothing"); + Ok(ApplyResult::None) + } + Diff::Patch(p) => { + let json_diff = serde_json::to_value(p)?; + debug!("detected diff: old vs. 
new spec"); + trace!("new spec: {:#?}", &new_spec); + trace!("old spec: {:#?}", &old_spec); + trace!("new/old diff: {:#?}", json_diff); + let patch_result = self.patch_spec(&value.metadata, &json_diff).await?; + Ok(ApplyResult::Patched(patch_result)) + } + _ => Err(ClientError::PatchError), + } + } + Err(err) => match err { + ClientError::NotFound => { + debug!("item '{}' not found, creating ...", value.metadata.name); + let created_item = self.create_item(value.into()).await?; + return Ok(ApplyResult::Created(created_item)); + } + _ => return Err(err), + }, + } + } + + /// update status + pub async fn update_status( + &self, + value: &UpdateK8ObjStatus, + ) -> Result, ClientError> + where + UpdateK8ObjStatus: Serialize + Debug, + K8Obj: DeserializeOwned, + S: Spec + { + let uri = item_uri::( + self.hostname(), + &value.metadata.name, + &value.metadata.namespace, + Some("/status"), + ); + debug!("updating '{}' status - uri: {}", value.metadata.name, uri); + trace!("update: {:#?}", &value); + + let req = || -> Result<_, ClientError> { + let bytes = serde_json::to_vec(&value)?; + trace!( + "update raw: {}", + String::from_utf8_lossy(&bytes).to_string() + ); + self.default_req(uri) + .method(Method::PUT) + .header(CONTENT_TYPE, HeaderValue::from_static("application/json")) + .body(Body::from(bytes)) + .map_err(|e| e.into()) + }(); + + self.handle_request(req).await + } + + /// patch existing with spec + pub async fn patch_spec( + &self, + metadata: &M, + patch: &Value, + ) -> Result, ClientError> + where + K8Obj: DeserializeOwned, + S: Spec + Debug, + M: K8Meta + Display + { + debug!("patching item at '{}'", metadata); + trace!("patch json value: {:#?}", patch); + let uri = metadata.item_uri(self.hostname()); + let merge_type = PatchMergeType::for_spec(S::metadata()); + + let req = || -> Result<_, ClientError> { + let bytes = serde_json::to_vec(&patch)?; + + trace!("patch raw: {}", String::from_utf8_lossy(&bytes).to_string()); + + self.default_req(uri) + .method(Method::PATCH) + .header(ACCEPT, HeaderValue::from_static("application/json")) + .header( + CONTENT_TYPE, + HeaderValue::from_static(merge_type.content_type()), + ) + .body(Body::from(bytes)) + .map_err(|e| e.into()) + }(); + + self.handle_request(req).await + } + + /// stream items since resource versions + pub fn watch_stream_since( + &self, + namespace: &str, + resource_version: Option, + ) -> impl Stream> + where + K8Watch: DeserializeOwned, + S: Spec + Debug, + S::Status: Debug, + { + + let opt = ListOptions { + watch: Some(true), + resource_version, + timeout_seconds: Some(3600), + ..Default::default() + }; + let uri = items_uri::(self.hostname(), namespace, Some(&opt)); + self.stream(uri) + } + + /// return all list of items and future changes as stream + pub fn watch_stream_now( + &self, + ns: String, + ) -> impl Stream> + '_ + where + K8Watch: DeserializeOwned, + K8List: DeserializeOwned, + S: Spec + Debug, + S::Status: Debug, + { + // future + let ft_stream = async move { + let namespace = ns.as_ref(); + let items_ft = self.retrieve_items(namespace); + let item_now_result = items_ft.await; + + match item_now_result { + Ok(item_now_list) => { + let resource_version = item_now_list.metadata.resource_version; + + let items_watch_stream = + self.watch_stream_since(namespace, Some(resource_version)); + + let items_list = item_now_list + .items + .into_iter() + .map(|item| Ok(K8Watch::ADDED(item))) + .collect(); + let list_stream = once(ready(Ok(items_list))); + + list_stream.chain(items_watch_stream).left_stream() + // 
list_stream + } + Err(err) => once(ready(Err(err))).right_stream(), + } + }; + + ft_stream.flatten_stream() + } + + /// Check if the object exists, return true or false. + pub async fn exists( + &self, + metadata: &M, + ) -> Result + where + K8Obj: DeserializeOwned + Serialize + Debug + Clone, + S: Spec + Serialize + Debug, + M: K8Meta + Display + { + debug!("check if '{}' exists", metadata); + + match self.retrieve_item(metadata).await { + Ok(_) => Ok(true), + Err(err) => match err { + ClientError::NotFound => Ok(false), + _ => Err(err), + }, + } + } +} diff --git a/k8-client/src/config.rs b/k8-client/src/config.rs new file mode 100644 index 0000000000..3b3e3c3582 --- /dev/null +++ b/k8-client/src/config.rs @@ -0,0 +1,142 @@ + +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::fs::File; +use std::path::Path; +use std::io::Read; +use std::io::BufReader; +use std::sync::Arc; + + +use log::debug; +use log::trace; +use hyper_rustls::HttpsConnector; +use rustls::ClientConfig; +use rustls::PrivateKey; +use rustls::Certificate; +use rustls::ServerCertVerifier; +use rustls::RootCertStore; +use rustls::ServerCertVerified; +use rustls::internal::pemfile::certs; +use rustls::internal::pemfile::rsa_private_keys; +use rustls::TLSError; +use hyper::client::HttpConnector; +use webpki::DNSNameRef; + + +use k8_config::K8Config; +use k8_config::PodConfig; +use k8_config::KubeConfig; + + +fn retrieve_cert(reader: R) -> Result, IoError> where R: Read { + let mut reader = BufReader::new(reader); + certs(&mut reader).map_err(|_| IoError::new(ErrorKind::Other, format!("no cert found"))) +} + +fn retrieve_cert_from_file

<P>(file_path: P) -> Result<Vec<Certificate>, IoError> + where P: AsRef<Path> +{ + let file = File::open(file_path)?; + retrieve_cert(file) +} + +fn retrieve_private_key

<P>(filename: P) -> Result<Vec<PrivateKey>, IoError> + where P: AsRef<Path> +{ + let keyfile = File::open(filename)?; + let mut reader = BufReader::new(keyfile); + rsa_private_keys(&mut reader).map_err(|_| IoError::new( + ErrorKind::InvalidData, + "private key not found" + )) +} + + +struct NoVerifier {} + +impl ServerCertVerifier for NoVerifier { + fn verify_server_cert( + &self, + _roots: &RootCertStore, + _presented_certs: &[Certificate], + dns_name: DNSNameRef, + _ocsp_response: &[u8], + ) -> Result<ServerCertVerified, TLSError> { + + trace!("decoding dns: {:#?}",dns_name); + Ok(ServerCertVerified::assertion()) + } +} + + +#[derive(Debug)] +pub struct K8AuthHelper { + pub config: K8Config +} + +impl K8AuthHelper { + + pub fn new(config: K8Config) -> Self { + Self { + config + } + } + + pub fn build_https_connector(&self) -> Result<HttpsConnector<HttpConnector>, IoError> { + + match &self.config { + K8Config::Pod(pod_config) => build_token_connector(&pod_config), + K8Config::KubeConfig(kube_config) => build_client_cert_connector(&kube_config.config) + } + } +} + +fn build_client_cert_connector(kube_config: &KubeConfig) -> Result<HttpsConnector<HttpConnector>, IoError> { + + let mut tls = ClientConfig::new(); + let mut http = HttpConnector::new(1); + http.enforce_http(false); + + let user = kube_config.current_user().unwrap(); + + // get certs for client-certificate + let client_certs = retrieve_cert_from_file(&user.user.client_certificate)?; + debug!("client certs: {:#?}",client_certs); + let mut private_keys = retrieve_private_key(&user.user.client_key)?; + + if private_keys.is_empty() { + return Err(IoError::new( + ErrorKind::InvalidData, + "private key not found" + )) + } + + debug!("retrieved client certs from kubeconfig"); + tls.set_single_client_cert(client_certs, private_keys.remove(0)); + tls.dangerous() + .set_certificate_verifier(Arc::new(NoVerifier {})); + Ok(HttpsConnector::from((http, tls))) +} + +fn build_token_connector(pod: &PodConfig) -> Result<HttpsConnector<HttpConnector>, IoError> { + + let mut http = HttpConnector::new(1); + http.enforce_http(false); + let mut tls = ClientConfig::new(); + + for cert in retrieve_cert(pod.ca.as_bytes())? 
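+ // each CA certificate in the pod's mounted service-account bundle is added to the TLS root store below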
{ + tls.root_store + .add(&cert) + .map_err(|err| { + IoError::new(ErrorKind::Other, format!("cert error: {:#?}", err)) + }) + .expect("problem reading cert"); + } + + debug!("retrieve ca.crt for token authenication"); + Ok(HttpsConnector::from((http, tls))) +} + + diff --git a/k8-client/src/config_map.rs b/k8-client/src/config_map.rs new file mode 100644 index 0000000000..81d7cf0284 --- /dev/null +++ b/k8-client/src/config_map.rs @@ -0,0 +1,39 @@ +use serde::Deserialize; +use serde::Serialize; + +use k8_metadata::core::Crd; +use k8_metadata::core::CrdNames; +use k8_metadata::core::Spec; +use k8_metadata::core::Status; + + +// +// ConfigMap Object +const CONFIG_MAP_API: Crd = Crd { + group: "core", + version: "v1", + names: CrdNames { + kind: "ConfigMap", + plural: "configmaps", + singular: "configmap", + }, +}; + +impl Spec for ConfigMapSpec { + + type Status = ConfigMapStatus; + + fn metadata() -> &'static Crd { + &CONFIG_MAP_API + } +} + +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ConfigMapSpec {} + +#[derive(Deserialize, Serialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ConfigMapStatus {} + +impl Status for ConfigMapStatus{} \ No newline at end of file diff --git a/k8-client/src/error.rs b/k8-client/src/error.rs new file mode 100644 index 0000000000..469171c0c4 --- /dev/null +++ b/k8-client/src/error.rs @@ -0,0 +1,73 @@ +use std::io::Error as IoError; + +use http; +use hyper; + +use std::env; +use std::fmt; +use k8_diff::DiffError; + +// For error mapping: see: https://doc.rust-lang.org/nightly/core/convert/trait.From.html + +#[derive(Debug)] +pub enum ClientError { + IoError(IoError), + HttpError(http::Error), + EnvError(env::VarError), + HyperError(hyper::Error), + JsonError(serde_json::Error), + DiffError(DiffError), + PatchError, + NotFound, +} + +impl From for ClientError { + fn from(error: IoError) -> Self { + ClientError::IoError(error) + } +} + +impl From for ClientError { + fn from(error: http::Error) -> Self { + ClientError::HttpError(error) + } +} + +impl From for ClientError { + fn from(error: env::VarError) -> Self { + ClientError::EnvError(error) + } +} + +impl From for ClientError { + fn from(error: hyper::Error) -> Self { + ClientError::HyperError(error) + } +} + +impl From for ClientError { + fn from(error: serde_json::Error) -> ClientError { + ClientError::JsonError(error) + } +} + +impl From for ClientError { + fn from(error: DiffError) -> Self { + ClientError::DiffError(error) + } +} + +impl fmt::Display for ClientError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + ClientError::IoError(err) => write!(f, "{}", err), + ClientError::HttpError(err) => write!(f, "{}", err), + ClientError::EnvError(err) => write!(f, "{}", err), + ClientError::HyperError(err) => write!(f, "{}", err), + ClientError::JsonError(err) => write!(f, "{}", err), + ClientError::NotFound => write!(f, "not found"), + ClientError::DiffError(err) => write!(f, "{:#?}", err), + ClientError::PatchError => write!(f, "patch error"), + } + } +} diff --git a/k8-client/src/fixture.rs b/k8-client/src/fixture.rs new file mode 100644 index 0000000000..8f35d59761 --- /dev/null +++ b/k8-client/src/fixture.rs @@ -0,0 +1,3 @@ +/// common test fixtures + +pub const TEST_NS: &'static str = "test"; \ No newline at end of file diff --git a/k8-client/src/lib.rs b/k8-client/src/lib.rs new file mode 100644 index 0000000000..68b15d7df1 --- /dev/null +++ b/k8-client/src/lib.rs @@ -0,0 +1,34 @@ +mod 
client; +mod error; +mod pod; +mod service; +mod stateful; +mod wstream; +mod config; +mod config_map; +mod secret; + +#[cfg(feature = "k8")] +pub mod fixture; + +pub use self::client::ApplyResult; +pub use self::client::K8Client; +pub use self::client::as_token_stream_result; +pub use self::config::K8AuthHelper; +pub use self::client::TokenStreamResult; +pub use self::error::ClientError; +pub use self::pod::ContainerPortSpec; +pub use self::pod::ContainerSpec; +pub use self::pod::PodSpec; +pub use self::pod::PodStatus; +pub use self::pod::VolumeMount; +pub use self::service::ServicePort; +pub use self::service::ServiceSpec; +pub use self::service::ServiceStatus; +pub use self::service::LoadBalancerType; +pub use self::service::ExternalTrafficPolicy; +pub use self::stateful::*; +pub use self::config_map::ConfigMapSpec; +pub use self::config_map::ConfigMapStatus; +pub use self::secret::SecretSpec; +pub use self::secret::SecretStatus; diff --git a/k8-client/src/pod.rs b/k8-client/src/pod.rs new file mode 100644 index 0000000000..f793e6fbfa --- /dev/null +++ b/k8-client/src/pod.rs @@ -0,0 +1,212 @@ +use serde::Deserialize; +use serde::Serialize; + +use k8_metadata::core::metadata::Env; +use k8_metadata::core::Crd; +use k8_metadata::core::CrdNames; +use k8_metadata::core::Spec; +use k8_metadata::core::Status; +use k8_metadata::spu::Endpoint; + +// +// Pod Object + +const POD_API: Crd = Crd { + group: "core", + version: "v1", + names: CrdNames { + kind: "Pod", + plural: "pods", + singular: "pod", + }, +}; + +impl Spec for PodSpec { + + type Status = PodStatus; + + fn metadata() -> &'static Crd { + &POD_API + } +} + +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct PodSpec { + pub volumes: Option>, + pub containers: Vec, + pub restart_policy: Option, // TODO; should be enum + pub service_account_name: Option, + pub service_account: Option, + pub node_name: Option, + pub termination_grace_period_seconds: Option, + pub dns_policy: Option, + pub security_context: Option, + pub scheduler_name: Option +} + +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct PodSecurityContext { + pub fs_group: Option, + pub run_as_group: Option, + pub run_as_non_root: Option, + pub run_as_user: Option +} + +#[derive(Deserialize, Serialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ContainerSpec { + pub name: String, + pub ports: Vec, + pub image: Option, + pub image_pull_policy: Option, // TODO: should be enum + pub volume_mounts: Vec, + pub env: Option>, + pub resource: Option, + pub termination_mssage_path: Option, + pub termination_message_policy: Option, + pub tty: Option +} + +#[derive(Deserialize, Serialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ResourceRequirements { + pub api_groups: Vec, + pub resource_names: Vec, + pub resources: Vec, + pub verbs: Vec +} + +#[derive(Deserialize, Serialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ContainerPortSpec { + pub container_port: u16, + pub name: Option, + pub protocol: Option, // TODO: This should be enum +} + +impl ContainerPortSpec { + pub fn new>(container_port: u16, name: T) -> Self { + ContainerPortSpec { + container_port, + name: Some(name.into()), + protocol: None, + } + } +} + +impl From<&Endpoint> for ContainerPortSpec { + fn from(end_point: &Endpoint) -> Self { + ContainerPortSpec { + container_port: end_point.port, + ..Default::default() + } + } +} + + 
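+// A minimal sketch of building container ports with the constructor above;
+// the port number and name here are illustrative values only.
+#[cfg(test)]
+mod container_port_example {
+    use super::ContainerPortSpec;
+
+    #[test]
+    fn new_sets_name_and_port() {
+        let spec = ContainerPortSpec::new(9005, "public");
+        assert_eq!(spec.container_port, 9005);
+        assert_eq!(spec.name, Some("public".to_owned()));
+        assert!(spec.protocol.is_none());
+    }
+}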
+ +#[derive(Deserialize, Serialize, Default, Debug, Clone)] +pub struct VolumeSpec { + pub name: String, + pub secret: Option, + pub persistent_volume_claim: Option, +} + +#[derive(Deserialize, Serialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct VolumeMount { + pub mount_path: String, + pub mount_propagation: Option, + pub name: String, + pub read_only: Option, + pub sub_path: Option, +} + +#[derive(Deserialize, Serialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct SecretVolumeSpec { + pub default_mode: u16, + pub secret_name: String, + pub optional: Option, +} + +#[derive(Deserialize, Serialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct PersistentVolumeClaimVolumeSource { + claim_name: String, + read_only: bool, +} + +#[derive(Deserialize, Serialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct PodStatus { + pub phase: String, + #[serde(rename = "hostIP")] + pub host_ip: String, + #[serde(rename = "podIP")] + pub pod_ip: String, + pub start_time: String, + pub container_statuses: Vec, +} + +impl Status for PodStatus{} + +#[derive(Deserialize, Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ContainerStatus { + pub name: String, + pub state: ContainerState, + pub ready: bool, + pub restart_count: u8, + pub image: String, + #[serde(rename = "imageID")] + pub image_id: String, + #[serde(rename = "containerID")] + pub container_id: String, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ContainerState { + pub running: ContainerStateRunning, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ContainerStateRunning { + pub started_at: String, +} + +// +// Test Cases +// +#[cfg(test)] +mod test { + + use k8_metadata::core::metadata::item_uri; + use k8_metadata::core::metadata::items_uri; + use k8_metadata::core::metadata::DEFAULT_NS; + use k8_metadata::topic::TopicSpec; + + #[test] + fn test_topic_item_uri() { + let uri = item_uri::("https://localhost", "test", DEFAULT_NS, None); + assert_eq!( + uri, + "https://localhost/apis/fluvio.infinyon.com/v1/namespaces/default/topics/test" + ); + } + + #[test] + fn test_topic_items_uri() { + let uri = items_uri::("https://localhost", DEFAULT_NS, None); + assert_eq!( + uri, + "https://localhost/apis/fluvio.infinyon.com/v1/namespaces/default/topics" + ); + } + + +} diff --git a/k8-client/src/secret.rs b/k8-client/src/secret.rs new file mode 100644 index 0000000000..3c6cddfa91 --- /dev/null +++ b/k8-client/src/secret.rs @@ -0,0 +1,38 @@ +use serde::Deserialize; +use serde::Serialize; + +use k8_metadata::core::Crd; +use k8_metadata::core::CrdNames; +use k8_metadata::core::Spec; +use k8_metadata::core::Status; + +// +// Secret Object +const SECRET_API: Crd = Crd { + group: "core", + version: "v1", + names: CrdNames { + kind: "Secret", + plural: "secrets", + singular: "secret", + }, +}; + +impl Spec for SecretSpec { + + type Status = SecretStatus; + + fn metadata() -> &'static Crd { + &SECRET_API + } +} + +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct SecretSpec {} + +#[derive(Deserialize, Serialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct SecretStatus {} + +impl Status for SecretStatus{} diff --git a/k8-client/src/service.rs b/k8-client/src/service.rs new file mode 100644 index 0000000000..a82b958a5f --- /dev/null +++ 
b/k8-client/src/service.rs @@ -0,0 +1,105 @@ +use serde::Deserialize; +use serde::Serialize; +use std::collections::HashMap; + +use k8_metadata::core::Crd; +use k8_metadata::core::CrdNames; +use k8_metadata::core::Spec; +use k8_metadata::core::Status; +use k8_metadata::spu::Endpoint; + + +const SERVICE_API: Crd = Crd { + group: "core", + version: "v1", + names: CrdNames { + kind: "Service", + plural: "services", + singular: "service", + }, +}; + +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ServiceSpec { + #[serde(rename = "clusterIP")] + pub cluster_ip: String, + #[serde(rename = "externalIPs")] + pub external_ips: Option>, + #[serde(rename = "loadBalancerIP")] + pub load_balancer_ip: Option, + pub r#type: Option, + pub external_name: Option, + pub external_traffic_policy: Option, + pub ports: Vec, + pub selector: Option>, +} + +impl Spec for ServiceSpec { + + type Status = ServiceStatus; + + fn metadata() -> &'static Crd { + &SERVICE_API + } + + + fn make_same(&mut self,other: &Self) { + if other.cluster_ip == "" { + self.cluster_ip = "".to_owned(); + } + } + +} + + +impl From<&Endpoint> for ServicePort { + fn from(end_point: &Endpoint) -> Self { + ServicePort { + port: end_point.port, + ..Default::default() + } + } +} + + + +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ServicePort { + pub name: Option, + pub node_port: Option, + pub port: u16, + pub target_port: Option, +} + +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ServiceStatus { + ingress: Option> +} + +impl Status for ServiceStatus{} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +pub enum ExternalTrafficPolicy { + Local, + Cluster +} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +pub enum LoadBalancerType { + ExternalName, + ClusterIP, + NodePort, + LoadBalancer +} + + + +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct LoadBalancerIngress { + pub hostname: Option, + pub ip: Option +} \ No newline at end of file diff --git a/k8-client/src/stateful.rs b/k8-client/src/stateful.rs new file mode 100644 index 0000000000..8d03b44b10 --- /dev/null +++ b/k8-client/src/stateful.rs @@ -0,0 +1,209 @@ +use serde::Deserialize; +use serde::Serialize; + +use k8_metadata::core::metadata::LabelSelector; +use k8_metadata::core::metadata::TemplateSpec; + +use crate::PodSpec; + + +use k8_metadata::core::Crd; +use k8_metadata::core::CrdNames; +use k8_metadata::core::Spec; +use k8_metadata::core::Status; + + +const STATEFUL_API: Crd = Crd { + group: "apps", + version: "v1", + names: CrdNames { + kind: "StatefulSet", + plural: "statefulsets", + singular: "statefulset", + }, +}; + +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct StatefulSetSpec { + pub pod_management_policy: Option, + pub replicas: Option, + pub revision_history_limit: Option, + pub selector: LabelSelector, + pub service_name: String, + pub template: TemplateSpec, + pub volume_claim_templates: Vec>, + pub update_strategy: Option +} + +impl Spec for StatefulSetSpec { + + type Status = StatefulSetStatus; + + fn metadata() -> &'static Crd { + &STATEFUL_API + } +} + + +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct StatefulSetUpdateStrategy { + pub _type: String, + pub rolling_ipdate: Option + +} + 
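+// `partition` follows the k8s API semantics: pods with an ordinal below the partition stay on the old revision during a rolling update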
+#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct RollingUpdateStatefulSetStrategy { + partition: u32 +} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +pub enum PodMangementPolicy { + OrderedReady, + Parallel, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct PersistentVolumeClaim { + pub access_modes: Vec, + pub storage_class_name: String, + pub resources: ResourceRequirements, +} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +pub enum VolumeAccessMode { + ReadWriteOnce, + ReadWrite, + ReadOnlyMany, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct ResourceRequirements { + pub requests: VolumeRequest, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct VolumeRequest { + pub storage: String, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct StatefulSetStatus { + pub replicas: u16, + pub collision_count: Option, + pub conditions: Option>, + pub current_replicas: Option, + pub current_revision: Option, + pub observed_generation: Option, + pub ready_replicas: Option, + pub update_revision: Option, + pub updated_replicas: Option, +} + +impl Status for StatefulSetStatus{} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +pub enum StatusEnum { + True, + False, + Unknown, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct StatefulSetCondition { + pub message: String, + pub reason: StatusEnum, + pub _type: String, +} + + +/* +#[cfg(test)] +mod test { + + use serde_json; + use serde_json::json; + + use super::LabelSelector; + use super::StatefulSetSpec; + use k8_diff::Changes; + use k8_metadata::cluster::ClusterSpec; + use k8_metadata::cluster::Configuration; + use k8_metadata::cluster::Cluster; + use k8_metadata::cluster::ClusterEndpoint; + + #[test] + fn test_label_selector() { + let selector = LabelSelector::new_labels(vec![("app".to_owned(), "test".to_owned())]); + + let maps = selector.match_labels; + assert_eq!(maps.len(), 1); + assert_eq!(maps.get("app").unwrap(), "test"); + } + + #[test] + fn test_cluster_to_stateful() { + let cluster = ClusterSpec { + cluster: Cluster { + replicas: Some(3), + rack: Some("rack1".to_string()), + public_endpoint: Some(ClusterEndpoint::new(9005)), + private_endpoint: Some(ClusterEndpoint::new(9006)), + controller_endpoint: Some(ClusterEndpoint::new(9004)), + }, + configuration: Some(Configuration::default()), + env: None, + }; + + let stateful: StatefulSetSpec = (&cluster).into(); + assert_eq!(stateful.replicas, Some(3)); + let mut stateful2 = stateful.clone(); + stateful2.replicas = Some(2); + + let state1_json = serde_json::to_value(stateful).expect("json"); + let state2_json = serde_json::to_value(stateful2).expect("json"); + let diff = state1_json.diff(&state2_json).expect("diff"); + let json_diff = serde_json::to_value(diff).unwrap(); + assert_eq!( + json_diff, + json!({ + "replicas": 2 + }) + ); + } + + + /* + * TODO: make this as utility + use std::io::Read; + use std::fs::File; + use k8_metadata::core::metadata::ObjectMeta; + use k8_metadata::core::metadata::K8Obj; + use super::StatefulSetStatus; + use super::TemplateSpec; + use super::PodSpec; + use super::ContainerSpec; + use super::ContainerPortSpec; + + #[test] + fn test_decode_statefulset() { + let file_name = "/private/tmp/f1.json"; + + let mut f = File::open(file_name).expect("open failed"); + let mut contents = 
String::new(); + f.read_to_string(&mut contents).expect("read file"); + // let st: StatefulSetSpec = serde_json::from_slice(&buffer).expect("error"); + let st: K8Obj = serde_json::from_str(&contents).expect("error"); + println!("st: {:#?}",st); + assert!(true); + } + */ + +} +*/ \ No newline at end of file diff --git a/k8-client/src/wstream.rs b/k8-client/src/wstream.rs new file mode 100644 index 0000000000..745e9d3c61 --- /dev/null +++ b/k8-client/src/wstream.rs @@ -0,0 +1,138 @@ + +use std::pin::Pin; +use std::marker::Unpin; +use std::task::Context; +use std::task::Poll; + +use bytes::Bytes; +use futures::stream::Stream; +use hyper::body::Chunk; +use hyper::error::Error as HyperError; +use log::error; +use log::trace; +use pin_utils::unsafe_pinned; +use pin_utils::unsafe_unpinned; +use std::mem; + +type ChunkList = Vec, HyperError>>; + +pub struct WatchStream +where + S: Stream, +{ + stream: S, + last_buffer: Vec, + chunks: ChunkList, +} + +impl Unpin for WatchStream where S: Stream {} + +impl WatchStream +where + S: Stream>, +{ + unsafe_pinned!(stream: S); + unsafe_unpinned!(last_buffer: Vec); + unsafe_unpinned!(chunks: ChunkList); + + pub fn new(stream: S) -> Self { + WatchStream { + stream, + last_buffer: Vec::new(), + chunks: Vec::new(), + } + } +} + +const SEPARATOR: u8 = b'\n'; + +impl Stream for WatchStream +where + S: Stream>, +{ + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut chunks = mem::replace(self.as_mut().chunks(), Vec::new()); + let last_buffer = mem::replace(self.as_mut().last_buffer(), Vec::new()); + let mut buf: Bytes = last_buffer.into(); + trace!( + "entering poll next with prev buf: {}, chunk: {}", + buf.len(), + chunks.len() + ); + + loop { + trace!( + "polling chunk with prev buf len: {}, chunk: {}", + buf.len(), + chunks.len() + ); + let poll_result = self.as_mut().stream().poll_next(cx); + match poll_result { + Poll::Pending => { + trace!( + "stream is pending. returning pending. saving buf len: {}", + buf.len() + ); + if buf.len() > 0 { + mem::replace(self.as_mut().last_buffer(), buf.to_vec()); + } + if chunks.len() > 0 { + mem::replace(self.as_mut().chunks(), chunks); + } + return Poll::Pending; + } + Poll::Ready(next_item) => match next_item { + None => { + trace!("none from stream. ready to return items"); + return Poll::Ready(None); + } + Some(chunk_result) => match chunk_result { + Ok(chunk) => { + trace!("chunk: {}", String::from_utf8_lossy(&chunk).to_string()); + buf.extend_from_slice(&chunk); + trace!( + "parsing chunk with len: {} accum buffer: {}", + chunk.len(), + buf.len() + ); + loop { + if let Some(i) = buf.iter().position(|&c| c == SEPARATOR) { + trace!("found separator at: {}", i); + let head = buf.slice(0, i); + buf = buf.slice(i + 1, buf.len()); + + chunks.push(Ok(head.to_vec())); + } else { + trace!("no separator found"); + break; + } + } + if buf.len() > 0 { + trace!( + "remainder chars count: {}. they will be added to accum buffer", + buf.len() + ); + trace!( + "current buf: {}", + String::from_utf8_lossy(&buf).to_string() + ); + } else { + trace!( + "end of loop buf is empty. 
returning {} chunks", + chunks.len() + ); + return Poll::Ready(Some(Ok(chunks))); + } + } + Err(err) => { + error!("error: {}", err); + return Poll::Ready(Some(Err(err.into()))); + } + }, + }, + } + } + } +} diff --git a/k8-client/tests/service.rs b/k8-client/tests/service.rs new file mode 100644 index 0000000000..40fd1c9be8 --- /dev/null +++ b/k8-client/tests/service.rs @@ -0,0 +1,76 @@ +#[cfg(feature = "k8")] +#[cfg(not(feature = "k8_stream"))] +mod integratino_tests { + + use lazy_static::lazy_static; + use log::debug; + use rand::distributions::Alphanumeric; + use rand::{thread_rng, Rng}; + use std::collections::HashMap; + + use future_helper::test_async; + use k8_client::fixture::TEST_NS; + use k8_client::ClientError; + use k8_client::InputK8Obj; + use k8_client::InputMetadata; + use k8_client::K8Client; + use k8_client::ServicePort; + use k8_client::ServiceSpec; + use k8_client::ServiceStatus; + use k8_metadata::core::Spec; + + use types::defaults::SPU_DEFAULT_NAME; + + // way to get static lifetime which is requirmeent for cluster + lazy_static! { + static ref K8CLIENT: K8Client = K8Client::new(None).expect("cluster not intialized"); + } + + fn new_service() -> InputK8Obj { + let mut rng = thread_rng(); + let rname: String = rng.sample_iter(&Alphanumeric).take(5).collect(); + let name = format!("test{}", rname); + + let mut labels = HashMap::new(); + labels.insert("app".to_owned(), SPU_DEFAULT_NAME.to_owned()); + let mut selector = HashMap::new(); + selector.insert("app".to_owned(), SPU_DEFAULT_NAME.to_owned()); + + let service_spec = ServiceSpec { + cluster_ip: "None".to_owned(), + ports: vec![ServicePort { + port: 9092, + ..Default::default() + }], + selector: Some(selector), + ..Default::default() + }; + + let new_item: InputK8Obj = InputK8Obj { + api_version: ServiceSpec::api_version(), + kind: ServiceSpec::kind(), + metadata: InputMetadata { + name: name.to_lowercase(), + labels: Some(labels), + namespace: TEST_NS.to_string(), + ..Default::default() + }, + spec: Some(service_spec), + status: None, + }; + + new_item + } + + #[test_async] + async fn test_client_create_and_delete_service() -> Result<(), ClientError> { + let new_item = new_service(); + debug!("item: {:#?}", &new_item); + let item = K8CLIENT.create_item::(&new_item).await?; + debug!("deleting: {:#?}", item); + K8CLIENT.delete_item::(&new_item.metadata).await?; + assert!(true, "passed"); + Ok(()) + } + +} diff --git a/k8-client/tests/stateful.rs b/k8-client/tests/stateful.rs new file mode 100644 index 0000000000..068a09a389 --- /dev/null +++ b/k8-client/tests/stateful.rs @@ -0,0 +1,27 @@ +#[cfg(feature = "k8")] +#[cfg(not(feature ="k8_stream"))] +mod integratino_tests { + + use lazy_static::lazy_static; + + use k8_client::K8Client; + use k8_client::StatefulSetSpec; + use k8_client::StatefulSetStatus; + use k8_client::ClientError; + use future_helper::test_async; + use k8_client::fixture::TEST_NS; + + // way to get static lifetime which is requirement for cluster + lazy_static! 
{ + static ref K8CLIENT: K8Client = K8Client::new(None).expect("cluster not intialized"); + } + + // this assume we have at least one statefulset + #[test_async] + async fn test_client_get_statefulset() -> Result<(),ClientError> { + + + K8CLIENT.retrieve_items::(TEST_NS).await?; + Ok(()) as Result<(),ClientError> + } +} diff --git a/k8-client/tests/topic-stream.rs b/k8-client/tests/topic-stream.rs new file mode 100644 index 0000000000..9c23ca581c --- /dev/null +++ b/k8-client/tests/topic-stream.rs @@ -0,0 +1,58 @@ +#[cfg(feature = "k8_stream")] +mod integratino_tests { + + use futures::stream::StreamExt; + use lazy_static::lazy_static; + use pin_utils::pin_mut; + + use future_helper::test_async; + use k8_client::fixture::TEST_NS; + use k8_client::ClientError; + use k8_client::K8Client; + use k8_metadata::topic::TopicSpec; + + // way to get static lifetime which is requirement for cluster + lazy_static! { + static ref K8CLIENT: K8Client = K8Client::new(None).expect("cluster not intialized"); + } + + // print first 10 topics of topic stream, this should be only run as part of indiv test + #[test_async] + async fn test_client_print_stream() -> Result<(), ClientError> { + let stream = K8CLIENT.watch_stream_now::(TEST_NS.to_owned()); + pin_mut!(stream); + let mut count = 0; + let mut end = false; + while count < 10 && !end { + match stream.next().await { + Some(topic) => { + count = count + 1; + println!("topic: {:#?}", topic); + } + _ => { + end = true; + } + } + } + Ok(()) + } + + #[test_async] + async fn test_client_stream_topics() -> Result<(), ClientError> { + let stream = K8CLIENT.watch_stream_since::(TEST_NS, None); + pin_mut!(stream); + let result = stream.next().await; + match result { + Some(topic_result) => { + let topics = topic_result.expect("topics"); + assert!(topics.len() > 0, "there should be at least 1 topic"); + } + None => { + assert!(false, "there should be at least 1 topics"); + } + } + + Ok(()) + } + +} diff --git a/k8-client/tests/topic.rs b/k8-client/tests/topic.rs new file mode 100644 index 0000000000..a9525823ed --- /dev/null +++ b/k8-client/tests/topic.rs @@ -0,0 +1,185 @@ +#[cfg(feature = "k8")] +#[cfg(not(feature = "k8_stream"))] +mod integratino_tests { + + use lazy_static::lazy_static; + use log::debug; + use rand::distributions::Alphanumeric; + use rand::{thread_rng, Rng}; + use serde_json; + use serde_json::json; + use std::collections::BTreeMap; + + use future_helper::test_async; + use k8_client::fixture::TEST_NS; + use k8_client::ApplyResult; + use k8_client::ClientError; + use k8_client::InputK8Obj; + use k8_client::InputMetadata; + use k8_client::K8Client; + use k8_client::K8Obj; + use k8_client::{PodSpec, PodStatus}; + use k8_metadata::cluster::{ClusterSpec, ClusterStatus}; + use k8_metadata::core::Spec; + use k8_metadata::topic::{TopicSpec, TopicStatus, TopicStatusResolution}; + + // way to get static lifetime which is requirement for cluster + lazy_static! 
{ + static ref K8CLIENT: K8Client = K8Client::new(None).expect("cluster not intialized"); + } + + fn new_topic() -> InputK8Obj { + let mut rng = thread_rng(); + let rname: String = rng.sample_iter(&Alphanumeric).take(5).collect(); + let name = format!("test{}", rname); + debug!("create topic with name: {}", &name); + let topic_spec = TopicSpec { + partitions: 2, + replication_factor: 5, + ignore_rack_assignment: Some(true), + }; + + let new_item: InputK8Obj = InputK8Obj { + api_version: TopicSpec::api_version(), + kind: TopicSpec::kind(), + metadata: InputMetadata { + name: name.to_lowercase(), + namespace: TEST_NS.to_string(), + ..Default::default() + }, + spec: Some(topic_spec), + status: None, + }; + + new_item + } + + #[test_async] + async fn test_client_get_topic() -> Result<(), ClientError> { + let topic: K8Obj = + K8CLIENT.retrieve_item(InputMetadata::named("test", TEST_NS)).await?; + assert_eq!(topic.kind.unwrap(), "Topic"); + assert_eq!(topic.metadata.name.unwrap(), "test"); + assert_eq!(topic.spec.as_ref().unwrap().partitions, 1); + + Ok(()) + } + + #[test_async] + async fn test_client_get_topics() -> Result<(), ClientError> { + let topics = K8CLIENT.retrieve_items::(TEST_NS).await?; + assert!(topics.items.len() > 0); + assert_eq!(topics.kind, "TopicList"); + let topic = &topics.items[0]; + assert_eq!(topic.kind.as_ref().unwrap(), "Topic"); + assert_eq!(topic.metadata.name, Some("test".to_owned())); + assert_eq!(topic.spec.as_ref().unwrap().partitions, 1); + Ok(()) + } + + #[test_async] + async fn test_client_create_and_delete_topic() -> Result<(), ClientError> { + let new_item = new_topic(); + K8CLIENT.create_item::(&new_item).await?; + K8CLIENT.delete_item::(&new_item.metadata).await?; + Ok(()) + } + + #[test_async] + async fn test_client_create_update_topic_status() -> Result<(), ClientError> { + let new_item = new_topic(); + let mut topic = K8CLIENT.create_item::(&new_item).await?; + + // assign topic status + let mut par: BTreeMap> = BTreeMap::new(); + par.insert(0, vec![0, 1]); + let status = TopicStatus { + resolution: TopicStatusResolution::Ok, + replica_map: Some(par), + target_replica_map: None, + reason: None, + }; + topic.status = Some(status); + + let topic = K8CLIENT.update_status(&topic).await?; + let status = topic.status.unwrap(); + assert_eq!(status.replica_map.unwrap().get(&0).unwrap().len(), 2); + + K8CLIENT.delete_item::(&new_item.metadata).await?; + + Ok(()) + } + + #[test_async] + async fn test_client_get_pods() -> Result<(), ClientError> { + K8CLIENT.retrieve_items::(TEST_NS).await?; + Ok(()) + } + + #[test_async] + async fn test_client_get_clusters() -> Result<(), ClientError> { + let clusters = K8CLIENT.retrieve_items::(TEST_NS).await?; + assert!( + clusters.items.len() > 0, + "at least 1 cluster should be there" + ); + Ok(()) + } + + #[test_async] + async fn test_client_patch_topic() -> Result<(), ClientError> { + let new_item = new_topic(); + let _ = K8CLIENT.create_item(&new_item).await?; + + let patch = json!({ + "spec": { + "partitions": 5 + } + }); + + K8CLIENT.patch::(&new_item.metadata, &patch).await?; + + let item: K8Obj = + K8CLIENT.retrieve_item(&new_item.metadata).await?; + let spec = item.spec.unwrap(); + assert_eq!(spec.partitions, 5); + + K8CLIENT.delete_item::(&new_item.metadata).await?; + + Ok(()) + } + + #[test_async] + async fn test_client_apply_topic() -> Result<(), ClientError> { + let mut new_item = new_topic(); + let status = K8CLIENT.apply(&new_item).await?; + match status { + ApplyResult::Created(_) => { + assert!(true, 
"created"); + // check to ensure item exists + let _item: K8Obj = + K8CLIENT.retrieve_item(&new_item.metadata).await?; + let spec = new_item.spec.as_mut().unwrap(); + spec.partitions = 5; + let status = K8CLIENT.apply(&new_item).await?; + match status { + ApplyResult::None => assert!(false, "no change"), + ApplyResult::Created(_) => assert!(false, "created, should not happen"), + ApplyResult::Patched(_) => { + let patch_item: K8Obj = + K8CLIENT.retrieve_item(&new_item.metadata).await?; + assert_eq!(patch_item.spec.unwrap().partitions, 5); + } + } + + K8CLIENT.delete_item::(&new_item.metadata).await?; + } + _ => { + assert!(false, "expected created"); + } + } + + Ok(()) + } + +} diff --git a/k8-config/Cargo.toml b/k8-config/Cargo.toml new file mode 100644 index 0000000000..288020d4a7 --- /dev/null +++ b/k8-config/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "k8-config" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] +edition = "2018" + +[dependencies] +log = "0.4.6" +dirs = "2.0.2" +serde = { version ="1.0.98", features = ['derive'] } +serde_yaml = "0.8.9" + diff --git a/k8-config/README.md b/k8-config/README.md new file mode 100644 index 0000000000..4fd07b2d88 --- /dev/null +++ b/k8-config/README.md @@ -0,0 +1,3 @@ +# Kubernetes Configuration Utility + +This crate is used to read K8 Configuration YML in the home directory \ No newline at end of file diff --git a/k8-config/data/k8config.yaml b/k8-config/data/k8config.yaml new file mode 100644 index 0000000000..d8f39adb10 --- /dev/null +++ b/k8-config/data/k8config.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority: /Users/test/.minikube/ca.crt + server: https://192.168.0.0:8443 + name: minikube +contexts: +- context: + cluster: minikube + namespace: flv + user: minikube + name: flv +- context: + cluster: minikube + user: minikube + name: minikube +current-context: flv +kind: Config +preferences: {} +users: +- name: minikube + user: + client-certificate: /Users/test/.minikube/client.crt + client-key: /Users/test/.minikube/client.key diff --git a/k8-config/rust-toolchain b/k8-config/rust-toolchain new file mode 120000 index 0000000000..9327ba4034 --- /dev/null +++ b/k8-config/rust-toolchain @@ -0,0 +1 @@ +../rust-toolchain \ No newline at end of file diff --git a/k8-config/src/config.rs b/k8-config/src/config.rs new file mode 100644 index 0000000000..88b6a87ae1 --- /dev/null +++ b/k8-config/src/config.rs @@ -0,0 +1,147 @@ +use std::path::Path; +use std::fs::File; +use std::fs::read_to_string; +use std::io::Result as IoResult; + +use serde::Deserialize; +use dirs::home_dir; + +use crate::ConfigError; + +#[derive(Debug, PartialEq, Deserialize)] +pub struct Cluster { + pub name: String, + pub cluster: ClusterDetail, +} + + +#[derive(Debug, PartialEq, Deserialize)] +#[serde(rename_all = "kebab-case")] +pub struct ClusterDetail { + pub insecure_skip_tls_verify: Option, + pub certificate_authority: String, + pub server: String, +} + +impl ClusterDetail { + pub fn ca(&self) -> IoResult { + read_to_string(&self.certificate_authority) + } +} + +#[derive(Debug, PartialEq, Deserialize)] +pub struct Context { + pub name: String, + pub context: ContextDetail, +} + + +#[derive(Debug, PartialEq, Deserialize)] +pub struct ContextDetail { + pub cluster: String, + pub user: String, + pub namespace: Option +} + +impl ContextDetail { + pub fn namespace(&self) -> &str { + match &self.namespace { + Some(nm) => &nm, + None => "default" + } + } +} + + +#[derive(Debug, PartialEq, Deserialize)] +pub struct User { + pub name: 
String, + pub user: UserDetail +} + +#[derive(Debug, PartialEq, Deserialize)] +#[serde(rename_all = "kebab-case")] +pub struct UserDetail { + pub client_certificate: String, + pub client_key: String +} + + +#[derive(Debug, PartialEq, Deserialize)] +#[serde(rename_all = "kebab-case")] +pub struct KubeConfig { + #[serde(rename = "apiVersion")] + pub api_version: String, + pub clusters: Vec<Cluster>, + pub contexts: Vec<Context>, + pub current_context: String, + pub kind: String, + pub users: Vec<User>, +} + + +impl KubeConfig { + + /// read from default home directory + pub fn from_home() -> Result<Self, ConfigError> { + let home_dir = home_dir().unwrap(); + Self::from_file(home_dir.join(".kube").join("config")) + } + + pub fn from_file<T: AsRef<Path>>(path: T) -> Result<Self, ConfigError> { + let file = File::open(path)?; + Ok(serde_yaml::from_reader(file)?) + } + + pub fn current_context(&self) -> Option<&Context> { + self.contexts.iter().find(|c| c.name == self.current_context) + } + + pub fn current_cluster(&self) -> Option<&Cluster> { + if let Some(ctx) = self.current_context() { + self.clusters.iter().find(|c| c.name == ctx.context.cluster) + } else { + None + } + } + + pub fn current_user(&self) -> Option<&User> { + if let Some(ctx) = self.current_context() { + self.users.iter().find(|c| c.name == ctx.context.user) + } else { + None + } + } + + +} + + + + +#[cfg(test)] +mod test { + + use super::KubeConfig; + + #[test] + fn test_decode_default_config() { + let config = KubeConfig::from_file("data/k8config.yaml").expect("read"); + assert_eq!(config.api_version,"v1"); + assert_eq!(config.kind,"Config"); + assert_eq!(config.current_context,"flv"); + assert_eq!(config.clusters.len(),1); + let cluster = &config.clusters[0].cluster; + assert_eq!(cluster.server,"https://192.168.0.0:8443"); + assert_eq!(cluster.certificate_authority,"/Users/test/.minikube/ca.crt"); + assert_eq!(config.contexts.len(),2); + let ctx = &config.contexts[0].context; + assert_eq!(ctx.cluster,"minikube"); + assert_eq!(ctx.namespace.as_ref().unwrap(),"flv"); + + let current_cluster = config.current_cluster().expect("current"); + assert_eq!(current_cluster.name,"minikube"); + + } +} + diff --git a/k8-config/src/error.rs b/k8-config/src/error.rs new file mode 100644 index 0000000000..9e0d3e5e82 --- /dev/null +++ b/k8-config/src/error.rs @@ -0,0 +1,34 @@ +use std::fmt; +use std::io::Error as StdIoError; + +use serde_yaml::Error as SerdYamlError; + + +#[derive(Debug)] +pub enum ConfigError { + IoError(StdIoError), + SerdeError(SerdYamlError), + NoCurrentContext +} + +impl fmt::Display for ConfigError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::IoError(err) => write!(f, "{}", err), + Self::SerdeError(err) => write!(f,"{}",err), + Self::NoCurrentContext => write!(f,"no current context") + } + } +} + +impl From<StdIoError> for ConfigError { + fn from(error: StdIoError) -> Self { + Self::IoError(error) + } +} + +impl From<SerdYamlError> for ConfigError { + fn from(error: SerdYamlError) -> Self { + Self::SerdeError(error) + } +} diff --git a/k8-config/src/lib.rs b/k8-config/src/lib.rs new file mode 100644 index 0000000000..207820f37d --- /dev/null +++ b/k8-config/src/lib.rs @@ -0,0 +1,71 @@ + +mod config; +mod error; +mod pod; + +pub use error::ConfigError; +pub use config::KubeConfig; +pub use pod::PodConfig; + +use log::info; + +#[derive(Debug)] +pub struct KubeContext { + pub namespace: String, + pub api_path: String, + pub config: KubeConfig +} + +#[derive(Debug)] +pub enum K8Config { + Pod(PodConfig), + KubeConfig(KubeContext) +} + +impl Default for K8Config { + fn default() -> Self {
Self::Pod(PodConfig::default()) + } +} + +impl K8Config { + pub fn load() -> Result<Self, ConfigError> { + if let Some(pod_config) = PodConfig::load() { + info!("found pod config: {:#?}",pod_config); + Ok(K8Config::Pod(pod_config)) + } else { + info!("no pod config found, trying to read kubeconfig"); + let config = KubeConfig::from_home()?; + info!("kube config: {:#?}",config); + // check if we have current cluster + + if let Some(current_cluster) = config.current_cluster() { + let ctx = config.current_context().unwrap(); + let k8context = KubeContext { + namespace: ctx.context.namespace().to_owned(), + api_path: current_cluster.cluster.server.clone(), + config + }; + Ok(K8Config::KubeConfig(k8context)) + } else { + Err(ConfigError::NoCurrentContext) + } + + } + } + + pub fn api_path(&self) -> &str { + match self { + Self::Pod(pod) => pod.api_path(), + Self::KubeConfig(config) => &config.api_path + } + } + + pub fn namespace(&self) -> &str { + match self { + Self::Pod(pod) => &pod.namespace, + Self::KubeConfig(config) => &config.namespace + } + } + +} \ No newline at end of file diff --git a/k8-config/src/pod.rs b/k8-config/src/pod.rs new file mode 100644 index 0000000000..2fb9227d4b --- /dev/null +++ b/k8-config/src/pod.rs @@ -0,0 +1,60 @@ +use std::path::Path; +use std::fs::read_to_string; + +use log::debug; +use log::trace; +use log::error; + +const BASE_DIR: &'static str = "/var/run/secrets/kubernetes.io/serviceaccount"; +const API_SERVER: &'static str = "https://kubernetes.default.svc"; + +///var/run/secrets/kubernetes.io/serviceaccount + +/// Configuration as Pod +#[derive(Debug, Default, Clone)] +pub struct PodConfig { + pub ca: String, + pub namespace: String, + pub token: String, +} + +impl PodConfig { + pub fn load() -> Option<Self> { + // first try to see if this base dir account exists, otherwise return none + let path = Path::new(BASE_DIR); + if !path.exists() { + debug!( + "pod config dir: {} not found, skipping pod config", + BASE_DIR + ); + return None; + } + + let ca = read_file("ca.crt")?; + let namespace = read_file("namespace")?; + let token = read_file("token")?; + + Some(Self { + ca, + namespace, + token, + }) + } + + pub fn api_path(&self) -> &'static str { + API_SERVER + } +} + +// read file +fn read_file(name: &str) -> Option<String> { + let full_path = format!("{}/{}", BASE_DIR, name); + match read_to_string(&full_path) { + Ok(value) => Some(value), + Err(err) => { + error!("no {} found as pod in {}", name,full_path); + trace!("unable to read pod: {} value: {}", name, err); + None + } + } +} diff --git a/k8-diff/Cargo.toml b/k8-diff/Cargo.toml new file mode 100644 index 0000000000..01a638373e --- /dev/null +++ b/k8-diff/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "k8-diff" +edition = "2018" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + + +[dependencies] +log = "0.4.6" +serde = "1.0.98" +serde_json = "1.0.40" diff --git a/k8-diff/k8-dderive/Cargo.toml b/k8-diff/k8-dderive/Cargo.toml new file mode 100644 index 0000000000..6d58cbdd45 --- /dev/null +++ b/k8-diff/k8-dderive/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "k8-dderive" +version = "0.1.0-alpha.1" +edition = "2018" +authors = ["fluvio.io"] + +[lib] +proc-macro = true + +[dependencies] +proc-macro2 = "0.4.24" +quote = "0.6.10" +syn = "0.15.21" +log = "0.4.6" diff --git a/k8-diff/k8-dderive/rust-toolchain b/k8-diff/k8-dderive/rust-toolchain new file mode 120000 index 0000000000..9327ba4034 --- /dev/null +++ b/k8-diff/k8-dderive/rust-toolchain @@ -0,0 +1 @@ +../rust-toolchain \ No newline at end of file diff --git
a/k8-diff/k8-dderive/src/diff.rs b/k8-diff/k8-dderive/src/diff.rs new file mode 100644 index 0000000000..2d529127e7 --- /dev/null +++ b/k8-diff/k8-dderive/src/diff.rs @@ -0,0 +1,61 @@ +use quote::quote; +use proc_macro2::TokenStream; +use syn::Data; +use syn::DeriveInput; +use syn::Fields; + + + +pub fn generate_diff_trait(input: &DeriveInput) -> TokenStream { + let name = &input.ident; + let decoded_field_tokens = decode_fields(&input.data); + + quote! { + + impl <'a>k8_diff::Changes<'a> for #name { + + fn diff(&self, new: &'a Self) -> k8_diff::Diff { + + let mut s_diff = k8_diff::DiffStruct::new(); + + #decoded_field_tokens + + if s_diff.no_change() { + return k8_diff::Diff::None + } + + k8_diff::Diff::Change(k8_diff::DiffValue::Struct(s_diff)) + } + } + + } +} + +fn decode_fields(data: &Data) -> TokenStream { + match *data { + Data::Struct(ref data) => { + match data.fields { + Fields::Named(ref fields) => { + let recurse = fields.named.iter().map(|f| { + let fname = &f.ident; + + quote! { + // s_diff.insert("replicas".to_owned(), self.replicas.diff(&new.replicas)); + s_diff.insert(stringify!(#fname).to_owned(), self.#fname.diff(&new.#fname)); + + } + + }); + + quote! { + #(#recurse)* + } + } + _ => unimplemented!(), + } + } + _ => unimplemented!(), + } +} + + diff --git a/k8-diff/k8-dderive/src/lib.rs b/k8-diff/k8-dderive/src/lib.rs new file mode 100644 index 0000000000..8b4287b143 --- /dev/null +++ b/k8-diff/k8-dderive/src/lib.rs @@ -0,0 +1,20 @@ +#![recursion_limit="128"] +extern crate proc_macro; + +mod diff; + +use proc_macro::TokenStream as TokenStream1; +use syn::DeriveInput; + + +#[proc_macro_derive(Difference)] +pub fn diff(input: TokenStream1) -> TokenStream1 { + + // Parse the string representation + let ast: DeriveInput = syn::parse(input).unwrap(); + + let expanded = diff::generate_diff_trait(&ast); + expanded.into() +} + + diff --git a/k8-diff/src/json/diff.rs b/k8-diff/src/json/diff.rs new file mode 100644 index 0000000000..fa3eba058b --- /dev/null +++ b/k8-diff/src/json/diff.rs @@ -0,0 +1,79 @@ +use serde_json::Value; + + +use crate::Changes; +use crate::Diff; +use crate::DiffError; +use super::PatchObject; +use super::JsonDiff; + +impl Changes for Value { + + type Replace = Value; + type Patch = PatchObject; + + fn diff(&self, new: &Self) -> Result<JsonDiff, DiffError> { + if *self == *new { + return Ok(Diff::None); + } + match self { + Value::Null => Ok(Diff::Replace(new.clone())), + _ => { + match new { + Value::Null => Ok(Diff::Delete), + Value::Bool(ref _val) => Ok(Diff::Replace(new.clone())), // for now, we only support replace + Value::Number(ref _val) => Ok(Diff::Replace(new.clone())), + Value::String(ref _val) => Ok(Diff::Replace(new.clone())), + Value::Array(ref _val) => Ok(Diff::Replace(new.clone())), + Value::Object(ref new_val) => match self { + Value::Object(ref old_val) => { + let patch = PatchObject::diff(old_val, new_val)?; + Ok(Diff::Patch(patch)) + } + _ => Err(DiffError::DiffValue), + }, + } + } + } + } +} + +#[cfg(test)] +mod test { + + use serde_json::json; + use serde_json::Value; + + use super::Changes; + + #[test] + fn test_null_comparison() { + let n1 = Value::Null; + let str1 = Value::String("test".to_owned()); + let str2 = Value::String("test".to_owned()); + + assert!(n1.diff(&str1).expect("diff").is_replace()); + assert!(str1.diff(&str2).expect("diff").is_none()); + } + + #[test] + fn test_object_comparison() { + let old_spec = json!({ + "replicas": 2, + "apple": 5 + }); + let new_spec = json!({ + "replicas": 3, + "apple": 5 + }); + + let diff =
old_spec.diff(&new_spec).expect("diff"); + assert!(diff.is_patch()); + let patch = diff.as_patch_ref().get_inner_ref(); + assert_eq!(patch.len(), 1); + let diff_replicas = patch.get("replicas").unwrap(); + assert!(diff_replicas.is_replace()); + assert_eq!(*diff_replicas.as_replace_ref(), 3); + } + +} diff --git a/k8-diff/src/json/mod.rs b/k8-diff/src/json/mod.rs new file mode 100644 index 0000000000..ea962e4c57 --- /dev/null +++ b/k8-diff/src/json/mod.rs @@ -0,0 +1,43 @@ +mod diff; +mod se; + +use serde_json::Map; +use serde_json::Value; +use std::collections::HashMap; + +use crate::Changes; +use crate::Diff; +use crate::DiffError; + +type SerdeObj = Map<String, Value>; +pub type JsonDiff = Diff<Value, PatchObject>; + +#[derive(Debug)] +pub struct PatchObject(HashMap<String, JsonDiff>); + +impl PatchObject { + // diff { "a": 1,"b": 2}, { "a": 3, "b": 2} => { "a": 1} + fn diff(old: &SerdeObj, new: &SerdeObj) -> Result<Self, DiffError> { + let mut map: HashMap<String, JsonDiff> = HashMap::new(); + + for (key, new_val) in new.iter() { + match old.get(key) { + Some(old_val) => { + if old_val != new_val { + let diff_value = old_val.diff(new_val)?; + map.insert(key.clone(), diff_value); + } + } + _ => { + map.insert(key.clone(), Diff::Replace(new_val.clone())); // just replace with new if key doesn't match + } + } + } + + Ok(PatchObject(map)) + } + + fn get_inner_ref(&self) -> &HashMap<String, JsonDiff> { + &self.0 + } +} diff --git a/k8-diff/src/json/se.rs b/k8-diff/src/json/se.rs new file mode 100644 index 0000000000..45b981a54d --- /dev/null +++ b/k8-diff/src/json/se.rs @@ -0,0 +1,104 @@ +use serde::Serialize; +use serde_json::Value; + +use super::PatchObject; +use crate::Diff; + +impl Serialize for PatchObject { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: ::serde::Serializer, + { + let diff_maps = self.get_inner_ref(); + use serde::ser::SerializeMap; + let mut map = serializer.serialize_map(Some(diff_maps.len()))?; + for (key, val) in diff_maps { + match val { + Diff::None => {} + Diff::Delete => {} + Diff::Patch(ref v) => { + map.serialize_entry(key, v)?
+ }, + Diff::Replace(ref v) => { + map.serialize_entry(key, v)?; + } + Diff::Merge(ref v ) => { + map.serialize_entry(key, v)?; + } + } + } + + map.end() + } +} + +impl<R: Serialize, P: Serialize> Serialize for Diff<R, P> { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: ::serde::Serializer, + { + match self { + Diff::None => serializer.serialize_unit(), + Diff::Delete => serializer.serialize_unit(), + Diff::Patch(ref p) => p.serialize(serializer), + Diff::Replace(ref v) => v.serialize(serializer), + Diff::Merge(ref v) => v.serialize(serializer), + } + } +} + +#[cfg(test)] +mod test { + + use serde_json::json; + + use crate::Changes; + + #[test] + fn test_patch_to_simple() { + let old_spec = json!({ + "replicas": 2, + "apple": 5 + }); + let new_spec = json!({ + "replicas": 3, + "apple": 5 + }); + + let diff = old_spec.diff(&new_spec).expect("diff"); + assert!(diff.is_patch()); + + let expected = json!({ + "replicas": 3 + }); + let json_diff = serde_json::to_value(diff).unwrap(); + assert_eq!(json_diff, expected); + } + + #[test] + fn test_patch_to_hierarchy() { + let old_spec = json!({ + "spec": { + "replicas": 2, + "apple": 5 + } + }); + let new_spec = json!({ + "spec": { + "replicas": 3, + "apple": 5 + } + }); + + let diff = old_spec.diff(&new_spec).expect("diff"); + assert!(diff.is_patch()); + println!("final diff: {:#?}", diff); + let expected = json!({ + "spec": { + "replicas": 3 + }}); + let json_diff = serde_json::to_value(diff).unwrap(); + assert_eq!(json_diff, expected); + } + +} diff --git a/k8-diff/src/lib.rs b/k8-diff/src/lib.rs new file mode 100644 index 0000000000..d2acb9f45f --- /dev/null +++ b/k8-diff/src/lib.rs @@ -0,0 +1,76 @@ +#![feature(nll)] + +mod json; + +pub trait Changes { + type Replace; + type Patch; + + fn diff(&self, new: &Self) -> Result<Diff<Self::Replace, Self::Patch>, DiffError>; +} + +#[derive(Debug)] +pub enum DiffError { + DiffValue, // json values are different +} + +// use Option as inspiration +#[derive(Debug)] +pub enum Diff<R, P> { + None, + Delete, + Patch(P), // for non primitive type + Replace(R), // can be used for map and list (with or without tag), works on ordered list + Merge(R), // need tag, works on unordered list +} + +impl<R, P> Diff<R, P> { + pub fn is_none(&self) -> bool { + match self { + Diff::None => true, + _ => false, + } + } + + pub fn is_delete(&self) -> bool { + match self { + Diff::Delete => true, + _ => false, + } + } + + pub fn is_replace(&self) -> bool { + match self { + Diff::Replace(_) => true, + _ => false, + } + } + + pub fn is_patch(&self) -> bool { + match self { + Diff::Patch(_) => true, + _ => false, + } + } + + pub fn is_merge(&self) -> bool { + match self { + Diff::Merge(_) => true, + _ => false, + } + } + + pub fn as_replace_ref(&self) -> &R { + match self { + Diff::Replace(ref val) => val, + _ => panic!("no change value"), + } + } + + pub fn as_patch_ref(&self) -> &P { + match self { + Diff::Patch(ref val) => val, + _ => panic!("no change value"), + } + } +} diff --git a/k8-metadata/Cargo.toml b/k8-metadata/Cargo.toml new file mode 100644 index 0000000000..7f7ee57832 --- /dev/null +++ b/k8-metadata/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "k8-metadata" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] +edition = "2018" + +[lib] +name = "k8_metadata" + +[dependencies] +log = "0.4.6" +metadata-core = { path = "metadata-core"} +metadata-auth-token = { path = "metadata-auth-token"} +metadata-topic = { path = "metadata-topic"} +metadata-spu = { path = "metadata-spu"} +metadata-partition = { path = "metadata-partition"} +metadata-spg = { path = "metadata-spg"} \ No newline at end
of file diff --git a/k8-metadata/metadata-auth-token/Cargo.toml b/k8-metadata/metadata-auth-token/Cargo.toml new file mode 100644 index 0000000000..45e49d1d1c --- /dev/null +++ b/k8-metadata/metadata-auth-token/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "metadata-auth-token" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] +edition = "2018" + +[dependencies] +log = "0.4.6" +serde = { version ="1.0.82", features = ['derive'] } +serde_json = "1.0.27" +metadata-core = { path = "../metadata-core"} \ No newline at end of file diff --git a/k8-metadata/metadata-auth-token/src/lib.rs b/k8-metadata/metadata-auth-token/src/lib.rs new file mode 100644 index 0000000000..f4ab9b7ed2 --- /dev/null +++ b/k8-metadata/metadata-auth-token/src/lib.rs @@ -0,0 +1,22 @@ +mod spec; +mod status; + +pub use self::spec::AuthTokenSpec; +pub use self::spec::TokenType; +pub use self::status::AuthTokenStatus; +pub use self::status::TokenResolution; + +use metadata_core::Crd; +use metadata_core::CrdNames; +use metadata_core::GROUP; +use metadata_core::V1; + +const AUTH_TOKEN_API: Crd = Crd { + group: GROUP, + version: V1, + names: CrdNames { + kind: "AuthToken", + plural: "auth-tokens", + singular: "auth-token", + }, +}; diff --git a/k8-metadata/metadata-auth-token/src/spec.rs b/k8-metadata/metadata-auth-token/src/spec.rs new file mode 100644 index 0000000000..e895b8fa9f --- /dev/null +++ b/k8-metadata/metadata-auth-token/src/spec.rs @@ -0,0 +1,59 @@ +//! +//! # AuthToken Spec +//! +//! Interface to the AuthToken spec in K8 key value store +//! + +use crate::AUTH_TOKEN_API; +use metadata_core::Crd; +use metadata_core::Spec; + +use serde::Deserialize; +use serde::Serialize; + +use super::AuthTokenStatus; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +impl Spec for AuthTokenSpec { + + type Status = AuthTokenStatus; + + fn metadata() -> &'static Crd { + &AUTH_TOKEN_API + } +} + +// TODO: add refresh secret + +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +#[serde(rename_all = "camelCase")] +pub struct AuthTokenSpec { + pub token_type: TokenType, + pub min_spu: i32, + pub max_spu: i32, +} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +pub enum TokenType { + Any, + Custom, + Managed, +} + +// ----------------------------------- +// Implementation - AuthTokenSpec +// ----------------------------------- + +impl AuthTokenSpec { + pub fn new(token_type: TokenType, min_spu: i32, max_spu: i32) -> Self { + AuthTokenSpec { + token_type, + min_spu, + max_spu, + } + } +} + diff --git a/k8-metadata/metadata-auth-token/src/status.rs b/k8-metadata/metadata-auth-token/src/status.rs new file mode 100644 index 0000000000..709e6f631c --- /dev/null +++ b/k8-metadata/metadata-auth-token/src/status.rs @@ -0,0 +1,31 @@ +//! +//! # AuthToken Status +//! +//! Interface to the AuthToken metadata status in K8 key value store +//! 
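
Editor's note: every metadata crate in this commit follows the same pattern — a private `Crd` constant (here `AUTH_TOKEN_API`) plus an `impl Spec` tying the spec type to it. A minimal sketch of what that buys you, assuming the `api_version()`/`kind()` defaults defined in metadata-core later in this diff; the `main` wrapper is illustrative only:

```rust
use metadata_auth_token::{AuthTokenSpec, TokenType};
use metadata_core::Spec;

fn main() {
    // Constructor from spec.rs: token type plus min/max SPU bounds.
    let spec = AuthTokenSpec::new(TokenType::Managed, 1, 2);
    assert_eq!(spec.max_spu, 2);

    // Derived from AUTH_TOKEN_API: GROUP = "fluvio.infinyon.com", V1 = "v1".
    assert_eq!(AuthTokenSpec::api_version(), "fluvio.infinyon.com/v1");
    assert_eq!(AuthTokenSpec::kind(), "AuthToken");
}
```
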
+use serde::Deserialize; +use serde::Serialize; + +use metadata_core::Status; + + + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +#[serde(rename_all = "camelCase")] +pub struct AuthTokenStatus { + pub resolution: TokenResolution, + pub reason: String, +} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +pub enum TokenResolution { + Ok, // operational + Init, // initializing + Invalid, // inactive +} + +impl Status for AuthTokenStatus{} diff --git a/k8-metadata/metadata-core/Cargo.toml b/k8-metadata/metadata-core/Cargo.toml new file mode 100644 index 0000000000..b5427db768 --- /dev/null +++ b/k8-metadata/metadata-core/Cargo.toml @@ -0,0 +1,14 @@ +[package] +edition = "2018" +name = "metadata-core" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + + +[dependencies] +log = "0.4.5" +serde = { version ="1.0.82", features = ['derive'] } +serde_json = "1.0.27" +serde_qs = "0.4.1" +hyper = "0.12.25" + diff --git a/k8-metadata/metadata-core/src/crd.rs b/k8-metadata/metadata-core/src/crd.rs new file mode 100644 index 0000000000..50bb490295 --- /dev/null +++ b/k8-metadata/metadata-core/src/crd.rs @@ -0,0 +1,21 @@ +//! +//! # CRD Definition +//! +//! Interface to the CRD header definition in K8 key value store +//! +#[derive(Debug)] +pub struct Crd { + pub group: &'static str, + pub version: &'static str, + pub names: CrdNames, +} + +#[derive(Debug)] +pub struct CrdNames { + pub kind: &'static str, + pub plural: &'static str, + pub singular: &'static str, +} + +pub const GROUP: &'static str = "fluvio.infinyon.com"; +pub const V1: &'static str = "v1"; diff --git a/k8-metadata/metadata-core/src/lib.rs b/k8-metadata/metadata-core/src/lib.rs new file mode 100644 index 0000000000..01d81b9448 --- /dev/null +++ b/k8-metadata/metadata-core/src/lib.rs @@ -0,0 +1,42 @@ +//! +//! # CRD traits +//! +//! Trait for CRD Spec/Status definition +//! +mod crd; +pub mod metadata; +pub mod options; + +pub use self::crd::Crd; +pub use self::crd::CrdNames; +pub use self::crd::GROUP; +pub use self::crd::V1; + +pub trait Status: Sized{} + +/// Kubernetes Spec +pub trait Spec: Sized { + + type Status: Status; + + /// return uri for single instance + fn metadata() -> &'static Crd; + + fn api_version() -> String { + let metadata = Self::metadata(); + if metadata.group == "core" { + return metadata.version.to_owned(); + } + format!("{}/{}", metadata.group, metadata.version) + } + + fn kind() -> String { + Self::metadata().names.kind.to_owned() + } + + /// in case of applying, we have some fields that are generated + /// or override. 
So we need special logic to reset them so we can do a proper comparison + fn make_same(&mut self,_other: &Self) { + } + +} diff --git a/k8-metadata/metadata-core/src/metadata.rs b/k8-metadata/metadata-core/src/metadata.rs new file mode 100644 index 0000000000..11baa992b5 --- /dev/null +++ b/k8-metadata/metadata-core/src/metadata.rs @@ -0,0 +1,635 @@ +use std::collections::HashMap; +use std::collections::BTreeMap; +use std::marker::PhantomData; +use std::fmt; + +use hyper::Uri; +use serde::de::Deserializer; +use serde::Deserialize; +use serde::Serialize; + +use crate::options::prefix_uri; +use crate::options::ListOptions; +use crate::Spec; + +pub const DEFAULT_NS: &'static str = "default"; +pub const TYPE_OPAQUE: &'static str = "Opaque"; + +pub trait K8Meta<S> where S: Spec { + + // return uri for given host name + fn item_uri(&self,host_name: &str) -> Uri; +} + +pub trait LabelProvider: Sized { + + + fn set_label_map(self, labels: HashMap<String, String>) -> Self; + + /// helper for setting list of labels + fn set_labels<T: Into<String>>(self, labels: Vec<(T, T)>) -> Self { + let mut label_map = HashMap::new(); + for (key, value) in labels { + label_map.insert(key.into(), value.into()); + } + self.set_label_map(label_map) + } + +} + +/// metadata associated with object when returned +/// here name and namespace must be populated +#[derive(Deserialize, Serialize, PartialEq, Debug, Default, Clone)] +#[serde(rename_all = "camelCase",default)] +pub struct ObjectMeta { + // mandatory fields + pub name: String, + pub namespace: String, + pub uid: String, + pub self_link: String, + pub creation_timestamp: String, + pub generation: Option<i64>, + pub resource_version: String, + // optional + pub cluster_name: Option<String>, + pub deletion_timestamp: Option<String>, + pub deletion_grace_period_seconds: Option<i64>, + pub labels: HashMap<String, String>, + pub owner_references: Vec<OwnerReferences>, +} + +impl LabelProvider for ObjectMeta { + + fn set_label_map(mut self, labels: HashMap<String, String>) -> Self { + self.labels = labels; + self + } +} + + +impl ObjectMeta { + + pub fn new<S>(name: S,name_space: S) -> Self + where S: Into<String> { + Self { + name: name.into(), + namespace: name_space.into(), + ..Default::default() + } + } + + /// provide builder pattern setter + pub fn set_labels<T: Into<String>>(mut self, labels: Vec<(T, T)>) -> Self { + let mut label_map = HashMap::new(); + for (key, value) in labels { + label_map.insert(key.into(), value.into()); + } + self.labels = label_map; + self + } + + /// create with name and default namespace + pub fn named<S>(name: S) -> Self where S: Into<String>{ + Self { + name: name.into(), + ..Default::default() + } + } + + /// create owner references pointing to this metadata + /// if name or uid doesn't exist, return none + pub fn make_owner_reference<S: Spec>(&self) -> OwnerReferences { + + OwnerReferences { + api_version: S::api_version(), + kind: S::kind(), + name: self.name.clone(), + uid: self.uid.clone(), + controller: Some(true), + ..Default::default() + } + + } + + pub fn namespace(&self) -> &str { + &self.namespace + } + + /// create child references that point to this + pub fn make_child_input_metadata<S: Spec>(&self,childname: String) -> InputObjectMeta { + + let mut owner_refs: Vec<OwnerReferences> = vec![]; + owner_refs.push(self.make_owner_reference::<S>()); + + InputObjectMeta { + name: childname, + namespace: self.namespace().to_owned(), + owner_references: owner_refs, + ..Default::default() + } + + } + + + pub fn as_input(&self) -> InputObjectMeta { + + InputObjectMeta { + name: self.name.clone(), + namespace: self.namespace.clone(), + ..Default::default() + } + } + + pub fn as_item(&self) -> ItemMeta { + ItemMeta {
+ name: self.name.clone(), + namespace: self.namespace.clone(), + } + } + + pub fn as_update(&self) -> UpdateItemMeta { + UpdateItemMeta { + name: self.name.clone(), + namespace: self.namespace.clone(), + resource_version: self.resource_version.clone() + } + } +} + + + +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct InputObjectMeta { + pub name: String, + pub labels: HashMap<String, String>, + pub namespace: String, + pub owner_references: Vec<OwnerReferences>, +} + +impl LabelProvider for InputObjectMeta { + + fn set_label_map(mut self, labels: HashMap<String, String>) -> Self { + self.labels = labels; + self + } +} + + +impl fmt::Display for InputObjectMeta { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}:{}",self.name,self.namespace) + } +} + +impl<S> K8Meta<S> for InputObjectMeta where S: Spec { + + fn item_uri(&self,host_name: &str) -> Uri { + + item_uri::<S>( + host_name, + &self.name, + &self.namespace, + None, + ) + } +} + + + +impl InputObjectMeta { + // shorthand to create just with name and metadata + pub fn named<S: Into<String>>(name: S, namespace: S) -> Self { + InputObjectMeta { + name: name.into(), + namespace: namespace.into(), + ..Default::default() + } + } +} + +impl From<ObjectMeta> for InputObjectMeta { + fn from(meta: ObjectMeta) -> Self { + Self { + name: meta.name, + namespace: meta.namespace, + ..Default::default() + } + } +} + + +/// used for retrieving,updating and deleting item +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ItemMeta { + pub name: String, + pub namespace: String, +} + + +impl From<ObjectMeta> for ItemMeta { + fn from(meta: ObjectMeta) -> Self { + Self { + name: meta.name, + namespace: meta.namespace + } + } +} + +/// used for updating item +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct UpdateItemMeta { + pub name: String, + pub namespace: String, + pub resource_version: String, +} + + +impl From<ObjectMeta> for UpdateItemMeta { + fn from(meta: ObjectMeta) -> Self { + Self { + name: meta.name, + namespace: meta.namespace, + resource_version: meta.resource_version + } + } +} + + + +#[derive(Deserialize, Serialize, Debug, Default, Clone, PartialEq)] +#[serde(rename_all = "camelCase")] +pub struct OwnerReferences { + pub api_version: String, + pub block_owner_deletion: Option<bool>, + pub controller: Option<bool>, + pub kind: String, + pub name: String, + pub uid: String, +} + +/// items uri +pub fn item_uri<S>(host: &str, name: &str, namespace: &str, sub_resource: Option<&str>) -> Uri +where + S: Spec + Sized, +{ + let crd = S::metadata(); + let prefix = prefix_uri(crd, host, namespace, None); + let uri_value = format!("{}/{}{}", prefix, name, sub_resource.unwrap_or("")); + let uri: Uri = uri_value.parse().unwrap(); + uri +} + +/// items uri +pub fn items_uri<S>(host: &str, namespace: &str, list_options: Option<&ListOptions>) -> Uri +where + S: Spec, +{ + let crd = S::metadata(); + let uri_value = prefix_uri(crd, host, namespace, list_options); + let uri: Uri = uri_value.parse().unwrap(); + uri +} + +#[derive(Deserialize, Debug, Eq, PartialEq, Clone)] +pub enum StatusEnum { + SUCCESS, + FAILURE, +} + +impl DeserializeWith for StatusEnum { + fn deserialize_with<'de, D>(de: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>, + { + let s = String::deserialize(de)?; + + match s.as_ref() { + "Success" => Ok(StatusEnum::SUCCESS), + "Failure" => Ok(StatusEnum::FAILURE), + _ => Err(serde::de::Error::custom( + "error trying to deserialize status type", + )), + } + } +} + 
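
Editor's note: to see what `item_uri`/`items_uri` actually produce, here is a hedged sketch using the Topic CRD defined later in this diff. The expected strings follow mechanically from `prefix_uri` (non-core group → `/apis/{group}/{version}/...`) and assume the reconstructed `<S: Spec>` type parameters above:

```rust
use metadata_core::metadata::{item_uri, items_uri};
use metadata_topic::TopicSpec;

fn main() {
    // Single item: {host}/apis/{group}/{version}/namespaces/{ns}/{plural}/{name}
    let one = item_uri::<TopicSpec>("https://localhost", "test", "default", None);
    assert_eq!(
        one.to_string(),
        "https://localhost/apis/fluvio.infinyon.com/v1/namespaces/default/topics/test"
    );

    // Collection: same prefix, no trailing name.
    let all = items_uri::<TopicSpec>("https://localhost", "default", None);
    assert_eq!(
        all.to_string(),
        "https://localhost/apis/fluvio.infinyon.com/v1/namespaces/default/topics"
    );
}
```
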
+#[derive(Deserialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct K8Status { + pub api_version: String, + pub code: Option<u16>, + pub details: Option<StatusDetails>, + pub kind: String, + pub message: Option<String>, + pub reason: Option<String>, + #[serde(deserialize_with = "StatusEnum::deserialize_with")] + pub status: StatusEnum, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct StatusDetails { + pub name: String, + pub group: Option<String>, + pub kind: String, + pub uid: String, +} + +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct K8Obj<S, P> { + pub api_version: String, + pub kind: String, + pub metadata: ObjectMeta, + pub spec: S, + pub status: Option<P>, + #[serde(default)] + pub data: Option<BTreeMap<String, String>>, +} + + + + + +impl<S> K8Obj<S, S::Status> + where S: Spec + Default, + S::Status: Default +{ + + #[allow(dead_code)] + pub fn new<N>(name: N,spec: S) -> Self where N: Into<String> { + Self { + api_version: S::api_version(), + kind: S::kind(), + metadata: ObjectMeta::named(name), + spec, + status: None, + ..Default::default() + } + } + + #[allow(dead_code)] + pub fn set_status(mut self,status: S::Status) -> Self { + self.status = Some(status); + self + } + + pub fn as_status_update(&self,status: S::Status) -> UpdateK8ObjStatus<S, S::Status> { + + UpdateK8ObjStatus { + api_version: S::api_version(), + kind: S::kind(), + metadata: self.metadata.as_update(), + status, + ..Default::default() + } + } + +} + +/// For creating, only need spec +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct K8SpecObj<S, M> { + pub api_version: String, + pub kind: String, + pub metadata: M, + pub spec: S, + #[serde(default)] + pub data: BTreeMap<String, String>, +} + +impl<S, M> K8SpecObj<S, M> + where S: Spec + Default +{ + pub fn new(spec: S,metadata: M) -> Self where M: Default { + Self { + api_version: S::api_version(), + kind: S::kind(), + metadata, + spec, + ..Default::default() + } + } +} + +pub type InputK8Obj<S> = K8SpecObj<S, InputObjectMeta>; +pub type UpdateK8Obj<S> = K8SpecObj<S, ItemMeta>; + + +/// Used for updating k8obj +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct UpdateK8ObjStatus<S, P> { + pub api_version: String, + pub kind: String, + pub metadata: UpdateItemMeta, + pub status: P, + pub data: PhantomData<S> +} + + +impl<S> From<UpdateK8Obj<S>> for InputK8Obj<S> where S: Default { + fn from(update: UpdateK8Obj<S>) -> Self { + Self { + api_version: update.api_version, + kind: update.kind, + metadata: update.metadata.into(), + spec: update.spec, + ..Default::default() + } + } +} + + + +impl From<ItemMeta> for InputObjectMeta { + fn from(update: ItemMeta) -> Self { + Self { + name: update.name, + namespace: update.namespace, + ..Default::default() + } + } +} + +/// name is optional for template +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase",default)] +pub struct TemplateMeta { + pub name: Option<String>, + pub creation_timestamp: Option<String>, + pub labels: HashMap<String, String>, +} + + +impl LabelProvider for TemplateMeta { + + fn set_label_map(mut self, labels: HashMap<String, String>) -> Self { + self.labels = labels; + self + } +} + +impl TemplateMeta { + + /// create with name and default namespace + pub fn named<S>(name: S) -> Self where S: Into<String>{ + Self { + name: Some(name.into()), + ..Default::default() + } + } +} + + + +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct TemplateSpec<S> { + pub metadata: Option<TemplateMeta>, + pub spec: S, +} + +impl<S> TemplateSpec<S> { + pub fn new(spec: S) -> Self { + TemplateSpec { + metadata: None, + spec + } + } +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct K8List<S> where S: Spec { + pub api_version: String, + pub items: Vec<K8Obj<S, S::Status>>, + pub kind: String, + pub metadata: ListMetadata +} + +impl<S> K8List<S> where S: Spec { + + #[allow(dead_code)] + pub fn new() -> Self { + K8List { + api_version: S::api_version(), + items: vec![], + kind: S::kind(), + metadata: ListMetadata { + _continue: None, + resource_version: S::api_version(), + self_link: "".to_owned() + } + } + } +} + + + +pub trait DeserializeWith: Sized { + fn deserialize_with<'de, D>(de: D) -> Result<Self, D::Error> + where + D: Deserializer<'de>; +} + +#[derive(Deserialize, Debug, Clone)] +#[serde(tag = "type", content = "object")] +pub enum K8Watch<S, P> {
+ ADDED(K8Obj<S, P>), + MODIFIED(K8Obj<S, P>), + DELETED(K8Obj<S, P>), +} + + + +#[derive(Deserialize, Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ListMetadata { + pub _continue: Option<String>, + pub resource_version: String, + pub self_link: String, +} + +#[derive(Deserialize, Serialize, Default, Debug, PartialEq, Clone)] +#[serde(rename_all = "camelCase")] +pub struct LabelSelector { + pub match_labels: HashMap<String, String>, +} + +impl LabelSelector { + pub fn new_labels<T: Into<String>>(labels: Vec<(T, T)>) -> Self { + let mut match_labels = HashMap::new(); + for (key, value) in labels { + match_labels.insert(key.into(), value.into()); + } + LabelSelector { match_labels } + } +} + +#[derive(Deserialize, Serialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct Env { + pub name: String, + pub value: Option<String>, + pub value_from: Option<EnvVarSource> +} + +impl Env { + pub fn key_value<T: Into<String>>(name: T, value: T) -> Self { + Env { + name: name.into(), + value: Some(value.into()), + value_from: None + } + } + + pub fn key_field_ref<T: Into<String>>(name: T,field_path: T) -> Self { + Env { + name: name.into(), + value: None, + value_from: Some(EnvVarSource{ + field_ref: Some(ObjectFieldSelector{ field_path: field_path.into()}) + }) + } + } +} + +#[derive(Deserialize, Serialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct EnvVarSource { + field_ref: Option<ObjectFieldSelector> +} + +#[derive(Deserialize, Serialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ObjectFieldSelector { + pub field_path: String +} + +#[cfg(test)] +mod test { + + use super::Env; + use super::ObjectMeta; + + #[test] + fn test_metadata_label() { + let metadata = ObjectMeta::default().set_labels(vec![("app".to_owned(), "test".to_owned())]); + + let maps = metadata.labels; + assert_eq!(maps.len(), 1); + assert_eq!(maps.get("app").unwrap(), "test"); + } + + #[test] + fn test_env() { + let env = Env::key_value("lang", "english"); + assert_eq!(env.name, "lang"); + assert_eq!(env.value, Some("english".to_owned())); + } + +} diff --git a/k8-metadata/metadata-core/src/options.rs b/k8-metadata/metadata-core/src/options.rs new file mode 100644 index 0000000000..d03569bd15 --- /dev/null +++ b/k8-metadata/metadata-core/src/options.rs @@ -0,0 +1,137 @@ +use crate::Crd; +use serde::Serialize; + +/// related to query parameters and uri +/// +/// +/// +/// generate prefix for given crd +/// if crd group is core then /api is used otherwise /apis + group + +pub fn prefix_uri(crd: &Crd, host: &str, namespace: &str, options: Option<&ListOptions>) -> String { + let version = crd.version; + let plural = crd.names.plural; + let group = crd.group.as_ref(); + let api_prefix = match group { + "core" => "api".to_owned(), + _ => format!("apis/{}", group), + }; + + let query = if let Some(opt) = options { + let mut query = "?".to_owned(); + let qs = serde_qs::to_string(opt).unwrap(); + query.push_str(&qs); + query + } else { + "".to_owned() + }; + + format!( + "{}/{}/{}/namespaces/{}/{}{}", + host, api_prefix, version, namespace, plural, query + ) +} + +/// goes as query parameter +#[derive(Serialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct ListOptions { + pub pretty: Option<bool>, + #[serde(rename = "continue")] + pub continu: Option<String>, + pub field_selector: Option<String>, + pub include_uninitialized: Option<bool>, + pub label_selector: Option<String>, + pub limit: Option<u32>, + pub resource_version: Option<String>, + pub timeout_seconds: Option<u32>, + pub watch: Option<bool>, +} + +#[derive(Serialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct DeleteOptions { + pub
api_version: Option<String>, + pub grace_period_seconds: Option<u64>, + pub kind: Option<String>, + pub orphan_dependents: Option<bool>, + pub preconditions: Vec<Precondition>, + pub propagation_policy: Option<String>, +} + +#[derive(Serialize, Default)] +#[serde(rename_all = "camelCase")] +pub struct Precondition { + pub uid: String, +} + +#[cfg(test)] +mod test { + + use super::prefix_uri; + use super::ListOptions; + use crate::metadata::DEFAULT_NS; + use crate::Crd; + use crate::CrdNames; + + const G1: Crd = Crd { + group: "test.com", + version: "v1", + names: CrdNames { + kind: "Item", + plural: "items", + singular: "item", + }, + }; + + const C1: Crd = Crd { + group: "core", + version: "v1", + names: CrdNames { + kind: "Item", + plural: "items", + singular: "item", + }, + }; + + #[test] + fn test_api_prefix_group() { + let uri = prefix_uri(&G1, "https://localhost", DEFAULT_NS, None); + assert_eq!( + uri, + "https://localhost/apis/test.com/v1/namespaces/default/items" + ); + } + + #[test] + fn test_api_prefix_core() { + let uri = prefix_uri(&C1, "https://localhost", DEFAULT_NS, None); + assert_eq!(uri, "https://localhost/api/v1/namespaces/default/items"); + } + + #[test] + fn test_api_prefix_watch() { + let opt = ListOptions { + watch: Some(true), + ..Default::default() + }; + let uri = prefix_uri(&C1, "https://localhost", DEFAULT_NS, Some(&opt)); + assert_eq!( + uri, + "https://localhost/api/v1/namespaces/default/items?watch=true" + ); + } + + #[test] + fn test_list_query() { + let opt = ListOptions { + pretty: Some(true), + watch: Some(true), + ..Default::default() + }; + + let qs = serde_qs::to_string(&opt).unwrap(); + assert_eq!(qs, "pretty=true&watch=true") + } + +} diff --git a/k8-metadata/metadata-partition/Cargo.toml b/k8-metadata/metadata-partition/Cargo.toml new file mode 100644 index 0000000000..5c79a7298c --- /dev/null +++ b/k8-metadata/metadata-partition/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "metadata-partition" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] +edition = "2018" + +[dependencies] +log = "0.4.6" +serde = { version ="1.0.82", features = ['derive'] } +serde_json = "1.0.27" +metadata-core = { path = "../metadata-core"} \ No newline at end of file diff --git a/k8-metadata/metadata-partition/src/lib.rs b/k8-metadata/metadata-partition/src/lib.rs new file mode 100644 index 0000000000..dd0077d73d --- /dev/null +++ b/k8-metadata/metadata-partition/src/lib.rs @@ -0,0 +1,22 @@ +mod spec; +mod status; + +pub use self::spec::PartitionSpec; +pub use self::status::PartitionStatus; +pub use self::status::ReplicaStatus; +pub use self::status::PartitionResolution; + +use metadata_core::Crd; +use metadata_core::CrdNames; +use metadata_core::GROUP; +use metadata_core::V1; + +const PARTITION_API: Crd = Crd { + group: GROUP, + version: V1, + names: CrdNames { + kind: "Partition", + plural: "partitions", + singular: "partition", + }, +}; diff --git a/k8-metadata/metadata-partition/src/spec.rs b/k8-metadata/metadata-partition/src/spec.rs new file mode 100644 index 0000000000..f07c6d8705 --- /dev/null +++ b/k8-metadata/metadata-partition/src/spec.rs @@ -0,0 +1,38 @@ +//! +//! # Partition Spec +//! +//! Interface to the Partition metadata spec in K8 key value store +//!
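
Editor's note: as a sketch of how the options plumbing composes with a CRD, `prefix_uri` appends whatever `serde_qs` makes of a `ListOptions`, so a label-selector filter on partitions looks roughly like this. The `Crd` constant is redeclared locally because `PARTITION_API` is private to its crate; the exact percent-encoding of the query value is serde_qs's business:

```rust
use metadata_core::options::{prefix_uri, ListOptions};
use metadata_core::{Crd, CrdNames, GROUP, V1};

const PARTITIONS: Crd = Crd {
    group: GROUP,
    version: V1,
    names: CrdNames { kind: "Partition", plural: "partitions", singular: "partition" },
};

fn main() {
    let opt = ListOptions {
        label_selector: Some("app=spu".to_owned()),
        ..Default::default()
    };
    // => https://localhost/apis/fluvio.infinyon.com/v1/namespaces/default/partitions?labelSelector=...
    println!("{}", prefix_uri(&PARTITIONS, "https://localhost", "default", Some(&opt)));
}
```
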
+use crate::PARTITION_API; +use metadata_core::Crd; +use metadata_core::Spec; + +use serde::Deserialize; +use serde::Serialize; + +use super::PartitionStatus; + +impl Spec for PartitionSpec { + + type Status = PartitionStatus; + + fn metadata() -> &'static Crd { + &PARTITION_API + } +} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct PartitionSpec { + pub leader: i32, + pub replicas: Vec<i32>, +} + +impl PartitionSpec { + pub fn new(leader: i32,replicas: Vec<i32>) -> Self { + PartitionSpec { + leader, + replicas + } + } +} diff --git a/k8-metadata/metadata-partition/src/status.rs b/k8-metadata/metadata-partition/src/status.rs new file mode 100644 index 0000000000..b373c4ba92 --- /dev/null +++ b/k8-metadata/metadata-partition/src/status.rs @@ -0,0 +1,46 @@ +//! +//! # Partition Status +//! +//! Interface to the Partition metadata status in K8 key value store +//! +use serde::Deserialize; +use serde::Serialize; + +use metadata_core::Status; + +#[derive(Deserialize, Serialize, Debug, Default, PartialEq, Clone)] +#[serde(rename_all = "camelCase")] +pub struct PartitionStatus { + pub resolution: PartitionResolution, + pub leader: ReplicaStatus, + pub replicas: Vec<ReplicaStatus>, + pub lsr: u32 +} + +#[derive(Deserialize, Serialize, Debug, Default, PartialEq, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ReplicaStatus { + pub spu: i32, + pub hw: i64, + pub leo: i64 +} + + + +#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)] +pub enum PartitionResolution { + Offline, // no leader + Online, // leader is available + LeaderOffline, + ElectionLeaderFound +} + +impl Default for PartitionResolution { + fn default() -> Self { + PartitionResolution::Offline + } +} + + + +impl Status for PartitionStatus{} diff --git a/k8-metadata/metadata-spg/Cargo.toml b/k8-metadata/metadata-spg/Cargo.toml new file mode 100644 index 0000000000..e119b8c5cd --- /dev/null +++ b/k8-metadata/metadata-spg/Cargo.toml @@ -0,0 +1,17 @@ +[package] +edition = "2018" +name = "metadata-spg" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + + +[dependencies] +log = "0.4.6" +serde = "1.0.76" +serde_derive = "1.0.76" +serde_json = "1.0.27" +serde_qs = "0.4.1" +types = { path = "../../types"} +metadata-core = { path = "../metadata-core"} +metadata-spu = { path = "../metadata-spu"} + diff --git a/k8-metadata/metadata-spg/src/lib.rs b/k8-metadata/metadata-spg/src/lib.rs new file mode 100644 index 0000000000..e06881b994 --- /dev/null +++ b/k8-metadata/metadata-spg/src/lib.rs @@ -0,0 +1,25 @@ +//! +//! # Cluster +//! +//! Interface to the Cluster metadata in K8 key value store +//! +mod spec; +mod status; + +pub use self::spec::*; +pub use self::status::*; + +use metadata_core::Crd; +use metadata_core::CrdNames; +use metadata_core::GROUP; +use metadata_core::V1; + +const SPG_API: Crd = Crd { + group: GROUP, + version: V1, + names: CrdNames { + kind: "SpuGroup", + plural: "spugroups", + singular: "spugroup", + }, +}; diff --git a/k8-metadata/metadata-spg/src/spec.rs b/k8-metadata/metadata-spg/src/spec.rs new file mode 100644 index 0000000000..c9cf2346de --- /dev/null +++ b/k8-metadata/metadata-spg/src/spec.rs @@ -0,0 +1,143 @@ +//! +//! # SPU Spec +//! +//! Interface to the SPU metadata spec in K8 key value store +//!
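
Editor's note: a small usage sketch for the partition types just defined, assuming the reconstructed `Vec<i32>` replica lists; per the partition CRD later in this diff, `lsr` is the live-replica count and `hw`/`leo` are the high watermark and log end offset. The values below are made up:

```rust
use metadata_partition::{PartitionSpec, PartitionStatus, PartitionResolution, ReplicaStatus};

fn main() {
    // Leader on SPU 0, followers on SPUs 1 and 2.
    let spec = PartitionSpec::new(0, vec![0, 1, 2]);
    assert_eq!(spec.replicas.len(), 3);

    let status = PartitionStatus {
        resolution: PartitionResolution::Online,
        leader: ReplicaStatus { spu: 0, hw: 10, leo: 10 },
        replicas: vec![ReplicaStatus { spu: 1, hw: 10, leo: 10 }],
        lsr: 2,
    };
    assert_eq!(status.leader.spu, 0);
}
```
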
+use serde::Deserialize; +use serde::Serialize; + +use metadata_core::Crd; +use metadata_core::Spec; + +use metadata_core::metadata::Env; +use metadata_core::metadata::TemplateSpec; +use metadata_spu::EncryptionEnum; + +use types::defaults::SPU_PUBLIC_PORT; +use types::defaults::SPU_PRIVATE_PORT; + +use crate::SPG_API; + +use super::SpuGroupStatus; + +impl Spec for SpuGroupSpec { + + type Status = SpuGroupStatus; + + fn metadata() -> &'static Crd { + &SPG_API + } +} + +#[derive(Deserialize, Serialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase",default)] +pub struct SpuGroupSpec { + pub template: TemplateSpec<SpuTemplate>, + pub replicas: u16, + #[serde(skip_serializing_if = "Option::is_none")] + pub min_id: Option<i32>, +} + + +impl SpuGroupSpec { + pub fn min_id(&self) -> i32 { + self.min_id.unwrap_or(0) + } +} + + +#[derive(Deserialize, Serialize, Default, Debug, Clone)] +#[serde(rename_all = "camelCase",default)] +pub struct SpuTemplate { + #[serde(skip_serializing_if = "Option::is_none")] + pub rack: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub public_endpoint: Option<SpuEndpointTemplate>, + #[serde(skip_serializing_if = "Option::is_none")] + pub private_endpoint: Option<SpuEndpointTemplate>, + #[serde(skip_serializing_if = "Option::is_none")] + pub controller_svc: Option<ControllerEndPoint>, + #[serde(skip_serializing_if = "Option::is_none")] + pub replication: Option<ReplicationConfig>, + #[serde(skip_serializing_if = "Option::is_none")] + pub storage: Option<StorageConfig>, + #[serde(skip_serializing_if = "Vec::is_empty")] + pub env: Vec<Env> +} + + + + +#[derive(Deserialize, Serialize, Default,Debug, PartialEq, Clone)] +#[serde(rename_all = "camelCase")] +pub struct SpuEndpointTemplate { + pub port: u16, + pub encryption: EncryptionEnum, +} + + + +impl SpuEndpointTemplate { + pub fn new(port: u16) -> Self { + Self { + port, + encryption: EncryptionEnum::PLAINTEXT, + } + } + + pub fn default_public() -> Self { + Self { + port: SPU_PUBLIC_PORT, + encryption: EncryptionEnum::PLAINTEXT, + } + } + + pub fn default_private() -> Self { + Self { + port: SPU_PRIVATE_PORT, + encryption: EncryptionEnum::PLAINTEXT, + } + } +} + + + +#[derive(Deserialize, Serialize, Debug, Default, PartialEq, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ControllerEndPoint { + pub port: u16, + pub hoste: String, + pub encryption: EncryptionEnum, +} + + + + + +#[derive(Deserialize, Default, Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct ReplicationConfig { + pub in_sync_replica_min: Option<u16>, +} + + +#[derive(Deserialize,Serialize, Debug, Default,Clone)] +#[serde(rename_all = "camelCase")] +pub struct StorageConfig { + #[serde(skip_serializing_if = "Option::is_none")] + pub log_dir: Option<String>, + #[serde(skip_serializing_if = "Option::is_none")] + pub size: Option<String> +} + + +impl StorageConfig { + pub fn log_dir(&self) -> String { + self.log_dir.clone().unwrap_or("/tmp/fluvio".to_owned()) + } + + pub fn size(&self) -> String { + self.size.clone().unwrap_or("1Gi".to_owned()) + } +} + diff --git a/k8-metadata/metadata-spg/src/status.rs b/k8-metadata/metadata-spg/src/status.rs new file mode 100644 index 0000000000..72a7f6cc61 --- /dev/null +++ b/k8-metadata/metadata-spg/src/status.rs @@ -0,0 +1,68 @@ +//! +//! # Cluster Status +//! +//! Interface to the Cluster metadata status in K8 key value store +//!
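
Editor's note: how a group spec is assembled in practice, as a hedged sketch assuming the reconstructed `TemplateSpec<SpuTemplate>` and `Option<...>` fields above; the replica count and id are made up:

```rust
use metadata_core::metadata::TemplateSpec;
use metadata_spg::{SpuGroupSpec, SpuTemplate, SpuEndpointTemplate, StorageConfig};

fn main() {
    // Three SPUs, ids starting at 100, default plaintext public endpoint.
    let group = SpuGroupSpec {
        replicas: 3,
        min_id: Some(100),
        template: TemplateSpec::new(SpuTemplate {
            public_endpoint: Some(SpuEndpointTemplate::default_public()),
            storage: Some(StorageConfig::default()),
            ..Default::default()
        }),
    };
    assert_eq!(group.min_id(), 100);

    // Storage accessors fall back to /tmp/fluvio and 1Gi when unset.
    let storage = group.template.spec.storage.as_ref().unwrap();
    assert_eq!(storage.log_dir(), "/tmp/fluvio");
    assert_eq!(storage.size(), "1Gi");
}
```
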
+use std::fmt; + +use serde::Deserialize; +use serde::Serialize; + +use metadata_core::Status; + +#[derive(Deserialize, Serialize, Debug, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct SpuGroupStatus { + pub resolution: SpuGroupStatusResolution, + pub reason: Option<String> +} + +impl Status for SpuGroupStatus {} + +impl SpuGroupStatus { + + pub fn invalid(reason: String) -> Self { + Self { + resolution: SpuGroupStatusResolution::Invalid, + reason: Some(reason) + } + } + + pub fn reserved() -> Self { + Self { + resolution: SpuGroupStatusResolution::Reserved, + ..Default::default() + } + } + + pub fn is_already_valid(&self) -> bool { + self.resolution == SpuGroupStatusResolution::Reserved + } + +} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +pub enum SpuGroupStatusResolution { + Init, + Invalid, + Reserved +} + +impl Default for SpuGroupStatusResolution { + fn default() -> Self { + Self::Init + } +} + + +impl fmt::Display for SpuGroupStatusResolution { + + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Init => write!(f,"Init"), + Self::Invalid => write!(f,"Invalid"), + Self::Reserved => write!(f,"Reserved") + } + } +} + diff --git a/k8-metadata/metadata-spu/Cargo.toml b/k8-metadata/metadata-spu/Cargo.toml new file mode 100644 index 0000000000..9abba0f992 --- /dev/null +++ b/k8-metadata/metadata-spu/Cargo.toml @@ -0,0 +1,13 @@ +[package] +edition = "2018" +name = "metadata-spu" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + + +[dependencies] +log = "0.4.6" +types = { path = "../../types"} +serde = { version ="1.0.82", features = ['derive'] } +serde_json = "1.0.39" +metadata-core = { path = "../metadata-core"} \ No newline at end of file diff --git a/k8-metadata/metadata-spu/src/lib.rs b/k8-metadata/metadata-spu/src/lib.rs new file mode 100644 index 0000000000..3d79bbe78a --- /dev/null +++ b/k8-metadata/metadata-spu/src/lib.rs @@ -0,0 +1,25 @@ +mod spec; +mod status; + +pub use self::spec::SpuSpec; +pub use self::spec::SpuType; +pub use self::spec::Endpoint; +pub use self::spec::EncryptionEnum; + +pub use self::status::SpuStatus; +pub use self::status::SpuStatusResolution; + +use metadata_core::Crd; +use metadata_core::CrdNames; +use metadata_core::GROUP; +use metadata_core::V1; + +const SPU_API: Crd = Crd { + group: GROUP, + version: V1, + names: CrdNames { + kind: "Spu", + plural: "spus", + singular: "spu", + }, +}; diff --git a/k8-metadata/metadata-spu/src/spec.rs b/k8-metadata/metadata-spu/src/spec.rs new file mode 100644 index 0000000000..ce7ffcba3a --- /dev/null +++ b/k8-metadata/metadata-spu/src/spec.rs @@ -0,0 +1,79 @@ +//! +//! # SPU Spec +//! +//! Interface to the SPU metadata spec in K8 key value store +//!
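
Editor's note: the group status lifecycle implied by the constructors above, as a short sketch; the reason string is hypothetical, and who flips the resolution (presumably the controller) is not shown in this diff:

```rust
use metadata_spg::{SpuGroupStatus, SpuGroupStatusResolution};

fn main() {
    // A group starts in Init and is later moved to Reserved or Invalid.
    let status = SpuGroupStatus::default();
    assert_eq!(status.resolution, SpuGroupStatusResolution::Init);

    let reserved = SpuGroupStatus::reserved();
    assert!(reserved.is_already_valid());

    let bad = SpuGroupStatus::invalid("overlapping spu id range".to_owned());
    assert_eq!(bad.resolution, SpuGroupStatusResolution::Invalid);
}
```
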
+use crate::SPU_API; +use metadata_core::Crd; +use metadata_core::Spec; + +use serde::Deserialize; +use serde::Serialize; + +use super::SpuStatus; + +impl Spec for SpuSpec { + + type Status = SpuStatus; + + fn metadata() -> &'static Crd { + &SPU_API + } +} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct SpuSpec { + pub spu_id: i32, + pub public_endpoint: Endpoint, + pub private_endpoint: Endpoint, + + #[serde(skip_serializing_if = "Option::is_none")] + pub spu_type: Option<SpuType>, + + #[serde(skip_serializing_if = "Option::is_none")] + pub rack: Option<String>, +} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +pub enum SpuType { + Managed, + Custom, +} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct Endpoint { + pub port: u16, + pub host: String, + pub encryption: EncryptionEnum, +} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +pub enum EncryptionEnum { + PLAINTEXT, + SSL, +} + +// ----------------------------------- +// Implementation - Endpoint +// ----------------------------------- + +impl Endpoint { + pub fn new(port: u16, host: String) -> Self { + Endpoint { + port, + host, + encryption: EncryptionEnum::PLAINTEXT, + } + } +} + +// ----------------------------------- +// Implementation - EncryptionEnum +// ----------------------------------- +impl Default for EncryptionEnum { + fn default() -> EncryptionEnum { + EncryptionEnum::PLAINTEXT + } +} diff --git a/k8-metadata/metadata-spu/src/status.rs b/k8-metadata/metadata-spu/src/status.rs new file mode 100644 index 0000000000..191be08073 --- /dev/null +++ b/k8-metadata/metadata-spu/src/status.rs @@ -0,0 +1,33 @@ +//! +//! # SPU Status +//! +//! Interface to the SPU metadata status in K8 key value store +//!
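
Editor's note: constructing an SPU spec with the API above, as a hedged sketch; `Endpoint::new` defaults encryption to PLAINTEXT, and the id, ports, and hostnames here are made up:

```rust
use metadata_spu::{SpuSpec, SpuType, Endpoint, EncryptionEnum};

fn main() {
    let spec = SpuSpec {
        spu_id: 100,
        public_endpoint: Endpoint::new(9005, "spu-100.fluvio.local".to_owned()),
        private_endpoint: Endpoint::new(9006, "spu-100.fluvio.local".to_owned()),
        spu_type: Some(SpuType::Managed),
        rack: None,
    };
    assert_eq!(spec.public_endpoint.encryption, EncryptionEnum::PLAINTEXT);
}
```
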
+use serde::Deserialize; +use serde::Serialize; + +use metadata_core::Status; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +#[derive(Deserialize, Serialize, Debug, PartialEq, Default,Clone)] +pub struct SpuStatus { + pub resolution: SpuStatusResolution, +} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +pub enum SpuStatusResolution { + Online, + Offline, + Init +} + +impl Default for SpuStatusResolution { + fn default() -> Self { + SpuStatusResolution::Init + } +} + +impl Status for SpuStatus{} \ No newline at end of file diff --git a/k8-metadata/metadata-topic/Cargo.toml b/k8-metadata/metadata-topic/Cargo.toml new file mode 100644 index 0000000000..331eb1ac6d --- /dev/null +++ b/k8-metadata/metadata-topic/Cargo.toml @@ -0,0 +1,13 @@ +[package] +edition = "2018" +name = "metadata-topic" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + + +[dependencies] +log = "0.4.6" +serde = { version ="1.0.82", features = ['derive'] } +serde_json = "1.0.39" +metadata-core = { path = "../metadata-core"} + diff --git a/k8-metadata/metadata-topic/src/lib.rs b/k8-metadata/metadata-topic/src/lib.rs new file mode 100644 index 0000000000..99d4d47aee --- /dev/null +++ b/k8-metadata/metadata-topic/src/lib.rs @@ -0,0 +1,23 @@ +mod spec; +mod status; + +pub use self::spec::TopicSpec; +pub use self::spec::Partition; + +pub use self::status::TopicStatus; +pub use self::status::TopicStatusResolution; + +use metadata_core::Crd; +use metadata_core::CrdNames; +use metadata_core::GROUP; +use metadata_core::V1; + +const TOPIC_API: Crd = Crd { + group: GROUP, + version: V1, + names: CrdNames { + kind: "Topic", + plural: "topics", + singular: "topic", + }, +}; diff --git a/k8-metadata/metadata-topic/src/spec.rs b/k8-metadata/metadata-topic/src/spec.rs new file mode 100644 index 0000000000..9ec049f469 --- /dev/null +++ b/k8-metadata/metadata-topic/src/spec.rs @@ -0,0 +1,96 @@ +//! +//! # Topic Spec +//! +//! Interface to the Topic metadata spec in K8 key value store +//! 
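
Editor's note: the SPU status just defined is minimal; a one-liner showing that a freshly created status defaults to `Init` (how it later moves to Online/Offline is outside this diff):

```rust
use metadata_spu::{SpuStatus, SpuStatusResolution};

fn main() {
    let status = SpuStatus::default();
    assert_eq!(status.resolution, SpuStatusResolution::Init);
}
```
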
+use metadata_core::Crd; +use metadata_core::Spec; +use serde::Deserialize; +use serde::Serialize; + +use crate::TOPIC_API; + +use super::TopicStatus; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +impl Spec for TopicSpec { + + type Status = TopicStatus; + + fn metadata() -> &'static Crd { + &TOPIC_API + } +} + +#[derive(Deserialize, Serialize, Default, Debug, PartialEq, Clone)] +#[serde(rename_all = "camelCase")] +pub struct TopicSpec { + #[serde(skip_serializing_if = "Option::is_none")] + pub partitions: Option<i32>, + + #[serde(skip_serializing_if = "Option::is_none")] + pub replication_factor: Option<i32>, + + #[serde(skip_serializing_if = "Option::is_none")] + pub ignore_rack_assignment: Option<bool>, + + #[serde(skip_serializing_if = "Option::is_none")] + pub custom_replica_assignment: Option<Vec<Partition>>, +} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +#[serde(rename_all = "camelCase")] +pub struct Partition { + pub partition: PartitionDetails, +} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +#[serde(rename_all = "camelCase")] +pub struct PartitionDetails { + pub id: i32, + pub replicas: Vec<i32>, //spu_ids +} + +// ----------------------------------- +// Implementation +// ----------------------------------- + +impl TopicSpec { + pub fn new( + partitions: Option<i32>, + replication_factor: Option<i32>, + ignore_rack_assignment: Option<bool>, + custom_replica_assignment: Option<Vec<Partition>>, + ) -> Self { + TopicSpec { + partitions, + replication_factor, + ignore_rack_assignment, + custom_replica_assignment, + } + } +} + +impl Partition { + pub fn new(id: i32, replicas: Vec<i32>) -> Self { + Partition { + partition: PartitionDetails { id, replicas }, + } + } + + pub fn id(&self) -> i32 { + self.partition.id + } + + pub fn replicas(&self) -> &Vec<i32> { + &self.partition.replicas + } + + pub fn replica_cnt(&self) -> i32 { + self.partition.replicas.len() as i32 + } +} + diff --git a/k8-metadata/metadata-topic/src/status.rs b/k8-metadata/metadata-topic/src/status.rs new file mode 100644 index 0000000000..64a3390dc4 --- /dev/null +++ b/k8-metadata/metadata-topic/src/status.rs @@ -0,0 +1,38 @@ +//! +//! # Topic Status +//! +//! Interface to the Topic metadata status in K8 key value store +//!
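
Editor's note: the two ways of defining a topic implied by `TopicSpec::new`, as a sketch assuming the reconstructed `Option` parameters; either the partition/replication counts are given and placement is computed, or an explicit replica assignment is supplied:

```rust
use metadata_topic::{TopicSpec, Partition};

fn main() {
    // Computed topic: counts set, placement left to the system.
    let computed = TopicSpec::new(Some(2), Some(3), Some(true), None);
    assert_eq!(computed.partitions, Some(2));

    // Custom assignment: partition 0 replicated on SPUs 0 and 1.
    let assigned = TopicSpec::new(
        None,
        None,
        None,
        Some(vec![Partition::new(0, vec![0, 1])]),
    );
    let first = &assigned.custom_replica_assignment.as_ref().unwrap()[0];
    assert_eq!(first.replica_cnt(), 2);
}
```
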
+use std::collections::BTreeMap; + +use serde::Deserialize; +use serde::Serialize; + +use metadata_core::Status; + +#[derive(Deserialize, Serialize, Debug, PartialEq, Default, Clone)] +#[serde(rename_all = "camelCase")] +pub struct TopicStatus { + pub resolution: TopicStatusResolution, + pub replica_map: BTreeMap<i32, Vec<i32>>, + pub reason: String, +} + +#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)] +pub enum TopicStatusResolution { + Init, // initial state + Pending, // waiting for resources (spus) + InsufficientResources, // out of sync with partition/replication_factor + InvalidConfig, // invalid configuration + Provisioned, // spu allocated +} + +impl Default for TopicStatusResolution { + fn default() -> Self { + TopicStatusResolution::Init + } +} + + +impl Status for TopicStatus{} + diff --git a/k8-metadata/rust-toolchain b/k8-metadata/rust-toolchain new file mode 120000 index 0000000000..9327ba4034 --- /dev/null +++ b/k8-metadata/rust-toolchain @@ -0,0 +1 @@ +../rust-toolchain \ No newline at end of file diff --git a/k8-metadata/src/lib.rs b/k8-metadata/src/lib.rs new file mode 100644 index 0000000000..cfe386d893 --- /dev/null +++ b/k8-metadata/src/lib.rs @@ -0,0 +1,23 @@ +pub mod core { + pub use metadata_core::*; +} + +pub mod auth_token { + pub use metadata_auth_token::*; +} + +pub mod topic { + pub use metadata_topic::*; +} + +pub mod spu { + pub use metadata_spu::*; +} + +pub mod partition { + pub use metadata_partition::*; +} + +pub mod spg { + pub use metadata_spg::*; +} diff --git a/k8-util/crd/config/minikube-storageclass-spu.yaml b/k8-util/crd/config/minikube-storageclass-spu.yaml new file mode 100644 index 0000000000..019b75557d --- /dev/null +++ b/k8-util/crd/config/minikube-storageclass-spu.yaml @@ -0,0 +1,6 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: fluvio-spu +provisioner: k8s.io/minikube-hostpath +reclaimPolicy: Retain \ No newline at end of file diff --git a/k8-util/crd/config/persistent_volume_claim_spu.yaml b/k8-util/crd/config/persistent_volume_claim_spu.yaml new file mode 100644 index 0000000000..d55f5a482e --- /dev/null +++ b/k8-util/crd/config/persistent_volume_claim_spu.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: fluvio-pvc +spec: + accessModes: + - ReadWriteOnce + storageClassName: fluvio-spu + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/k8-util/crd/crd_partition.yaml b/k8-util/crd/crd_partition.yaml new file mode 100644 index 0000000000..9fd3ae06d4 --- /dev/null +++ b/k8-util/crd/crd_partition.yaml @@ -0,0 +1,62 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: partitions.fluvio.infinyon.com +spec: + group: fluvio.infinyon.com + version: v1 + names: + kind: Partition + plural: partitions + singular: partition + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Leader + type: integer + format: int32 + description: Spu Id + JSONPath: .spec.leader + - name: Followers + type: string + description: Followers + JSONPath: .spec.replicas + - name: Status + type: string + description: Partition Status + JSONPath: .status.resolution + - name: LSR + type: integer + format: int32 + description: Live Replicas + JSONPath: .status.lsr + - name: HW + type: integer + format: int64 + description: Leader High Watermark + JSONPath: .status.leader.hw + - name: EOF + type: integer + format: int64 + description: Leader End Offset + JSONPath: .status.leader.leo + - name: Follower Offsets + type:
string + description: Follower Offsets + JSONPath: .status.replicas + validation: + openAPIV3Schema: + type: object + required: ["spec"] + properties: + spec: + type: object + required: ["leader", "replicas"] + properties: + leader: + type: integer + replicas: + type: array + items: + type: integer diff --git a/k8-util/crd/crd_spg.yaml b/k8-util/crd/crd_spg.yaml new file mode 100644 index 0000000000..0bf81a5d79 --- /dev/null +++ b/k8-util/crd/crd_spg.yaml @@ -0,0 +1,93 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: spugroups.fluvio.infinyon.com +spec: + group: fluvio.infinyon.com + version: v1 + names: + kind: SpuGroup + plural: spugroups + singular: spugroup + shortNames: + - spg + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + required: ["spec"] + type: object + properties: + spec: + type: object + required: ["replicas"] + properties: + replicas: + type: integer + minimum: 1 + maximum: 100 + minId: + type: integer + minimum: 0 + maximum: 99999 + template: + type: object + required: ["spec"] + properties: + spec: + type: object + properties: + rack: + type: string + publicEndpoint: + type: object + properties: + port: + type: integer + encryption: + type: string + enum: + - PLAINTEXT + - SSL + privateEndpoint: + type: object + properties: + port: + type: integer + encryption: + type: string + enum: + - PLAINTEXT + - SSL + controllerSvc: + type: object + properties: + host: + type: string + encryption: + type: string + enum: + - PLAINTEXT + - SSL + replication: + type: object + properties: + inSyncReplicaMin: + type: integer + minimum: 1 + storage: + type: object + properties: + logDir: + type: string + size: + type: string + env: + type: array + items: + type: object + properties: + name: + type: string + value: + type: string + diff --git a/k8-util/crd/crd_spu.yaml b/k8-util/crd/crd_spu.yaml new file mode 100644 index 0000000000..73e2927524 --- /dev/null +++ b/k8-util/crd/crd_spu.yaml @@ -0,0 +1,83 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: spus.fluvio.infinyon.com +spec: + group: fluvio.infinyon.com + version: v1 + names: + kind: Spu + plural: spus + singular: spu + scope: Namespaced + additionalPrinterColumns: + - name: ID + type: integer + description: Spu ID + JSONPath: .spec.spuId + - name: Status + type: string + description: Spu Status + JSONPath: .status.resolution + - name: Type + type: string + description: Spu Type + JSONPath: .spec.spuType + - name: Public Port + type: integer + description: Spu public port + JSONPath: .spec.publicEndpoint.port + - name: Private Port + type: integer + description: Spu private port + JSONPath: .spec.privateEndpoint.port + subresources: + status: {} + validation: + openAPIV3Schema: + required: ["spec"] + type: object + properties: + spec: + required: ["spuId", "publicEndpoint", "privateEndpoint"] + type: object + properties: + spuId: + type: integer + spuType: + type: string + enum: + - Custom + - Managed + rack: + type: string + publicEndpoint: + type: object + required: ["host"] + properties: + host: + type: string + port: + minimum: 1 + maximum: 65535 + type: integer + encryption: + type: string + enum: + - PLAINTEXT + - SSL + privateEndpoint: + required: ["host"] + type: object + properties: + host: + type: string + port: + minimum: 1 + maximum: 65535 + type: integer + encryption: + type: string + enum: + - PLAINTEXT + - SSL diff --git a/k8-util/crd/crd_topic.yaml b/k8-util/crd/crd_topic.yaml new file mode 100644 index 0000000000..dd85f3c3e4
--- /dev/null +++ b/k8-util/crd/crd_topic.yaml @@ -0,0 +1,68 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: topics.fluvio.infinyon.com +spec: + group: fluvio.infinyon.com + version: v1 + names: + kind: Topic + plural: topics + singular: topic + scope: Namespaced + subresources: + status: {} + additionalPrinterColumns: + - name: Partitions + type: integer + description: Partition count + JSONPath: .spec.partitions + - name: Replications + type: integer + description: Replication Count + JSONPath: .spec.replicationFactor + - name: Status + type: string + description: Topic Status + JSONPath: .status.resolution + + validation: + # openAPIV3Schema is the schema for validating custom objects. + openAPIV3Schema: + type: object + required: ["spec"] + properties: + spec: + type: object + properties: + partitions: + type: integer + minimum: 1 + maximum: 5000 + replicationFactor: + type: integer + minimum: 1 + maximum: 5000 + ignoreRackAssignment: + type: boolean + customReplicaAssignment: + type: array + items: + required: + - partition + properties: + partition: + type: object + required: + - id + - replicas + properties: + id: + type: integer + minimum: 0 + replicas: + type: array + items: + type: integer + minimum: 0 + diff --git a/k8-util/crd/init.sh b/k8-util/crd/init.sh new file mode 100755 index 0000000000..39fdd52125 --- /dev/null +++ b/k8-util/crd/init.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# initialize CRD +# add CRD +DATA_DIR=$(dirname "$0") +kubectl apply -f ${DATA_DIR}/crd_spu.yaml +kubectl apply -f ${DATA_DIR}/crd_spg.yaml +kubectl apply -f ${DATA_DIR}/crd_partition.yaml +kubectl apply -f ${DATA_DIR}/crd_topic.yaml \ No newline at end of file diff --git a/k8-util/deploy.sh b/k8-util/deploy.sh new file mode 100755 index 0000000000..32be0d9024 --- /dev/null +++ b/k8-util/deploy.sh @@ -0,0 +1,6 @@ +#!/bin/bash +mydir="$(dirname "${0}")" +kubectl apply -f ${mydir}/sc-deployment/sc-deployment.yaml +kubectl apply -f ${mydir}/sc-deployment/sc-internal.yaml +kubectl apply -f ${mydir}/sc-deployment/sc-public.yaml + diff --git a/k8-util/docker/sc/.gitignore b/k8-util/docker/sc/.gitignore new file mode 100644 index 0000000000..eb5a316cbd --- /dev/null +++ b/k8-util/docker/sc/.gitignore @@ -0,0 +1 @@ +target diff --git a/k8-util/docker/sc/Dockerfile.sc b/k8-util/docker/sc/Dockerfile.sc new file mode 100644 index 0000000000..bee33797dd --- /dev/null +++ b/k8-util/docker/sc/Dockerfile.sc @@ -0,0 +1,5 @@ +FROM scratch + +COPY sc-server /fluvio/sc-server + +CMD ["/fluvio/sc-server"] \ No newline at end of file diff --git a/k8-util/docker/sc/Makefile b/k8-util/docker/sc/Makefile new file mode 100644 index 0000000000..704a8c4da6 --- /dev/null +++ b/k8-util/docker/sc/Makefile @@ -0,0 +1,27 @@ +NAME = infinyon/fluvio-sc +TAG = 0.1-alpha +BIN_DIR = ../../../target/x86_64-unknown-linux-musl/$(BIN_NAME) + +all: push + + +copy_binaries: + mkdir -p target + cp ${BIN_DIR}/sc-server target + +build: copy_binaries + docker build -f Dockerfile.sc -t $(NAME):$(TAG) ./target + +push_registry: + docker tag $(NAME):$(TAG) localhost:5000/$(NAME):$(TAG) + docker push localhost:5000/$(NAME):$(TAG) + +pull_minikube: + eval $$(minikube docker-env); \ + docker pull localhost:5000/$(NAME):$(TAG); \ + docker tag localhost:5000/$(NAME):$(TAG) $(NAME):$(TAG) + +push: build push_registry pull_minikube + +push_release: build + docker push $(NAME):$(TAG) \ No newline at end of file diff --git a/k8-util/docker/spu/.gitignore b/k8-util/docker/spu/.gitignore new file mode 100644 index
0000000000..eb5a316cbd --- /dev/null +++ b/k8-util/docker/spu/.gitignore @@ -0,0 +1 @@ +target diff --git a/k8-util/docker/spu/Dockerfile.spu b/k8-util/docker/spu/Dockerfile.spu new file mode 100644 index 0000000000..4f22a28ca5 --- /dev/null +++ b/k8-util/docker/spu/Dockerfile.spu @@ -0,0 +1,4 @@ +FROM alpine:3.10 + +COPY spu-server /fluvio/spu-server +CMD ["/fluvio/spu-server"] \ No newline at end of file diff --git a/k8-util/docker/spu/Makefile b/k8-util/docker/spu/Makefile new file mode 100644 index 0000000000..43468735c6 --- /dev/null +++ b/k8-util/docker/spu/Makefile @@ -0,0 +1,27 @@ +NAME = infinyon/fluvio-spu +TAG = 0.1-alpha +BIN_DIR = ../../../target/x86_64-unknown-linux-musl/$(BIN_NAME) + +all: push + +copy_binaries: + mkdir -p target + cp ${BIN_DIR}/spu-server target + +build: copy_binaries + docker build -f Dockerfile.spu -t $(NAME):$(TAG) ./target + + +push_registry: + docker tag $(NAME):$(TAG) localhost:5000/$(NAME):$(TAG) + docker push localhost:5000/$(NAME):$(TAG) + +pull_minikube: + eval $$(minikube docker-env); \ + docker pull localhost:5000/$(NAME):$(TAG); \ + docker tag localhost:5000/$(NAME):$(TAG) $(NAME):$(TAG) + +push: build push_registry pull_minikube + +push_release: build + docker push $(NAME):$(TAG) diff --git a/k8-util/docker/tool/Dockerfile b/k8-util/docker/tool/Dockerfile new file mode 100644 index 0000000000..a55f4d90c4 --- /dev/null +++ b/k8-util/docker/tool/Dockerfile @@ -0,0 +1,20 @@ +FROM buildpack-deps:stretch +ARG RUSTV + +ENV RUSTUP_HOME=/usr/local/rustup \ + CARGO_HOME=/usr/local/cargo \ + PATH=/usr/local/cargo/bin:$PATH + +RUN apt-get update && apt-get install -y procps + +RUN set -eux; \ + \ + url="https://static.rust-lang.org/rustup/dist/x86_64-unknown-linux-gnu/rustup-init"; \ + wget "$url"; \ + chmod +x rustup-init; \ + ./rustup-init -y --no-modify-path --default-toolchain $RUSTV; \ + rm rustup-init; \ + chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \ + rustup --version; \ + cargo --version; \ + rustc --version; \ No newline at end of file diff --git a/k8-util/docker/tool/Makefile b/k8-util/docker/tool/Makefile new file mode 100644 index 0000000000..bd4eb4d581 --- /dev/null +++ b/k8-util/docker/tool/Makefile @@ -0,0 +1,11 @@ +NAME = fluvio/rust-tool +TOOLCHAIN = "../../../rust-toolchain" +RUSTV = $(shell cat ${TOOLCHAIN}) +TAG = ${RUSTV} +all: build push + +build: + docker build --build-arg RUSTV=$(RUSTV) -t $(NAME):$(TAG) . 
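+# push assumes you are logged in to Docker Hub; the image tag tracks the rust-toolchain file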
+ +push: + docker push $(NAME):$(TAG) \ No newline at end of file diff --git a/k8-util/install.sh b/k8-util/install.sh new file mode 100755 index 0000000000..07643afbbe --- /dev/null +++ b/k8-util/install.sh @@ -0,0 +1,5 @@ +#!/bin/bash +mydir="$(dirname "${0}")" +kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous +kubectl apply -f ${mydir}/crd/config/minikube-storageclass-spu.yaml +${mydir}/crd/init.sh diff --git a/k8-util/minikube-start.sh b/k8-util/minikube-start.sh new file mode 100755 index 0000000000..3772d923b7 --- /dev/null +++ b/k8-util/minikube-start.sh @@ -0,0 +1,3 @@ +#!/bin/bash +minikube start --kubernetes-version v1.13.7 +# minikube start --kubernetes-version v1.13.7 -v 10 diff --git a/k8-util/minikube-tunnel.sh b/k8-util/minikube-tunnel.sh new file mode 100755 index 0000000000..ca84343271 --- /dev/null +++ b/k8-util/minikube-tunnel.sh @@ -0,0 +1,2 @@ +#!/bin/bash +nohup minikube tunnel > /tmp/tunnel.out 2>&1 & diff --git a/k8-util/operator-templates/spu-service.yaml b/k8-util/operator-templates/spu-service.yaml new file mode 100644 index 0000000000..5bd57e6325 --- /dev/null +++ b/k8-util/operator-templates/spu-service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + name: group1 + labels: + app: spu + group: group1 +spec: + ports: + - name: public + protocol: TCP + port: 9005 + targetPort: 9005 + - name: private + protocol: TCP + port: 9006 + targetPort: 9006 + clusterIP: None + type: ClusterIP + selector: + app: spu + group: group1 \ No newline at end of file diff --git a/k8-util/operator-templates/spu-stateful-patch.yaml b/k8-util/operator-templates/spu-stateful-patch.yaml new file mode 100644 index 0000000000..dbb0d46c61 --- /dev/null +++ b/k8-util/operator-templates/spu-stateful-patch.yaml @@ -0,0 +1,3 @@ +spec: + replicas: 2 + diff --git a/k8-util/operator-templates/spu-stateful.yaml b/k8-util/operator-templates/spu-stateful.yaml new file mode 100644 index 0000000000..b06042bcbe --- /dev/null +++ b/k8-util/operator-templates/spu-stateful.yaml @@ -0,0 +1,46 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: group1 +spec: + selector: + matchLabels: + app: spu + group: group1 + serviceName: group1 + replicas: 3 + template: + metadata: + labels: + app: spu + group: group1 + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: spu + image: fluvio/spu:0.1 + ports: + - name: public + containerPort: 9005 + - name: private + containerPort: 9006 + volumeMounts: + - name: data + mountPath: /var/lib/fluvio/data + env: + - name: SPU + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: FLV_SC_PRIVATE_HOST + value: flv-sc-internal.default.svc.cluster.local + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: fluvio-spu + resources: + requests: + storage: 1Gi + diff --git a/k8-util/operator-templates/spu0-exteranl-service.yaml b/k8-util/operator-templates/spu0-exteranl-service.yaml new file mode 100644 index 0000000000..523090ba3c --- /dev/null +++ b/k8-util/operator-templates/spu0-exteranl-service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: spu-0 +spec: + type: LoadBalancer + externalTrafficPolicy: Local + selector: + statefulset.kubernetes.io/pod-name: spu-0 + ports: + - protocol: TCP + port: 9005 + targetPort: 9005 \ No newline at end of file diff --git a/k8-util/operator-templates/spu1-exteranl-service.yaml b/k8-util/operator-templates/spu1-exteranl-service.yaml new file
mode 100644 index 0000000000..af04aea84f --- /dev/null +++ b/k8-util/operator-templates/spu1-exteranl-service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: spu-1 +spec: + type: LoadBalancer + externalTrafficPolicy: Local + selector: + statefulset.kubernetes.io/pod-name: spu-1 + ports: + - protocol: TCP + port: 9005 + targetPort: 9005 \ No newline at end of file diff --git a/k8-util/operator-templates/spu2-exteranl-service.yaml b/k8-util/operator-templates/spu2-exteranl-service.yaml new file mode 100644 index 0000000000..2e53523114 --- /dev/null +++ b/k8-util/operator-templates/spu2-exteranl-service.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: spu-2 +spec: + type: LoadBalancer + externalTrafficPolicy: Local + selector: + statefulset.kubernetes.io/pod-name: spu-2 + ports: + - protocol: TCP + port: 9005 + targetPort: 9005 \ No newline at end of file diff --git a/k8-util/samples/crd/group1.yaml b/k8-util/samples/crd/group1.yaml new file mode 100644 index 0000000000..9d53031051 --- /dev/null +++ b/k8-util/samples/crd/group1.yaml @@ -0,0 +1,7 @@ +apiVersion: "fluvio.infinyon.com/v1" +kind: SpuGroup +metadata: + name: "group1" +spec: + replicas: 1 + minId: 10 diff --git a/k8-util/samples/crd/group2.yaml b/k8-util/samples/crd/group2.yaml new file mode 100644 index 0000000000..b8970efe59 --- /dev/null +++ b/k8-util/samples/crd/group2.yaml @@ -0,0 +1,12 @@ +apiVersion: "fluvio.infinyon.com/v1" +kind: SpuGroup +metadata: + name: "group2" +spec: + replicas: 2 + minId: 11 + template: + spec: + storage: + size: 2Gi + logDir: "/tmp/mylog" diff --git a/k8-util/samples/crd/group3.yaml b/k8-util/samples/crd/group3.yaml new file mode 100644 index 0000000000..a7b8e1c109 --- /dev/null +++ b/k8-util/samples/crd/group3.yaml @@ -0,0 +1,6 @@ +apiVersion: "fluvio.infinyon.com/v1" +kind: SpuGroup +metadata: + name: "group3" +spec: + replicas: 3 diff --git a/k8-util/samples/crd/group_trace.yaml b/k8-util/samples/crd/group_trace.yaml new file mode 100644 index 0000000000..077fcae0ac --- /dev/null +++ b/k8-util/samples/crd/group_trace.yaml @@ -0,0 +1,12 @@ +apiVersion: "fluvio.infinyon.com/v1" +kind: SpuGroup +metadata: + name: "group2" +spec: + replicas: 2 + minId: 11 + template: + spec: + env: + - name: "RUST_LOG" + value: "spu_server=trace" diff --git a/k8-util/samples/crd/init.sh b/k8-util/samples/crd/init.sh new file mode 100755 index 0000000000..00512bb035 --- /dev/null +++ b/k8-util/samples/crd/init.sh @@ -0,0 +1,8 @@ +#!/bin/bash +# initialize CRD +# Please add CRD +DATA_DIR=$(dirname "$0") +kubectl apply -f ${DATA_DIR}/crd_spu.yaml +kubectl apply -f ${DATA_DIR}/crd_cluster.yaml +kubectl apply -f ${DATA_DIR}/crd_partition.yaml +kubectl apply -f ${DATA_DIR}/crd_topic.yaml \ No newline at end of file diff --git a/k8-util/samples/crd/partition.yaml b/k8-util/samples/crd/partition.yaml new file mode 100644 index 0000000000..da26e1bd5b --- /dev/null +++ b/k8-util/samples/crd/partition.yaml @@ -0,0 +1,7 @@ +apiVersion: "fluvio.infinyon.com/v1" +kind: Partition +metadata: + name: test +spec: + leader: 1 + replicas: [2,4] \ No newline at end of file diff --git a/k8-util/samples/crd/spu_5001.yaml b/k8-util/samples/crd/spu_5001.yaml new file mode 100644 index 0000000000..a63e3b398b --- /dev/null +++ b/k8-util/samples/crd/spu_5001.yaml @@ -0,0 +1,15 @@ +apiVersion: "fluvio.infinyon.com/v1" +kind: Spu +metadata: + name: "custom-spu-1" +spec: + spuId: 5001 + spuType: "Custom" + publicEndpoint: + port: 9005 + host: fluvio.local.test + encryption: PLAINTEXT + 
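+ # private endpoint: internal listener; these samples pair each public port with the next port number (9005/9006, 9007/9008, 9009/9010)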
privateEndpoint: + port: 9006 + host: fluvio.local.test + encryption: PLAINTEXT diff --git a/k8-util/samples/crd/spu_5002.yaml b/k8-util/samples/crd/spu_5002.yaml new file mode 100644 index 0000000000..a69d831e2b --- /dev/null +++ b/k8-util/samples/crd/spu_5002.yaml @@ -0,0 +1,15 @@ +apiVersion: "fluvio.infinyon.com/v1" +kind: Spu +metadata: + name: "custom-spu-2" +spec: + spuId: 5002 + spuType: "Custom" + publicEndpoint: + port: 9007 + host: fluvio.local.test + encryption: PLAINTEXT + privateEndpoint: + port: 9008 + host: fluvio.local.test + encryption: PLAINTEXT diff --git a/k8-util/samples/crd/spu_5003.yaml b/k8-util/samples/crd/spu_5003.yaml new file mode 100644 index 0000000000..442c81e075 --- /dev/null +++ b/k8-util/samples/crd/spu_5003.yaml @@ -0,0 +1,15 @@ +apiVersion: "fluvio.infinyon.com/v1" +kind: Spu +metadata: + name: "custom-spu-3" +spec: + spuId: 5003 + spuType: "Custom" + publicEndpoint: + port: 9009 + host: fluvio.local.test + encryption: PLAINTEXT + privateEndpoint: + port: 9010 + host: fluvio.local.test + encryption: PLAINTEXT diff --git a/k8-util/samples/crd/start_5001.sh b/k8-util/samples/crd/start_5001.sh new file mode 100755 index 0000000000..5cf864228a --- /dev/null +++ b/k8-util/samples/crd/start_5001.sh @@ -0,0 +1,2 @@ +#!/bin/bash +../../../dev-tools/log/debug-spu-min 5001 9005 9006 diff --git a/k8-util/samples/crd/topic1.yaml b/k8-util/samples/crd/topic1.yaml new file mode 100644 index 0000000000..a5d6b6962a --- /dev/null +++ b/k8-util/samples/crd/topic1.yaml @@ -0,0 +1,7 @@ +apiVersion: "fluvio.infinyon.com/v1" +kind: Topic +metadata: + name: topic1 +spec: + partitions: 1 + replicationFactor: 1 diff --git a/k8-util/samples/crd/topic2.yaml b/k8-util/samples/crd/topic2.yaml new file mode 100644 index 0000000000..dbeb5c0735 --- /dev/null +++ b/k8-util/samples/crd/topic2.yaml @@ -0,0 +1,7 @@ +apiVersion: "fluvio.infinyon.com/v1" +kind: Topic +metadata: + name: topic2 +spec: + partitions: 1 + replicationFactor: 2 diff --git a/k8-util/samples/crd/topic3.yaml b/k8-util/samples/crd/topic3.yaml new file mode 100644 index 0000000000..0e5df64d23 --- /dev/null +++ b/k8-util/samples/crd/topic3.yaml @@ -0,0 +1,7 @@ +apiVersion: "fluvio.infinyon.com/v1" +kind: Topic +metadata: + name: topic3 +spec: + partitions: 1 + replicationFactor: 3 diff --git a/k8-util/samples/crd/topic4.yaml b/k8-util/samples/crd/topic4.yaml new file mode 100644 index 0000000000..4352b59d33 --- /dev/null +++ b/k8-util/samples/crd/topic4.yaml @@ -0,0 +1,7 @@ +apiVersion: "fluvio.infinyon.com/v1" +kind: Topic +metadata: + name: topic4 +spec: + partitions: 1 + replicationFactor: 2 diff --git a/k8-util/samples/crd/topic5.yaml b/k8-util/samples/crd/topic5.yaml new file mode 100644 index 0000000000..d1ba5fea65 --- /dev/null +++ b/k8-util/samples/crd/topic5.yaml @@ -0,0 +1,7 @@ +apiVersion: "fluvio.infinyon.com/v1" +kind: Topic +metadata: + name: topic5 +spec: + partitions: 1 + replicationFactor: 2 diff --git a/k8-util/samples/crd/topic5_custom.yaml b/k8-util/samples/crd/topic5_custom.yaml new file mode 100644 index 0000000000..853ea60bdb --- /dev/null +++ b/k8-util/samples/crd/topic5_custom.yaml @@ -0,0 +1,12 @@ +apiVersion: "fluvio.infinyon.com/v1" +kind: Topic +metadata: + name: topic5 +spec: + customReplicaAssignment: + - partition: + id: 0 + replicas: [5001, 5002] + - partition: + id: 1 + replicas: [5002, 5003] diff --git a/k8-util/samples/crd/topic6_custom.yaml b/k8-util/samples/crd/topic6_custom.yaml new file mode 100644 index 0000000000..f56f76bec0 --- /dev/null +++ 
b/k8-util/samples/crd/topic6_custom.yaml @@ -0,0 +1,9 @@ +apiVersion: "fluvio.infinyon.com/v1" +kind: Topic +metadata: + name: topic6 +spec: + customReplicaAssignment: + - partition: + replicas: [5001, 5002] + id: 1 diff --git a/k8-util/sc-deployment/flv_namespace.yaml b/k8-util/sc-deployment/flv_namespace.yaml new file mode 100644 index 0000000000..3fa1bf5f9e --- /dev/null +++ b/k8-util/sc-deployment/flv_namespace.yaml @@ -0,0 +1,10 @@ +{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": { + "name": "flv", + "labels": { + "name": "flv" + } + } +} \ No newline at end of file diff --git a/k8-util/sc-deployment/flv_set_context_minikube.sh b/k8-util/sc-deployment/flv_set_context_minikube.sh new file mode 100755 index 0000000000..2abf5ba3f1 --- /dev/null +++ b/k8-util/sc-deployment/flv_set_context_minikube.sh @@ -0,0 +1,3 @@ +#!/bin/bash +kubectl config set-context flv --namespace=flv --cluster=mycube --user=minikube +kubectl config use-context flv diff --git a/k8-util/sc-deployment/sc-deployment-debug.yaml b/k8-util/sc-deployment/sc-deployment-debug.yaml new file mode 100644 index 0000000000..4a1e085136 --- /dev/null +++ b/k8-util/sc-deployment/sc-deployment-debug.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + name: flv-sc + labels: + app: flv-sc +spec: + containers: + - name: flv-sc + image: infinyon/fluvio-sc:0.1-alpha + imagePullPolicy: Always + ports: + - containerPort: 9003 + env: + - name: RUST_LOG + value: sc_server=debug \ No newline at end of file diff --git a/k8-util/sc-deployment/sc-deployment.yaml b/k8-util/sc-deployment/sc-deployment.yaml new file mode 100644 index 0000000000..93153fd08d --- /dev/null +++ b/k8-util/sc-deployment/sc-deployment.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Pod +metadata: + name: flv-sc + labels: + app: flv-sc +spec: + containers: + - name: flv-sc + image: infinyon/fluvio-sc:0.1-alpha + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9003 + env: + - name: RUST_LOG + value: sc_server=info diff --git a/k8-util/sc-deployment/sc-internal-dev.yaml b/k8-util/sc-deployment/sc-internal-dev.yaml new file mode 100644 index 0000000000..70d7016011 --- /dev/null +++ b/k8-util/sc-deployment/sc-internal-dev.yaml @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: Service +metadata: + name: flv-sc-internal +spec: + selector: + app: flv-sc + ports: + - protocol: TCP + port: 9004 + targetPort: 9004 +--- +apiVersion: v1 +kind: Service +metadata: + name: flv-sc-internal +spec: + type: LoadBalancer + externalTrafficPolicy: Local + selector: + app: flv-sc + ports: + - protocol: TCP + port: 9004 + targetPort: 9004 \ No newline at end of file diff --git a/k8-util/sc-deployment/sc-internal.yaml b/k8-util/sc-deployment/sc-internal.yaml new file mode 100644 index 0000000000..841f62ed47 --- /dev/null +++ b/k8-util/sc-deployment/sc-internal.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Service +metadata: + name: flv-sc-internal +spec: + selector: + app: flv-sc + ports: + - protocol: TCP + port: 9004 + targetPort: 9004 \ No newline at end of file diff --git a/k8-util/sc-deployment/sc-public.yaml b/k8-util/sc-deployment/sc-public.yaml new file mode 100644 index 0000000000..68fb0f891c --- /dev/null +++ b/k8-util/sc-deployment/sc-public.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Service +metadata: + name: flv-sc-public +spec: + type: LoadBalancer + externalTrafficPolicy: Local + selector: + app: flv-sc + ports: + - protocol: TCP + port: 9003 + targetPort: 9003 \ No newline at end of file diff --git a/k8-util/util/README.md b/k8-util/util/README.md new file 
mode 100644 index 0000000000..5d47e0e06c --- /dev/null +++ b/k8-util/util/README.md @@ -0,0 +1,17 @@ +To run the DNS lookup utility, first set up the DNS pod: +```kubectl apply -f busybox.yaml``` + +To get DNS entries: +```./dns.sh <dns-name>``` + +For example, to get the DNS entries for the spu service: +```bash +./dns.sh spu.default.svc.cluster.local +Server: 10.96.0.10 +Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local + +Name: spu.default.svc.cluster.local +Address 1: 172.17.0.2 spu-0.spu.default.svc.cluster.local +Address 2: 172.17.0.3 spu-1.spu.default.svc.cluster.local +Address 3: 172.17.0.6 spu-2.spu.default.svc.cluster.local +``` \ No newline at end of file diff --git a/k8-util/util/busybox.yaml b/k8-util/util/busybox.yaml new file mode 100644 index 0000000000..eeee4c38df --- /dev/null +++ b/k8-util/util/busybox.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: busybox + namespace: default +spec: + containers: + - name: busybox + image: busybox:1.28 + command: + - sleep + - "3600" + imagePullPolicy: IfNotPresent + restartPolicy: Always \ No newline at end of file diff --git a/k8-util/util/deploy_dashboard.sh b/k8-util/util/deploy_dashboard.sh new file mode 100755 index 0000000000..443532698f --- /dev/null +++ b/k8-util/util/deploy_dashboard.sh @@ -0,0 +1,2 @@ +#!/bin/bash +kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml diff --git a/k8-util/util/dns.sh b/k8-util/util/dns.sh new file mode 100755 index 0000000000..0bbaaea9b0 --- /dev/null +++ b/k8-util/util/dns.sh @@ -0,0 +1,4 @@ +#!/bin/bash +# Used for looking up a dns entry +# kubectl run -i --tty --rm debug --image=busybox --restart=Never -- sh +kubectl exec -ti busybox -- nslookup $1 diff --git a/k8-util/util/run_dashboard.sh b/k8-util/util/run_dashboard.sh new file mode 100755 index 0000000000..d0a9593ed5 --- /dev/null +++ b/k8-util/util/run_dashboard.sh @@ -0,0 +1,3 @@ +#!/bin/bash +# +kubectl -n kube-system port-forward $(kubectl -n kube-system get pod -l k8s-app=kubernetes-dashboard -o jsonpath='{.items[0].metadata.name}') 8443:8443 & diff --git a/k8-util/util/snippet.md b/k8-util/util/snippet.md new file mode 100644 index 0000000000..e043d63cf4 --- /dev/null +++ b/k8-util/util/snippet.md @@ -0,0 +1,11 @@ +# Useful snippets + +**Tips**: For pretty display on a Mac, install **jsonpp** ```brew install jsonpp``` + +## Display raw JSON output + +Display all infinyon objects +> kubectl get --raw /apis/fluvio.infinyon.com/v1 | jsonpp + +Display topic ```test``` +> kubectl get --raw /apis/fluvio.infinyon.com/v1/namespaces/default/topics/test | jsonpp diff --git a/kf-protocol/Cargo.toml b/kf-protocol/Cargo.toml new file mode 100644 index 0000000000..f1096221b6 --- /dev/null +++ b/kf-protocol/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "kf-protocol" +edition = "2018" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + + +[dependencies] +log = "0.4.6" +kf-protocol-core = { path = "kf-protocol-core" } +kf-protocol-api = { path = "kf-protocol-api"} +kf-protocol-derive = { path = "kf-protocol-derive"} +kf-protocol-transport = { path = "kf-protocol-transport"} +kf-protocol-message = { path = "kf-protocol-message"} + +[dev-dependencies] +utils = { path= "../utils"} \ No newline at end of file diff --git a/kf-protocol/README.md b/kf-protocol/README.md new file mode 100644 index 0000000000..365715700e --- /dev/null +++ b/kf-protocol/README.md @@ -0,0 +1,16 @@ +# Test Client + +Test client command to test the spu server: + +``` +> ./send-b-client.sh
data/apirequest.txt 9004 +``` + +# Dump Kafka binary as pretty print + +First build the dump binary: + +```cargo build --features="cli"``` + +Then run the binary: +```../target/debug/kafka-dump``` diff --git a/kf-protocol/kafka-serde b/kf-protocol/kafka-serde new file mode 100644 index 0000000000..b9d5ad4b06 --- /dev/null +++ b/kf-protocol/kafka-serde @@ -0,0 +1,26 @@ +[package] +name = "kf-protocol" +edition = "2018" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + +[[bin]] +name = "kafka-dump" +path = "src/bin/kafka-dump.rs" +required-features = ["cli"] + +[features] +cli = ["clap"] +serde_parser = ["serde"] + +[dependencies] +bytes = "0.4.12" +log = "0.4.6" +serde = { version ="1.0.82", features = ['derive'], optional = true } +kafka-derive = { path = "kafka-derive" } + +clap = { version = "2.32.0", optional = true } + +[dev-dependencies] +rand = "0.6.0" +pretty_env_logger = "0.2.5" diff --git a/kf-protocol/kf-protocol-api/Cargo.toml b/kf-protocol/kf-protocol-api/Cargo.toml new file mode 100644 index 0000000000..da1a22379b --- /dev/null +++ b/kf-protocol/kf-protocol-api/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "kf-protocol-api" +edition = "2018" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + + + +[dependencies] +log = "0.4.6" +crc32c = "0.4.0" +content_inspector = "0.2.4" +serde = { version ="1.0.82", features = ['derive'] } +paste = "0.1.5" +utils = { path= "../../utils"} +kf-protocol = { path = "../kf-protocol-core", package = "kf-protocol-core" } +kf-protocol-derive = { path = "../kf-protocol-derive" } +kf-protocol-build = { path = "../kf-protocol-build" } + + diff --git a/kf-protocol/kf-protocol-api/src/api.rs b/kf-protocol/kf-protocol-api/src/api.rs new file mode 100644 index 0000000000..6e17dbc73b --- /dev/null +++ b/kf-protocol/kf-protocol-api/src/api.rs @@ -0,0 +1,165 @@ +use std::default::Default; +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::fs::File; +use std::io::Cursor; +use std::path::Path; +use std::io::Read; +use std::fmt::Debug; +use std::fmt; +use std::convert::TryFrom; + +use log::trace; +use log::debug; + +use kf_protocol::Decoder; +use kf_protocol::Encoder; +use kf_protocol::bytes::Buf; +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; + + +pub trait Request: Encoder + Decoder + Debug { + + const API_KEY: u16; + + const DEFAULT_API_VERSION: i16 = 0; + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = -1; + + type Response: Encoder + Decoder + Debug; + +} + + +pub trait KfRequestMessage: Sized + Default +{ + type ApiKey: Decoder + Debug; + + fn decode_with_header<T>(src: &mut T, header: RequestHeader) -> Result<Self, IoError> + where + Self: Default + Sized, + Self::ApiKey: Sized, + T: Buf; + + + fn decode_from<T>(src: &mut T) -> Result<Self, IoError> + where T: Buf, + + { + let header = RequestHeader::decode_from(src,0)?; + Self::decode_with_header(src,header) + + } + + fn decode_from_file<P: AsRef<Path>>(file_name: P) -> Result<Self, IoError> { + + debug!("decoding from file: {:#?}", file_name.as_ref()); + let mut f = File::open(file_name)?; + let mut buffer: [u8; 1000] = [0; 1000]; + + f.read(&mut buffer)?; + + let data = buffer.to_vec(); + let mut src = Cursor::new(&data); + + let mut size: i32 = 0; + size.decode(&mut src,0)?; + trace!("decoded request size: {} bytes", size); + + if src.remaining() < size as usize { + return Err(IoError::new( + ErrorKind::UnexpectedEof, + "not enough bytes for request message", + )); + } + + + Self::decode_from(&mut src) + } + +} + +pub trait KfApiKey: Sized + Encoder + Decoder + TryFrom<u16> { + +} + + + + +#[derive(Debug, Encode,
Decode, Default)] +pub struct RequestHeader { + api_key: u16, + api_version: i16, + correlation_id: i32, + client_id: String, +} + +impl fmt::Display for RequestHeader { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f,"api: {} client: {}",self.api_key,self.client_id) + } +} + + + +impl RequestHeader { + + + pub fn new(api_key: u16) -> Self { + // TODO: generate random client id + Self::new_with_client(api_key,"dummy".to_owned()) + } + + pub fn new_with_client<T>(api_key: u16,client_id: T) -> Self + where T: Into<String> + { + RequestHeader { + api_key, + api_version: 1, + correlation_id: 1, + + client_id: client_id.into() + } + } + + pub fn api_key(&self) -> u16 { + self.api_key + } + + pub fn api_version(&self) -> i16 { + self.api_version + } + + pub fn set_api_version(&mut self,version: i16) -> &mut Self { + self.api_version = version; + self + } + + pub fn correlation_id(&self) -> i32 { + self.correlation_id + } + + pub fn set_correlation_id(&mut self,id: i32) -> &mut Self { + self.correlation_id = id; + self + } + + pub fn client_id(&self) -> &String { + &self.client_id + } + + pub fn set_client_id<T>(&mut self,client_id: T) -> &mut Self + where T: Into<String> + { + self.client_id = client_id.into(); + self + } +} + +impl From<&RequestHeader> for i32 { + fn from(header: &RequestHeader) -> i32 { + header.correlation_id() + } +} diff --git a/kf-protocol/kf-protocol-api/src/batch.rs b/kf-protocol/kf-protocol-api/src/batch.rs new file mode 100644 index 0000000000..923cfd4940 --- /dev/null +++ b/kf-protocol/kf-protocol-api/src/batch.rs @@ -0,0 +1,302 @@ + +use std::io::Error; +use std::mem::size_of; +use std::fmt::Debug; + +use log::trace; +use crc32c; + +use kf_protocol::bytes::Buf; +use kf_protocol::bytes::BufMut; + +use kf_protocol::Decoder; +use kf_protocol::Encoder; +use kf_protocol::Version; +use kf_protocol_derive::Decode; + +use crate::Offset; +use crate::Size; +use super::DefaultRecord; + +pub type DefaultBatchRecords = Vec<DefaultRecord>; +pub type DefaultBatch = Batch<DefaultBatchRecords>; + + +pub trait BatchRecords: Default + Debug + Encoder + Decoder { + + /// how many bytes the record wants to process + fn remainder_bytes(&self,remainder: usize) -> usize { + remainder + } + +} + + +impl BatchRecords for DefaultBatchRecords {} + + + +/// size of the offset and length +pub const BATCH_PREAMBLE_SIZE: usize = + size_of::<Offset>() // Offset + + size_of::<i32>(); // i32 + + +#[derive(Default,Debug)] +pub struct Batch<R> where R: BatchRecords { + pub base_offset: Offset, + pub batch_len: i32, // only for decoding + pub header: BatchHeader, + pub records: R +} + +impl<R> Batch<R> where R: BatchRecords { + + pub fn get_mut_header(&mut self) -> &mut BatchHeader { + &mut self.header + } + + pub fn get_header(&self) -> &BatchHeader { + &self.header + } + + pub fn get_base_offset(&self) -> Offset { + self.base_offset + } + + pub fn set_base_offset(&mut self,offset: Offset) { + self.base_offset = offset; + } + + pub fn base_offset(mut self, offset: Offset) -> Self { + self.base_offset = offset; + self + } + + pub fn set_offset_delta(&mut self,delta: i32) { + self.header.last_offset_delta = delta; + } + + pub fn get_last_offset(&self) -> Offset { + self.get_base_offset() + self.get_last_offset_delta() as Offset + } + + + /// get last offset delta + pub fn get_last_offset_delta(&self) -> Size { + self.get_header().last_offset_delta as Size + } + + /// decode from buf stored in the file + /// read all excluding records + pub fn decode_from_file_buf<T>(&mut self, src: &mut T,version: Version) -> Result<(), Error> where T: Buf, + { + trace!("decoding
preamble"); + self.base_offset.decode(src,version)?; + self.batch_len.decode(src,version)?; + self.header.decode(src,version)?; + Ok(()) + } + + +} + + + + +impl Batch<DefaultBatchRecords> { + + + /// add a new record; this updates the record's offset delta and the batch header + pub fn add_record(&mut self,mut record: DefaultRecord) { + let last_offset_delta = if self.records.len() == 0 { 0 } else { self.records.len() as Offset }; + record.preamble.set_offset_delta(last_offset_delta); + self.header.last_offset_delta = last_offset_delta as i32; + self.records.push(record) + } + +} + + + +impl<R> Decoder for Batch<R> where R: BatchRecords { + + fn decode<T>(&mut self, src: &mut T,version: Version) -> Result<(), Error> where T: Buf, + { + trace!("decoding batch"); + self.decode_from_file_buf(src,version)?; + self.records.decode(src,version)?; + Ok(()) + } +} + + + +// Record batch contains 12 bytes of pre-amble plus header + records +impl<R> Encoder for Batch<R> where R: BatchRecords { + + fn write_size(&self,version: Version) -> usize { + BATCH_PREAMBLE_SIZE + BATCH_HEADER_SIZE + self.records.write_size(version) + } + + fn encode<T>(&self, dest: &mut T,version: Version) -> Result<(), Error> where T: BufMut + { + trace!("Encoding Batch"); + self.base_offset.encode(dest,version)?; + let batch_len: i32 = (BATCH_HEADER_SIZE + self.records.write_size(version)) as i32; + batch_len.encode(dest,version)?; + + // encode parts of header + self.header.partition_leader_epoch.encode(dest,version)?; + self.header.magic.encode(dest,version)?; + + // encode the remaining header fields and records into a temporary buffer + // so the CRC can be computed over them before they are written out + let mut out: Vec<u8> = Vec::new(); + let buf = &mut out; + self.header.attributes.encode(buf,version)?; + self.header.last_offset_delta.encode(buf,version)?; + self.header.first_timestamp.encode(buf,version)?; + self.header.max_time_stamp.encode(buf,version)?; + self.header.producer_id.encode(buf,version)?; + self.header.producer_epoch.encode(buf,version)?; + self.header.first_sequence.encode(buf,version)?; + self.records.encode(buf,version)?; + + let crc = crc32c::crc32c(&out); + crc.encode(dest,version)?; + dest.put_slice(&out); + Ok(()) + } +} + + +#[derive(Debug,Decode)] +pub struct BatchHeader { + pub partition_leader_epoch: i32, + pub magic: i8, + pub crc: u32, + pub attributes: i16, + pub last_offset_delta: i32, + pub first_timestamp: i64, + pub max_time_stamp: i64, + pub producer_id: i64, + pub producer_epoch: i16, + pub first_sequence: i32, +} + + +impl Default for BatchHeader { + + fn default() -> Self { + BatchHeader { + partition_leader_epoch: -1, + magic: 2, + crc: 0, + attributes: 0, + last_offset_delta: 0, + first_timestamp: 0, + max_time_stamp: 0, + producer_id: -1, + producer_epoch: -1, + first_sequence: -1, + } + } + +} + +#[allow(dead_code)] +pub const BATCH_HEADER_SIZE: usize = + size_of::<i32>() // partition leader epoch + + size_of::<i8>() // magic + + size_of::<u32>() //crc + + size_of::<i16>() // attributes (i16) + + size_of::<i32>() // last offset delta + + size_of::<i64>() // first_timestamp + + size_of::<i64>() // max_time_stamp + + size_of::<i64>() //producer id + + size_of::<i16>() // produce_epoch + + size_of::<i32>(); // first sequence + + + +#[cfg(test)] +mod test { + + + use std::io::Cursor; + use std::io::Error as IoError; + + use kf_protocol::Decoder; + use kf_protocol::Encoder; + + use crate::DefaultRecord; + use crate::DefaultBatch; + + + #[test] + fn test_encode_and_decode_batch() -> Result<(),IoError> { + + let record: DefaultRecord = vec![0x74,0x65,0x73,0x74].into(); + let mut batch = DefaultBatch::default(); + batch.records.push(record); + batch.header.first_timestamp = 1555478494747; + batch.header.max_time_stamp = 1555478494747; + + let bytes =
batch.as_bytes(0)?; + println!("batch raw bytes: {:#X?}",bytes.as_ref()); + + let batch = DefaultBatch::decode_from(&mut Cursor::new(bytes),0)?; + println!("batch: {:#?}",batch); + + let decoded_record = batch.records.get(0).unwrap(); + println!("record crc: {}",batch.header.crc); + assert_eq!(batch.header.crc, 1514417201); + if let Some(ref b) = decoded_record.value.inner_value_ref() { + assert_eq!(b.as_slice(),"test".to_owned().as_bytes()); + } else { + assert!(false); + } + + + Ok(()) + + } + + /* raw batch encoded + + 0000 02 00 00 00 45 00 00 c7 00 00 40 00 40 06 00 00 + 0010 c0 a8 07 30 c0 a8 07 30 d1 b9 23 84 29 ba 3d 48 + 0020 0b 13 89 98 80 18 97 62 90 6a 00 00 01 01 08 0a + 0030 1e 6f 09 0d 1e 6f 09 06 00 00 00 8f 00 00 00 05 + 0040 00 00 00 03 00 10 63 6f 6e 73 6f 6c 65 2d 70 72 + 0050 6f 64 75 63 65 72 ff ff 00 01 00 00 05 dc 00 00 + 0060 00 01 00 13 6d 79 2d 72 65 70 6c 69 63 61 74 65 + 0070 64 2d 74 6f 70 69 63 00 00 00 01 00 00 00 00 00 + 0080 00 00 48 00 00 00 00 00 00 00 00 00 00 00 3c ff + 0090 ff ff ff 02 5a 44 2c 31 00 00 00 00 00 00 00 00 + 00a0 01 6a 29 be 3e 1b 00 00 01 6a 29 be 3e 1b ff ff + 00b0 ff ff ff ff ff ff ff ff ff ff ff ff 00 00 00 01 + 00c0 14 00 00 00 01 08 74 65 73 74 00 + */ + + #[test] + fn test_records_offset() { + + let mut batch = DefaultBatch::default(); + + batch.add_record(DefaultRecord::default()); + batch.add_record(DefaultRecord::default()); + batch.add_record(DefaultRecord::default()); + + assert_eq!(batch.records.get(0).expect("index 0 should exists").get_offset_delta(),0); + assert_eq!(batch.records.get(1).expect("index 1 should exists").get_offset_delta(),1); + assert_eq!(batch.records.get(2).expect("index 2 should exists").get_offset_delta(),2); + assert_eq!(batch.get_last_offset_delta(),2); + + } + + +} + + + diff --git a/kf-protocol/kf-protocol-api/src/common.rs b/kf-protocol/kf-protocol-api/src/common.rs new file mode 100644 index 0000000000..0879975e8c --- /dev/null +++ b/kf-protocol/kf-protocol-api/src/common.rs @@ -0,0 +1,17 @@ +use serde::{Serialize, Deserialize}; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; + +#[derive(Debug, Encode, Serialize, Deserialize, Decode, Clone)] +#[repr(u8)] +pub enum Isolation { + ReadUncommitted = 0, + ReadCommitted = 1, +} + +impl Default for Isolation { + fn default() -> Self { + Isolation::ReadUncommitted + } +} diff --git a/kf-protocol/kf-protocol-api/src/error.rs b/kf-protocol/kf-protocol-api/src/error.rs new file mode 100644 index 0000000000..a4b93d11de --- /dev/null +++ b/kf-protocol/kf-protocol-api/src/error.rs @@ -0,0 +1,307 @@ +use utils::string_helper::upper_cammel_case_to_sentence; +use serde::{Serialize, Deserialize}; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; + +/// kafka error +/// https://kafka.apache.org/protocol#protocol_types + +#[repr(i16)] +#[derive(PartialEq, Debug, Clone, Copy, Serialize, Deserialize, Encode, Decode)] +pub enum ErrorCode { + // The server experienced an unexpected error when processing the request + UnknownServerError = -1, + + None = 0, + + // The requested offset is not within the range of offsets maintained by the server. + OffsetOutOfRange = 1, + + // This message has failed its CRC checksum, exceeds the valid size, or is otherwise corrupt. + CorruptMessage = 2, + + //This server does not host this topic-partition. + UnknownTopicOrPartition = 3, + + // The requested fetch size is invalid. + InvalidFetchSize = 4, + + // There is no leader for this topic-partition as we are in the middle of a leadership election. 
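+ // (corresponds to Kafka protocol error code 5: LEADER_NOT_AVAILABLE)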
+ LeaderNotAvailable = 5, + + // This server is not the leader for that topic-partition. + NotLeaderForPartition = 6, + + // The request timed out. + RequestTimedOut = 7, + + // The broker is not available. + BrokerNotAvailable = 8, + + // The replica is not available for the requested topic-partition + ReplicaNotAvailable = 9, + + // The request included a message larger than the max message size the server will accept. + MessageTooLarge = 10, + + // The controller moved to another broker. + StaleControllerEpoch = 11, + + // The metadata field of the offset request was too large. + OffsetMetadataTooLarge = 12, + + // The server disconnected before a response was received. + NetworkException = 13, + + // The coordinator is loading and hence can't process requests. + CoordinatorLoadInProgress = 14, + + // The coordinator is not available. + CoordinatorNotAvailable = 15, + + // This is not the correct coordinator. + NotCoordinator = 16, + + // The request attempted to perform an operation on an invalid topic. + InvalidTopicException = 17, + + // The request included message batch larger than the configured segment size on the server. + RecordListTooLarge = 18, + + // Messages are rejected since there are fewer in-sync replicas than required. + NotEnoughReplicas = 19, + + // Messages are written to the log, but to fewer in-sync replicas than required. + NotEnougReplicasAfterAppend = 20, + + // Produce request specified an invalid value for required acks. + InvalidRequiredAcks = 21, + + // Specified group generation id is not valid + IllegalGeneration = 22, + + // The group member's supported protocols are incompatible with those of existing members or first group member tried to join with empty protocol type or empty protocol list. + InconsistentGroupProtocol = 23, + + // The configured groupId is invalid + InvalidGroupId = 24, + + // The coordinator is not aware of this member. + UnknownMemberId = 25, + + // The session timeout is not within the range allowed by the broker (as configured by group.min.session.timeout.ms and group.max.session.timeout.ms). + InvalidSessionTimeout = 26, + + // The group is rebalancing, so a rejoin is needed. + RebalanceInProgress = 27, + + // The committing offset data size is not valid + InvalidCommitOffsetSize = 28, + + // Not authorized to access topics: [Topic authorization failed.] + TopicAuthorizationFailed = 29, + + // Not authorized to access group: Group authorization failed. + GroupAuthorizationFailed = 30, + + // Cluster authorization failed. + ClusterAuthorizationFailed = 31, + + // The timestamp of the message is out of acceptable range. + InvalidTimestamp = 32, + + // The broker does not support the requested SASL mechanism. + UnsupportedSaslMechanism = 33, + + // Request is not valid given the current SASL state. + IllegalSaslState = 34, + + // The version of API is not supported. + UnsupportedVersion = 35, + + // Topic with this name already exists. + TopicAlreadyExists = 36, + + // Number of partitions is invalid. + InvalidPartitions = 37, + + // Replication-factor is invalid. + InvalidReplicationFactor = 38, + + // Replica assignment is invalid. + InvalidReplicaAssignment = 39, + + // Configuration is invalid. + InvalidConfig = 40, + + // This is not the correct controller for this cluster. + NotController = 41, + + // This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker.
See the broker logs for more details + InvalidRequest = 42, + + // The message format version on the broker does not support the request. + UnsupportedForMessageFormat = 43, + + // Request parameters do not satisfy the configured policy. + PolicyViolation = 44, + + // The broker received an out of order sequence number + OutOfOrderSequenceNumber = 45, + + // The broker received a duplicate sequence number + DuplicateSequenceNumber = 46, + + // Producer attempted an operation with an old epoch. Either there is a newer producer with the same transactionalId, or the producer's transaction has been expired by the broker. + InvalidProducerEpoch = 47, + + // The producer attempted a transactional operation in an invalid state + InvalidTxnState = 48, + + // The producer attempted to use a producer id which is not currently assigned to its transactional id + InvalidProducerIdMapping = 49, + + // The transaction timeout is larger than the maximum value allowed by the broker (as configured by transaction.max.timeout.ms). + InvalidTransactionTimeout = 50, + + // The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing + ConcurrentTransactions = 51, + + // Indicates that the transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer + TransactionCoordinatorFenced = 52, + + // Transactional Id authorization failed + TransactionalIdAuthorizationFailed = 53, + + // Security features are disabled. + SecurityDisabled = 54, + + // The broker did not attempt to execute this operation. This may happen for batched RPCs where some operations in the batch failed, causing the broker to respond without trying the rest. + OperationNotAttempted = 55, + + // Disk error when trying to access log file on the disk. + KafkaStorageError = 56, + + // The user-specified log directory is not found in the broker config. + LogDirNotFound = 57, + + // SASL Authentication failed + SaslAuthenticationFailed = 58, + + // This exception is raised by the broker if it could not locate the producer metadata associated with the producerId in question. This could happen if, for instance, the producer's records were deleted because their retention time had elapsed. Once the last records of the producerId are removed, the producer's metadata is removed from the broker, and future appends by the producer will return this exception. + UnknownProducerId = 59, + + // A partition reassignment is in progress + ReassignmentInProgress = 60, + + // Delegation Token feature is not enabled. + DelegationTokenAuthDisabled = 61, + + // Delegation Token is not found on server. + DelegationTokenNotFound = 62, + + // Specified Principal is not valid Owner/Renewer. + DelegationTokenOwnerMismatch = 63, + + // Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels. + DelegationTokenRequestNotAllowed = 64, + + // Delegation Token authorization failed. + DelegationTokenAuthorizationFailed = 65, + + // Delegation Token is expired. 
+ DelegationTokenExpired = 66, + + // Supplied principalType is not supported + InvalidPrincipleType = 67, + + // The group is not empty + NonEmptyGroup = 68, + + // The group id was not found + GroupIdNotFound = 69, + + // The fetch session ID was not found + FetchSessionIdNotFound = 70, + + // The fetch session epoch is invalid + InvalidFetchSessionEpoch = 71, + + // There is no listener on the leader broker that matches the listener + // on which metadata request was processed. + ListenerNotFound = 72, + + // Topic deletion is disabled + TopicDeletionDisabled = 73, + + // The leader epoch in the request is older than the epoch on the broker + FencedLeaderEpoch = 74, + + // The leader epoch in the request is newer than the epoch on the broker + UnknownLeaderEpoch = 75, + + // The requesting client does not support the compression type of given partition + UnsupportedCompressionType = 76, + + // Broker epoch has changed + StaleBrokerEpoch = 77, + + // The leader high watermark has not caught up from a recent leader election + // so the offsets cannot be guaranteed to be monotonically increasing + OffsetNotAvailable = 78, + + // The group member needs to have a valid member id before actually + // entering a consumer group + MemberIdRequired = 79, + + // The preferred leader was not available + PreferredLeaderNotAvailable = 80, + + // The consumer group has reached its maximum number of members + GroupMaxSizeReached = 81, +} + +impl Default for ErrorCode { + fn default() -> ErrorCode { + ErrorCode::None + } +} + +impl ErrorCode { + pub fn to_string(&self) -> String { + match self { + ErrorCode::None => "Ok".to_owned(), + _ => format!("{:?}", self), + } + } + + pub fn to_sentence(&self) -> String { + match self { + ErrorCode::None => "".to_owned(), + _ => upper_cammel_case_to_sentence(format!("{:?}", self), false), + } + } + + pub fn is_error(&self) -> bool { + match self { + ErrorCode::None => false, + _ => true, + } + } +} + +#[cfg(test)] +mod test { + + use std::convert::TryInto; + + use super::ErrorCode; + #[test] + fn test_error_code_from_conversion() { + let val: i16 = 6; + let error_code: ErrorCode = val.try_into().expect("convert"); + assert_eq!(error_code, ErrorCode::NotLeaderForPartition); + } + +} diff --git a/kf-protocol/kf-protocol-api/src/flv_errors.rs b/kf-protocol/kf-protocol-api/src/flv_errors.rs new file mode 100644 index 0000000000..37893687ca --- /dev/null +++ b/kf-protocol/kf-protocol-api/src/flv_errors.rs @@ -0,0 +1,79 @@ +//! +//! # Fluvio Error Codes +//! +//! Error code definitions are described here. +//!
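+//! These codes are Fluvio-specific (distinct from the Kafka `ErrorCode` list) and travel +//! on the wire as `i16` values via the derived `Encode`/`Decode` implementations. +//! +//! A minimal usage sketch, assuming the re-exports in this crate's `lib.rs`: +//! +//! ```ignore +//! use kf_protocol_api::FlvErrorCode; +//! +//! let code = FlvErrorCode::TopicNotFound; +//! assert!(code.is_error()); +//! let message = code.to_sentence(); // human-readable form of the variant name +//! ``` +//!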
+use serde::Serialize; +use utils::string_helper::upper_cammel_case_to_sentence; + +use kf_protocol_derive::Encode; +use kf_protocol_derive::Decode; + +// ----------------------------------- +// Error Definition & Implementation +// ----------------------------------- + +#[repr(i16)] +#[derive(Encode, Decode, PartialEq, Debug, Clone, Copy, Serialize)] +pub enum FlvErrorCode { + // Not an error + None = 0, + + // Spu errors + SpuError = 1, + SpuRegisterationFailed = 2, + SpuOffline = 3, + SpuNotFound = 4, + SpuAlreadyExists = 5, + + // Topic errors + TopicError = 6, + TopicNotFound = 7, + TopicAlreadyExists = 8, + TopicPendingInitialization = 9, + TopicInvalidConfiguration = 10, + + // Partition errors + PartitionPendingInitialization = 11, + PartitionNotLeader = 12, +} + +impl Default for FlvErrorCode { + fn default() -> FlvErrorCode { + FlvErrorCode::None + } +} + +impl FlvErrorCode { + pub fn to_sentence(&self) -> String { + match self { + FlvErrorCode::None => "".to_owned(), + _ => upper_cammel_case_to_sentence(format!("{:?}", self), true), + } + } + + pub fn is_error(&self) -> bool { + match self { + FlvErrorCode::None => false, + _ => true, + } + } +} + +// ----------------------------------- +// Unit Tests +// ----------------------------------- + +#[cfg(test)] +mod test { + + use std::convert::TryInto; + + use super::FlvErrorCode; + + #[test] + fn test_flv_error_code_from_conversion() { + let error_code: FlvErrorCode = (2 as i16).try_into().expect("convert"); + assert_eq!(error_code, FlvErrorCode::SpuRegisterationFailed); + } +} diff --git a/kf-protocol/kf-protocol-api/src/group_assigment.rs b/kf-protocol/kf-protocol-api/src/group_assigment.rs new file mode 100644 index 0000000000..069b50b610 --- /dev/null +++ b/kf-protocol/kf-protocol-api/src/group_assigment.rs @@ -0,0 +1,225 @@ +use std::io::Error; +use std::io::ErrorKind; + +use serde::{Serialize, Deserialize}; + +use kf_protocol::{Encoder, Decoder}; +use kf_protocol::bytes::{BufMut, Buf}; +use kf_protocol::Version; + +// ----------------------------------- +// GroupAssignment +// ----------------------------------- + +/* +Reverse Engineered + + GroupAssignment { + // 0x00, 0x00, 0x00, 0x10 + // [ 16 ] byte array length + len: i32, + + // 0x00, 0x00, ??
+ reserved_i16: i16, + + // 0x00, 0x00, 0x00, 0x01 + // [ 1 ] topics array length + // 0x00, 0x04, 0x74, 0x65, 0x73, 0x74 + // [ len ] [ t e s t] + topics: Vec<String>, + + pub reserved_i32: i32, // defaults to 1 + pub reserved_i64: i64, + } + +*/ + +#[derive(Debug, Serialize, Deserialize, Default, PartialEq)] +pub struct GroupAssignment { + pub content: Option<Assignment>, +} + +#[derive(Debug, Serialize, Deserialize, Default, PartialEq)] +pub struct Assignment { + + pub reserved_i16: i16, + + pub topics: Vec<String>, + + pub reserved_i32: i32, + + pub reserved_i64: i64, +} + +impl Encoder for GroupAssignment { + fn write_size(&self, version: Version) -> usize { + let mut len = if let Some(content) = &self.content { + content.reserved_i16.write_size(version) + + content.topics.write_size(version) + + content.reserved_i32.write_size(version) + + content.reserved_i64.write_size(version) + } else { + 0 + }; + len += 4; + len + } + + fn encode<T>(&self, dest: &mut T, version: Version) -> Result<(), Error> + where + T: BufMut, + { + if dest.remaining_mut() < 4 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough capacity for length", + )); + } + + let length = (self.write_size(version) as i32) - 4; + length.encode(dest, version)?; + if let Some(content) = &self.content { + content.reserved_i16.encode(dest, version)?; + content.topics.encode(dest, version)?; + content.reserved_i32.encode(dest, version)?; + content.reserved_i64.encode(dest, version)?; + } + + Ok(()) + } +} + +impl Decoder for GroupAssignment { + fn decode<T>(&mut self, src: &mut T, version: Version) -> Result<(), Error> + where + T: Buf, + { + if src.remaining() < 4 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough buf for i32", + )); + } + + let mut len: i32 = 0; + len.decode(src, version)?; + if len > 0 { + if src.remaining() < len as usize { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough buf to decode metadata", + )); + } + + let mut reserved_i16: i16 = 0; + let mut topics: Vec<String> = vec![]; + let mut reserved_i32: i32 = 1; + let mut reserved_i64: i64 = 0; + + reserved_i16.decode(src, version)?; + topics.decode(src, version)?; + reserved_i32.decode(src, version)?; + reserved_i64.decode(src, version)?; + + let assignment = Assignment { + reserved_i16, + topics, + reserved_i32, + reserved_i64, + }; + *self = Self { + content: Some(assignment), + }; + } + + Ok(()) + } +} + +// ----------------------------------- +// Test Cases +// ----------------------------------- + +#[cfg(test)] +mod test { + use std::io::Cursor; + + use super::*; + + #[test] + fn test_group_assignment_decoding() { + let data = [ + 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x04, 0x74, 0x65, + 0x73, 0x74, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + ]; + + let mut value = GroupAssignment::default(); + let mut cursor = &mut
Cursor::new(data); + let result = value.decode(&mut cursor, 4); + assert!(result.is_ok()); + + let assignment = Assignment { + reserved_i16: 0, + topics: vec!["test".to_owned()], + reserved_i32: 1, + reserved_i64: 0, + }; + let expected_value = GroupAssignment { + content: Some(assignment), + }; + + assert_eq!(value, expected_value); + } + + #[test] + fn test2_group_assignment_decoding() { + let data = [ + 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 0x74, 0x65, + 0x73, 0x74, 0x32, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, + ]; + + let mut value = GroupAssignment::default(); + let mut cursor = &mut Cursor::new(data); + let result = value.decode(&mut cursor, 4); + assert!(result.is_ok()); + + let assignment = Assignment { + reserved_i16: 0, + topics: vec!["test2".to_owned()], + reserved_i32: 1, + reserved_i64: 0, + }; + let expected_value = GroupAssignment { + content: Some(assignment), + }; + + assert_eq!(value, expected_value); + } + + #[test] + fn test_group_assignment_encoding() { + let mut data: Vec<u8> = vec![]; + let expected_data = [ + 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x04, 0x74, 0x65, + 0x73, 0x74, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + ]; + + let assignment = Assignment { + reserved_i16: 0, + topics: vec!["test".to_owned()], + reserved_i32: 1, + reserved_i64: 0, + }; + + let assignment = GroupAssignment { + content: Some(assignment), + }; + + let result = assignment.encode(&mut data, 4); + assert!(result.is_ok()); + + assert_eq!(data, expected_data); + } + +} diff --git a/kf-protocol/kf-protocol-api/src/group_protocol_metadata.rs b/kf-protocol/kf-protocol-api/src/group_protocol_metadata.rs new file mode 100644 index 0000000000..faaaa65d34 --- /dev/null +++ b/kf-protocol/kf-protocol-api/src/group_protocol_metadata.rs @@ -0,0 +1,210 @@ +use std::io::Error; +use std::io::ErrorKind; + +use serde::{Serialize, Deserialize}; + +use kf_protocol::{Encoder, Decoder}; +use kf_protocol::bytes::{BufMut, Buf}; +use kf_protocol::Version; + +// ----------------------------------- +// ProtocolMetadata +// ----------------------------------- + +/* +Reverse Engineered + + ProtocolMetadata { + // 0x00, 0x00, 0x00, 0x10 + // [ 16 ] byte array length + len: i32, + + // 0x00, 0x00, ?? + reserved_i16: i16, + + // 0x00, 0x00, 0x00, 0x01 + // [ 1 ] topics array length + // 0x00, 0x04, 0x74, 0x65, 0x73, 0x74 + // [ len ] [ t e s t] + topics: Vec<String>, + + pub reserved_i32: i32, + } + +*/ + +#[derive(Debug, Serialize, Deserialize, Default, PartialEq)] +pub struct ProtocolMetadata { + pub content: Option<Metadata>, +} + +#[derive(Debug, Serialize, Deserialize, Default, PartialEq)] +pub struct Metadata { + pub reserved_i16: i16, + pub topics: Vec<String>, + pub reserved_i32: i32, +} + +impl Encoder for ProtocolMetadata { + fn write_size(&self, version: Version) -> usize { + let mut len = if let Some(content) = &self.content { + content.reserved_i16.write_size(version) + + content.topics.write_size(version) + + content.reserved_i32.write_size(version) + } else { + 0 + }; + len += 4; + len + } + + fn encode<T>(&self, dest: &mut T, version: Version) -> Result<(), Error> + where + T: BufMut, + { + if dest.remaining_mut() < 4 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough capacity for length", + )); + } + + let length = (self.write_size(version) as i32) - 4; + length.encode(dest, version)?; + if let Some(content) = &self.content { + content.reserved_i16.encode(dest, version)?; + content.topics.encode(dest, version)?; + content.reserved_i32.encode(dest, version)?; + } + + Ok(()) + } +} + +impl Decoder for ProtocolMetadata { + fn decode<T>(&mut self, src: &mut T, version: Version) -> Result<(), Error> + where + T: Buf, + { + if src.remaining() < 4 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough buf for i32", + )); + } + + let mut len: i32 = 0; + len.decode(src, version)?; + if len > 0 { + if src.remaining() < len as usize { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough buf to decode metadata", + )); + } + + let mut reserved_i16: i16 = 0; + let mut topics: Vec<String> = vec![]; + let mut reserved_i32: i32 = 0; +
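+ // decode fields in wire order: reserved i16, topic list, then reserved i32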
reserved_i16.decode(src, version)?; + topics.decode(src, version)?; + reserved_i32.decode(src, version)?; + + let metadata = Metadata { + reserved_i16, + topics, + reserved_i32, + }; + + *self = Self { + content: Some(metadata), + }; + } + Ok(()) + } +} + +// ----------------------------------- +// Test Cases +// ----------------------------------- + +#[cfg(test)] +mod test { + use std::io::Cursor; + + use super::*; + + #[test] + fn test_group_protocol_metadata_decoding() { + let data = [ + 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x04, 0x74, 0x65, + 0x73, 0x74, 0x00, 0x00, 0x00, 0x00, + ]; + + let mut value = ProtocolMetadata::default(); + let mut cursor = &mut Cursor::new(data); + let result = value.decode(&mut cursor, 4); + assert!(result.is_ok()); + + let metadata = Metadata { + reserved_i16: 0, + topics: vec!["test".to_owned()], + reserved_i32: 0, + }; + let expected_value = ProtocolMetadata { + content: Some(metadata), + }; + + assert_eq!(value, expected_value); + } + + #[test] + fn test2_metadata_group_decoding() { + let data = [ + 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 0x74, 0x65, + 0x73, 0x74, 0x32, 0x00, 0x00, 0x00, 0x00, + ]; + + let mut value = ProtocolMetadata::default(); + let mut cursor = &mut Cursor::new(data); + let result = value.decode(&mut cursor, 4); + assert!(result.is_ok()); + + let metadata = Metadata { + reserved_i16: 0, + topics: vec!["test2".to_owned()], + reserved_i32: 0, + }; + let expected_value = ProtocolMetadata { + content: Some(metadata), + }; + + assert_eq!(value, expected_value); + } + + #[test] + fn test_metadata_group_encoding() { + let mut data: Vec = vec![]; + let exected_data = [ + 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x04, 0x74, 0x65, + 0x73, 0x74, 0x00, 0x00, 0x00, 0x00, + ]; + + let metadata = Metadata { + reserved_i16: 0, + topics: vec!["test".to_owned()], + reserved_i32: 0, + }; + + let protocol = ProtocolMetadata { + content: Some(metadata), + }; + + let result = protocol.encode(&mut data, 4); + assert!(result.is_ok()); + + assert_eq!(data, exected_data); + } + +} diff --git a/kf-protocol/kf-protocol-api/src/kf_api.rs b/kf-protocol/kf-protocol-api/src/kf_api.rs new file mode 100644 index 0000000000..60a2f054af --- /dev/null +++ b/kf-protocol/kf-protocol-api/src/kf_api.rs @@ -0,0 +1,96 @@ +use kf_protocol_derive::Encode; +use kf_protocol_derive::Decode; + +#[derive(PartialEq, Debug, Clone, Copy, Encode, Decode)] +#[repr(u16)] +pub enum AllKfApiKey { + Produce = 0, + Fetch = 1, + ListOffsets = 2, + Metadata = 3, + LeaderAndIsr = 4, + StopReplica = 5, + UpdateMetadata = 6, + ControlShutdown = 7, + OffsetCommit = 8, + OffsetFetch = 9, + FindCoordinator = 10, + JoinGroup = 11, + Heartbeat = 12, + LeaveGroup = 13, + SyncGroup = 14, + DescribeGroup = 15, + ListGroups = 16, + SaslHandshake = 17, + ApiVersion = 18, + CreateTopics = 19, + DeleteTopics = 20, + DeleteRecords = 21, + InitProducerId = 22, + OffsetForLeaderEpoch = 23, + AddPartitionsToTxn = 24, + AddOffsetsToTxn = 25, + EndTxn = 26, + WriteTxnMarkers = 27, + TxnOffsetCommit = 28, + DescribeAcls = 29, + CreateAcls = 30, + DeleteAcls = 31, + DescribeConfigs = 32, + AlterConfigs = 33, + AlterReplicaLogDirs = 34, + DescribeLogDirs = 35, + SaslAuthenticate = 36, + CreatePartitions = 37, + CreateDelegationToken = 38, + RenewDelegationToken = 39, + ExpireDelegationToken = 40, + DescribeDelegationToken = 41, + DeleteGroups = 42, +} + +impl Default for AllKfApiKey { + fn default() -> AllKfApiKey { + 
AllKfApiKey::ApiVersion + } +} + +#[cfg(test)] +mod test { + + use crate::AllKfApiKey; + use kf_protocol::Decoder; + use kf_protocol::Encoder; + use std::io::Cursor; + + #[test] + fn test_decode_enum_not_enough() { + let data = [0x11]; // only one value + + let mut value = AllKfApiKey::ApiVersion; + let result = value.decode(&mut Cursor::new(&data), 0); + assert!(result.is_err()); + } + + #[test] + fn test_decode_api_enum() { + let data = [0x00, 0x03]; + + let mut value = AllKfApiKey::Metadata; + let result = value.decode(&mut Cursor::new(&data), 0); + assert!(result.is_ok()); + assert_eq!(value, AllKfApiKey::Metadata); + } + + #[test] + fn test_encode_enum() { + let mut src = vec![]; + let value = AllKfApiKey::Metadata; + let result = value.encode(&mut src, 0); + assert!(result.is_ok()); + assert_eq!(src.len(), 2); + assert_eq!(src[0], 0x00); + assert_eq!(src[1], AllKfApiKey::Metadata as u8); + } + +} diff --git a/kf-protocol/kf-protocol-api/src/lib.rs b/kf-protocol/kf-protocol-api/src/lib.rs new file mode 100644 index 0000000000..67cc03a2a9 --- /dev/null +++ b/kf-protocol/kf-protocol-api/src/lib.rs @@ -0,0 +1,53 @@ +mod api; +mod kf_api; +mod request; +mod response; +mod error; +mod batch; +mod record; +mod common; +mod group_protocol_metadata; +mod group_assigment; +mod flv_errors; + +pub type Offset = i64; +pub type Size = u32; + +pub use self::api::KfApiKey; +pub use self::api::KfRequestMessage; +pub use self::api::Request; +pub use self::api::RequestHeader; +pub use self::kf_api::AllKfApiKey; +pub use self::request::RequestMessage; + +pub use self::response::ResponseMessage; + +pub use self::batch::Batch; +pub use self::batch::BatchRecords; +pub use self::batch::DefaultBatch; +pub use self::batch::DefaultBatchRecords; +pub use self::record::DefaultRecord; +pub use self::record::DefaultRecords; +pub use self::record::Record; +pub use self::record::RecordHeader; +pub use self::batch::BATCH_HEADER_SIZE; +pub use self::batch::BATCH_PREAMBLE_SIZE; +pub use self::group_protocol_metadata::ProtocolMetadata; +pub use self::group_protocol_metadata::Metadata; +pub use self::group_assigment::GroupAssignment; +pub use self::group_assigment::Assignment; +pub use self::common::*; + +pub use self::error::ErrorCode; +pub use self::flv_errors::FlvErrorCode; + +pub const MAX_BYTES: i32 = 52428800; + +#[macro_export] +macro_rules! 
api_decode { + ($api:ident,$req:ident,$src:expr,$header:expr) => {{ + use kf_protocol::Decoder; + let request = $req::decode_from($src, $header.api_version())?; + Ok($api::$req(RequestMessage::new($header, request))) + }}; +} diff --git a/kf-protocol/kf-protocol-api/src/record.rs b/kf-protocol/kf-protocol-api/src/record.rs new file mode 100644 index 0000000000..0032117ea5 --- /dev/null +++ b/kf-protocol/kf-protocol-api/src/record.rs @@ -0,0 +1,417 @@ + +use std::fmt; +use std::fmt::Debug; +use std::fmt::Display; +use std::io::Error; +use std::io::ErrorKind; + +use log::trace; +use content_inspector::{ContentType, inspect}; + +use kf_protocol::bytes::Buf; +use kf_protocol::bytes::BufMut; + +use kf_protocol::Decoder; +use kf_protocol::DecoderVarInt; +use kf_protocol::Encoder; +use kf_protocol::EncoderVarInt; +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol::Version; + +use crate::Offset; +use crate::DefaultBatch; + + +pub type DefaultRecord = Record; + +/// slice that can works in Async Context +pub trait AsyncBuffer { + fn len(&self) -> usize; +} + +pub trait Records {} + +#[derive(Default)] +pub struct DefaultAsyncBuffer(Option>); + +impl DefaultAsyncBuffer { + pub fn new(val: Option>) -> Self { + DefaultAsyncBuffer(val) + } + + pub fn inner_value(self) -> Option> { + self.0 + } + + pub fn inner_value_ref(&self) -> &Option> { + &self.0 + } + + pub fn len(&self) -> usize { + if self.0.is_some() { + self.0.as_ref().unwrap().len() + } else { + 0 + } + } + + /// Check if value is binary content + pub fn is_binary(&self) -> bool { + if let Some(value) = self.inner_value_ref() { + match inspect(value) { + ContentType::BINARY => true, + _ => false, + } + } else { + false + } + } + + /// Describe value - return text, binary, or 0 bytes + pub fn describe(&self) -> String { + if self.inner_value_ref().is_some() { + if self.is_binary() { + format!("binary: ({} bytes)", self.len()) + } else { + format!("text: '{}'", self) + } + } else { + format!("empty: (0 bytes)") + } + } + +} + +impl From>> for DefaultAsyncBuffer { + fn from(val: Option>) -> Self { + Self::new(val) + } +} + +impl AsyncBuffer for DefaultAsyncBuffer { + fn len(&self) -> usize { + match self.0 { + Some(ref val) => val.len(), + None => 0, + } + } +} + +impl Debug for DefaultAsyncBuffer { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.0 { + Some(ref val) => write!(f, "{:?}", String::from_utf8_lossy(val)), + None => write!(f, "no values"), + } + } +} + +impl Display for DefaultAsyncBuffer { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.0 { + Some(ref val) => write!(f, "{}", String::from_utf8_lossy(val)), + None => write!(f, ""), + } + } +} + +impl From for DefaultAsyncBuffer { + fn from(value: String) -> Self { + Self(Some(value.into_bytes())) + } +} + +impl From> for DefaultAsyncBuffer { + fn from(value: Vec) -> Self { + Self(Some(value)) + } +} + +impl Encoder for DefaultAsyncBuffer { + fn write_size(&self,_version: Version) -> usize { + self.0.var_write_size() + } + + fn encode(&self, src: &mut T,_version: Version) -> Result<(), Error> + where + T: BufMut, + { + self.0.encode_varint(src)?; + + Ok(()) + } +} + +impl Decoder for DefaultAsyncBuffer { + fn decode(&mut self, src: &mut T,_version: Version) -> Result<(), Error> + where + T: Buf, + { + trace!("decoding default asyncbuffer"); + self.0.decode_varint(src)?; + trace!("value: {:#?}", self); + Ok(()) + } +} + +#[derive(Default, Debug)] +pub struct DefaultRecords { + pub batches: Vec, +} + + + +impl 
fmt::Display for DefaultRecords { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f,"{} batches",self.batches.len()) + + } +} + + + +impl DefaultRecords { + pub fn add(mut self, batch: DefaultBatch) -> Self { + self.batches.push(batch); + self + } +} + +impl Decoder for DefaultRecords { + fn decode(&mut self, src: &mut T,version: Version) -> Result<(), Error> + where + T: Buf, + { + trace!("Decoding DefaultRecords"); + let mut len: i32 = 0; + len.decode(src,version)?; + trace!("recordsets len: {}", len); + + if src.remaining() < len as usize { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough buf for batches", + )); + } + + let mut buf = src.take(len as usize); + while buf.remaining() > 0 { + trace!("decoding batches"); + let mut batch = DefaultBatch::default(); + batch.decode(&mut buf,version)?; + self.batches.push(batch) + } + + if buf.remaining() > 0 { + return Err(Error::new( + ErrorKind::InvalidData, + "not enough buf for batches", + )); + } + Ok(()) + } +} + +impl Encoder for DefaultRecords { + fn write_size(&self,version: Version) -> usize { + self.batches + .iter() + .fold(4, |sum, val| sum + val.write_size(version)) + } + + fn encode(&self, dest: &mut T,version: Version) -> Result<(), Error> + where + T: BufMut, + { + trace!("encoding Default Records"); + + let mut out: Vec = Vec::new(); + + for batch in &self.batches { + trace!("decoding batch.."); + batch.encode(&mut out,version)?; + } + + let length: i32 = out.len() as i32; + trace!("recordset has {} bytes", length); + length.encode(dest,version)?; + + dest.put_slice(&mut out); + Ok(()) + } +} + +#[derive(Decode, Encode, Default, Debug)] +pub struct RecordHeader { + attributes: i8, + #[varint] + timestamp_delta: i64, + #[varint] + offset_delta: Offset, +} + +impl RecordHeader { + pub fn set_offset_delta(&mut self,delta: Offset) { + self.offset_delta = delta; + } +} + +#[derive(Default)] +pub struct Record +where + B: Default, +{ + pub preamble: RecordHeader, + pub key: B, + pub value: B, + pub headers: i64, +} + +impl Record +where + B: Default, +{ + pub fn get_offset_delta(&self) -> Offset { + self.preamble.offset_delta + } + + pub fn get_value(&self) -> &B { + &self.value + } +} + +impl Debug for Record +where + B: AsyncBuffer + Debug + Default, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln!(f, "{:?}", &self.preamble)?; + writeln!(f, "{:?}", &self.key)?; + writeln!(f, "{:?}", &self.value)?; + write!(f, "{:?}", &self.headers) + } +} + +impl From for Record +where + B: From + Default, +{ + fn from(value: String) -> Self { + let mut record = Record::default(); + record.value = value.into(); + record + } +} + +impl From> for Record +where + B: From> + Default, +{ + fn from(value: Vec) -> Self { + let mut record = Record::default(); + record.value = value.into(); + record + } +} + + +impl Encoder for Record +where + B: Encoder + Default, +{ + fn write_size(&self,version: Version) -> usize { + let inner_size = self.preamble.write_size(version) + + self.key.write_size(version) + + self.value.write_size(version) + + self.headers.var_write_size(); + let len: i64 = inner_size as i64; + len.var_write_size() + inner_size + } + + fn encode(&self, dest: &mut T,version: Version) -> Result<(), Error> + where + T: BufMut, + { + let mut out: Vec = Vec::new(); + self.preamble.encode(&mut out,version)?; + self.key.encode(&mut out,version)?; + self.value.encode(&mut out,version)?; + self.headers.encode_varint(&mut out)?; + let len: i64 = out.len() as i64; + trace!("record encode as {} 
bytes", len); + len.encode_varint(dest)?; + dest.put_slice(&out); + Ok(()) + } +} + +impl Decoder for Record +where + B: Decoder, +{ + fn decode(&mut self, src: &mut T,version: Version) -> Result<(), Error> + where + T: Buf, + { + trace!("decoding record"); + let mut len: i64 = 0; + len.decode_varint(src)?; + + trace!("record contains: {} bytes", len); + + if (src.remaining() as i64) < len { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enought for record", + )); + } + self.preamble.decode(src,version)?; + trace!("offset delta: {}", self.preamble.offset_delta); + self.key.decode(src,version)?; + self.value.decode(src,version)?; + self.headers.decode_varint(src)?; + + Ok(()) + } +} + +#[cfg(test)] +mod test { + + use std::io::Cursor; + use std::io::Error as IoError; + + use kf_protocol::Decoder; + use kf_protocol::Encoder; + + use crate::DefaultRecord; + + #[test] + fn test_decode_encode_record() -> Result<(), IoError> { + let data = [ + 0x14, // record length of 10 + 0x00, // attributes + 0xea, 0x0e, // timestamp + 0x02, // offset delta, 1 + 0x01, // key + 0x06, 0x64, 0x6f, 0x67, // value, 3 bytes len (dog) + 0x00, // 0 header + ]; + + let record = DefaultRecord::decode_from(&mut Cursor::new(&data),0)?; + assert_eq!(record.as_bytes(0)?.len(), data.len()); + + assert_eq!(record.write_size(0), data.len()); + assert_eq!(record.get_offset_delta(), 1); + assert!(record.key.inner_value().is_none()); + let val = record.value.inner_value(); + assert!(val.is_some()); + let value = val.unwrap(); + assert_eq!(value.len(), 3); + assert_eq!(value[0], 0x64); + + Ok(()) + } + +} diff --git a/kf-protocol/kf-protocol-api/src/request.rs b/kf-protocol/kf-protocol-api/src/request.rs new file mode 100644 index 0000000000..9469a65408 --- /dev/null +++ b/kf-protocol/kf-protocol-api/src/request.rs @@ -0,0 +1,334 @@ +use std::io::Error as IoError; +use std::path::Path; +use std::fmt; +use std::fmt::Display; + +use log::trace; + +use kf_protocol::bytes::Buf; +use kf_protocol::bytes::BufMut; +use kf_protocol::Decoder; +use kf_protocol::Encoder; +use kf_protocol::Version; + +use crate::Request; +use crate::RequestHeader; +use crate::ResponseMessage; + +/// Start of API request +#[derive(Debug)] +pub struct RequestMessage { + pub header: RequestHeader, + pub request: R, +} + +impl RequestMessage { + pub fn get_mut_header(&mut self) -> &mut RequestHeader { + &mut self.header + } +} + + +impl fmt::Display for RequestMessage where R: Display{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f,"{} {}",self.header,self.request) + } +} + + +impl Default for RequestMessage +where + R: Request + Default, +{ + fn default() -> Self { + let mut header = RequestHeader::default(); + header.set_api_version(R::DEFAULT_API_VERSION); + + Self { + header, + request: R::default(), + } + } +} + +impl RequestMessage +where + R: Request, +{ + /// create with header, this assume header is constructed from higher request + /// no api key check is performed since it is already done + pub fn new(header: RequestHeader, request: R) -> Self { + Self { header, request } + } + + /// create from request, header is implicilty created from key in the request + pub fn new_request(request: R) -> Self { + let mut header = RequestHeader::new(R::API_KEY); + header.set_api_version(R::DEFAULT_API_VERSION); + + Self { header, request } + } + + pub fn get_header_request(self) -> (RequestHeader, R) { + (self.header, self.request) + } + + pub fn request(&self) -> &R { + &self.request + } + + pub fn new_response(&self, response: 
R::Response) -> ResponseMessage { + Self::response_with_header(&self.header, response) + } + + pub fn response_with_header(header: H, response: R::Response) -> ResponseMessage + where + H: Into, + { + ResponseMessage::new(header.into(), response) + } + + pub fn decode_response( + &self, + src: &mut T, + version: Version, + ) -> Result, IoError> + where + T: Buf, + { + ResponseMessage::decode_from(src, version) + } + + pub fn decode_response_from_file>( + &self, + file_name: H, + version: Version, + ) -> Result, IoError> { + ResponseMessage::decode_from_file(file_name, version) + } + + /// helper function to set client id + pub fn set_client_id(mut self, client_id: T) -> Self + where + T: Into, + { + self.header.set_client_id(client_id); + self + } +} + +impl Decoder for RequestMessage +where + R: Request, +{ + fn decode(&mut self, src: &mut T, version: Version) -> Result<(), IoError> + where + T: Buf, + { + self.header.decode(src, version)?; + self.request.decode(src, self.header.api_version())?; + Ok(()) + } +} + +impl Encoder for RequestMessage +where + R: Request, +{ + fn write_size(&self, version: Version) -> usize { + self.header.write_size(version) + self.request.write_size(self.header.api_version()) + } + + fn encode(&self, out: &mut T, version: Version) -> Result<(), IoError> + where + T: BufMut, + { + let len = self.write_size(version) as i32; + trace!("encoding request len: {}", len); + len.encode(out, version)?; + + trace!("encoding request header: {:#?}", &self.header); + self.header.encode(out, version)?; + + trace!("encoding request: {:#?}", &self.request); + self.request.encode(out, self.header.api_version())?; + Ok(()) + } +} + +#[cfg(test)] +mod test { + + use std::io::Cursor; + use std::io::Error as IoError; + use std::convert::TryInto; + use kf_protocol::bytes::Buf; + use kf_protocol::bytes::BufMut; + use kf_protocol::Decoder; + use kf_protocol::Encoder; + use kf_protocol::Version; + use kf_protocol_derive::Encode; + use kf_protocol_derive::Decode; + + use super::RequestHeader; + use super::RequestMessage; + use crate::KfRequestMessage; + + use crate::Request; + use crate::AllKfApiKey; + + #[derive(Decode, Encode, Debug, Default)] + pub struct ApiVersionRequest {} + + impl Request for ApiVersionRequest { + const API_KEY: u16 = AllKfApiKey::ApiVersion as u16; + + type Response = ApiVersionResponse; + } + + #[derive(Encode, Decode, Default, Debug)] + pub struct ApiVersionResponse { + pub error_code: i16, + pub api_versions: Vec, + pub throttle_time_ms: i32, + } + + #[derive(Encode, Decode, Default, Debug)] + pub struct ApiVersion { + pub api_key: i16, + pub min_version: i16, + pub max_version: i16, + } + + #[derive(PartialEq, Debug, Encode, Decode, Clone, Copy)] + #[repr(u16)] + pub enum TestApiEnum { + ApiVersion = 18, + } + + impl Default for TestApiEnum { + fn default() -> TestApiEnum { + TestApiEnum::ApiVersion + } + } + + #[test] + fn test_decode_header() -> Result<(), IoError> { + // API versions request + // API key: API Versions (18) + // API version: 1 + // correlation id: 1, + // strng length 10 + // client id: consumer-1 + let data = [ + 0x00, 0x12, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x0a, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6d, 0x65, 0x72, 0x2d, 0x31, + ]; + + let header: RequestHeader = RequestHeader::decode_from(&mut Cursor::new(&data), 0)?; + + assert_eq!(header.api_key(), TestApiEnum::ApiVersion as u16); + assert_eq!(header.api_version(), 1); + assert_eq!(header.correlation_id(), 1); + assert_eq!(header.client_id(), "consumer-1"); + + Ok(()) + } + + 
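+    // Illustrative check, not part of the original test suite: `new_request`
+    // is documented above as filling the header from the `Request` impl, so
+    // the header's api key should come back as `ApiVersionRequest::API_KEY`.
+    #[test]
+    fn test_new_request_header() {
+        let msg = RequestMessage::new_request(ApiVersionRequest {});
+        assert_eq!(msg.header.api_key(), ApiVersionRequest::API_KEY);
+    }
+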
#[test] + fn test_encode_header() { + let req_header = RequestHeader::new_with_client( + TestApiEnum::ApiVersion as u16, + String::from("consumer-1"), + ); + let expected_result = [ + 0x00, 0x12, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x0a, 0x63, 0x6f, 0x6e, 0x73, + 0x75, 0x6d, 0x65, 0x72, 0x2d, 0x31, + ]; + + let mut result = vec![]; + let req_result = req_header.encode(&mut result, 0); + + assert!(req_result.is_ok()); + assert_eq!(result, expected_result); + } + + pub enum TestApiRequest { + ApiVersionRequest(RequestMessage), + } + + impl Default for TestApiRequest { + fn default() -> TestApiRequest { + TestApiRequest::ApiVersionRequest(RequestMessage::::default()) + } + } + + impl KfRequestMessage for TestApiRequest { + type ApiKey = TestApiEnum; + + fn decode_with_header(src: &mut T, header: RequestHeader) -> Result + where + Self: Default + Sized, + Self::ApiKey: Sized, + T: Buf, + { + match header.api_key().try_into()? { + TestApiEnum::ApiVersion => { + let request = ApiVersionRequest::decode_from(src, header.api_version())?; + return Ok(TestApiRequest::ApiVersionRequest(RequestMessage::new( + header, request, + ))); + } + } + } + } + + impl Encoder for TestApiRequest { + fn write_size(&self, version: Version) -> usize { + match self { + TestApiRequest::ApiVersionRequest(response) => response.write_size(version), + } + } + + fn encode(&self, src: &mut T, version: Version) -> Result<(), IoError> + where + T: BufMut, + { + match self { + TestApiRequest::ApiVersionRequest(response) => { + response.encode(src, version)?; + } + } + Ok(()) + } + } + + #[test] + fn test_encode_message() { + let mut message = RequestMessage::new_request(ApiVersionRequest {}); + message + .get_mut_header() + .set_client_id("consumer-1".to_owned()) + .set_correlation_id(5); + + let mut out = vec![]; + message.encode(&mut out, 0).expect("encode work"); + let mut encode_bytes = Cursor::new(&out); + + // decode back + let mut len: i32 = 0; + len.decode(&mut encode_bytes, 0).expect("cant decode len"); + let res_msg_result: Result, IoError> = + Decoder::decode_from(&mut encode_bytes, 0); + + match res_msg_result { + Ok(msg) => { + assert_eq!(msg.header.correlation_id(), 5); + } + Err(err) => { + assert!(false, "error: {}", err); + } + } + } + +} diff --git a/kf-protocol/kf-protocol-api/src/response.rs b/kf-protocol/kf-protocol-api/src/response.rs new file mode 100644 index 0000000000..0a655f2693 --- /dev/null +++ b/kf-protocol/kf-protocol-api/src/response.rs @@ -0,0 +1,112 @@ + +use std::fs::File; +use std::io::Cursor; +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::io::Read; +use std::path::Path; + +use log::debug; +use log::trace; + +use kf_protocol::bytes::Buf; +use kf_protocol::bytes::BufMut; +use kf_protocol::Decoder; +use kf_protocol::Encoder; +use kf_protocol::Version; + +use crate::RequestHeader; + +#[derive(Debug, Default)] +pub struct ResponseMessage

<P> {
+    pub correlation_id: i32,
+    pub response: P,
+}
+
+impl<P> ResponseMessage<P> {
+    pub fn from_header(header: &RequestHeader, response: P) -> Self {
+        Self::new(header.correlation_id(), response)
+    }
+
+    pub fn new(correlation_id: i32, response: P) -> Self {
+        Self {
+            correlation_id,
+            response,
+        }
+    }
+}
+
+impl<P> ResponseMessage<P>
+where
+    P: Decoder,
+{
+    pub fn decode_from<T>(src: &mut T, version: Version) -> Result<Self, IoError>
+    where
+        T: Buf,
+    {
+        let mut correlation_id: i32 = 0;
+        correlation_id.decode(src, version)?;
+        trace!("decoded correlation id: {}", correlation_id);
+
+        let response = P::decode_from(src, version)?;
+        Ok(ResponseMessage {
+            correlation_id,
+            response,
+        })
+    }
+
+    pub fn decode_from_file<H: AsRef<Path>>(
+        file_name: H,
+        version: Version,
+    ) -> Result<Self, IoError> {
+        debug!("decoding from file: {:#?}", file_name.as_ref());
+        let mut f = File::open(file_name)?;
+        let mut buffer: [u8; 1000] = [0; 1000];
+
+        f.read(&mut buffer)?;
+        let data = buffer.to_vec();
+
+        let mut src = Cursor::new(&data);
+
+        let mut size: i32 = 0;
+        size.decode(&mut src, version)?;
+        trace!("decoded response size: {} bytes", size);
+
+        if src.remaining() < size as usize {
+            return Err(IoError::new(
+                ErrorKind::UnexpectedEof,
+                "not enough for response",
+            ));
+        }
+
+        Self::decode_from(&mut src, version)
+    }
+}
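+// Round-trip sketch (illustrative, not from the original source), with a
+// plain `i16` standing in for a real response payload:
+//
+//   let msg = ResponseMessage::new(5, 42i16);
+//   let mut out: Vec<u8> = vec![];
+//   msg.encode(&mut out, 0)?;   // writes size, correlation id, then payload
+//
+// Note the asymmetry: `encode` below prefixes the total size, while
+// `decode_from` assumes the size has already been consumed from the buffer
+// (which is exactly what `decode_from_file` does before calling it).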
impl<P> Encoder for ResponseMessage<P>
+where
+    P: Encoder + Default,
+{
+    fn write_size(&self, version: Version) -> usize {
+        self.correlation_id.write_size(version) + self.response.write_size(version)
+    }
+
+    fn encode<T>(&self, out: &mut T, version: Version) -> Result<(), IoError>
+    where
+        T: BufMut,
+    {
+        let len = self.write_size(version) as i32;
+        len.encode(out, version)?;
+        self.correlation_id.encode(out, version)?;
+        self.response.encode(out, version)?;
+        Ok(())
+    }
+}
diff --git a/kf-protocol/kf-protocol-build/Cargo.toml b/kf-protocol/kf-protocol-build/Cargo.toml
new file mode 100644
index 0000000000..ea9a54fe06
--- /dev/null
+++ b/kf-protocol/kf-protocol-build/Cargo.toml
@@ -0,0 +1,23 @@
+[package]
+name = "kf-protocol-build"
+version = "0.1.0-alpha.1"
+authors = ["fluvio.io"]
+edition = "2018"
+
+[dependencies]
+structopt = { version = "0.2.14", optional = true }
+serde = { version = "1.0.82", features = ['derive'] }
+serde_json = "1.0.39"
+tera = "0.11.20"
+Inflector = "0.11.4"
+rustfmt-nightly = "1.2.2"
+textwrap = "0.11.0"
+
+[[bin]]
+name = "kfspec2code"
+path = "src/bin/cli.rs"
+doc = false
+required-features = ["cli"]
+
+[features]
+cli = ["structopt"]
\ No newline at end of file
diff --git a/kf-protocol/kf-protocol-build/DEV.md b/kf-protocol/kf-protocol-build/DEV.md
new file mode 100644
index 0000000000..4ad0c31095
--- /dev/null
+++ b/kf-protocol/kf-protocol-build/DEV.md
@@ -0,0 +1,14 @@
+# Kafka to Rust code generator
+
+```make build-cli```
+
+You can run the code generator CLI (assuming you are back at the root):
+
+```./target/debug/kfspec2code --help```
+
+With Kafka installed in a parallel directory, run file generation:
+
+```
+../../target/debug/kfspec2code generate -i ../../../kafka/clients/src/main/resources/common/message/ -d ../kf-protocol-message/src/kf_code_gen/
+```
+
diff --git a/kf-protocol/kf-protocol-build/Makefile b/kf-protocol/kf-protocol-build/Makefile
new file mode 100644
index 0000000000..b8ea98baee
--- /dev/null
+++ b/kf-protocol/kf-protocol-build/Makefile
@@ -0,0 +1,2 @@
+build-cli:
+	cargo build --features=cli
\ No newline at end of file
diff --git a/kf-protocol/kf-protocol-build/README.md b/kf-protocol/kf-protocol-build/README.md
new file mode 100644
index 0000000000..20ca426c52
--- /dev/null
+++ b/kf-protocol/kf-protocol-build/README.md
@@ -0,0 +1,309 @@
+# KF-Protocol-Build Code Generator
+
+The motivation for building a **Kafka Spec** to **Rust Code** generator is to reduce the burden of integrating various Kafka releases with the Fluvio project.
+
+The code generator takes an *input* directory of **JSON** files and an *output* file or directory to store the resulting **Rust** code. The conversion may be augmented with custom-defined field translations through the use of map files.
+
+The input directory must consist of **Request/Response** file-pairs (as published by Kafka):
+
+* __[API-name]Request.json__
+* __[API-name]Response.json__
+
+Each file-pair is compiled together into a corresponding **kf_[api_name].rs** file. The resulting **.rs** files are placed in the output location provided at the command line.
+
+## Usage
+
+Install Kafka in a parallel directory.
+
+#### Generate code
+
+The code generator can convert files with or without modifications and place the output in a file or directory.
+
+```
+USAGE:
+    kfspec2code generate [OPTIONS] --input <string> --output-directory <string>
+
+FLAGS:
+    -h, --help    Prints help information
+
+OPTIONS:
+    -i, --input <string>               Input directory
+    -m, --map-file <string>            Json file for field translations
+    -o, --output-directory <string>    Output directory
+    -f, --output-file <string>         Output file
+```
+
+Example code generation with directory output:
+
+```
+./kfspec2code generate -i ../kafka/clients/src/main/resources/common/message/ -o ~/output
+```
+
+
+Use **Map Files** to translate individual fields during code generation. Maps are **json** files that match fields in the Kafka spec and translate them with custom definitions. Map Files have the following keys:
+
+* kind - {'header', 'primitiveField', 'structField'}
+* match - { 'name', 'type' }
+* translate - { 'type' }
+* add - [ array of strings to augment 'structField']
+
+Translate ErrorCode from type **int16** to **ErrorCode**, which is represented by an enum in the Rust code base:
+
+```
+> vi error_code_map.json
+{
+    "translations": [
+        {
+            "field": {
+                "match": {
+                    "name": "ErrorCode",
+                    "type": "int16"
+                },
+                "translate": {
+                    "type": "ErrorCode"
+                }
+            }
+        }
+    ]
+}
+:wq
+
+./kfspec2code generate -i ../kafka/clients/src/main/resources/common/message/ -m error_code_map.json -o ~/output
+```
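+
+For intuition, this is roughly the effect of that translation on a generated field (an illustrative sketch; the exact output depends on the spec and the templates in use):
+
+```
+// without the map file: the spec type int16 becomes a plain i16
+pub error_code: i16,
+
+// with error_code_map.json: the field is emitted with the ErrorCode enum
+pub error_code: ErrorCode,
+```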
+"validVersions": "0" +``` + +In Fluvio, validVersions are represented by **#fluvio_kf(...)]**: + +``` +#[fluvio_kf(api_min_version = 0, api_max_version = 3)] + +#[fluvio_kf(api_min_version = 0, api_max_version = 0)] +``` + +#### Field Versions + +Each field has a mandatory has ***versions*** parameter that defines the versions of the API that support this field: + +``` +"versions": "2-4" +... +"versions": "3+" +``` + +In Fluvio, they are represented by **#fluvio_kf(...)]**: + +``` +#[fluvio_kf(min_version = 2, max_version = 4)] + +#[fluvio_kf(min_version = 3)] +``` + +Code generator skips elements with "0+" versions. + + +#### Nullable Versions + +Kafka uses an optional parameter ***nullableVersions*** for strings and arrays. Nullable strings are also represented in documentation as: + +``` +transactional_id => NULLABLE_STRING +``` + +Nullable pameters represented in ***"(version)+"*** format + +``` +"nullableVersions": "2+ +``` + + +In Fluvio ***nullable*** is representd through ***Option*** + +``` +#[fluvio_kf(versions="1-3")] +trancation_id: Option + +#[fluvio_kf(versions="2")] +trandactions: Option> + +``` + +#### Ignorable + +Ignorable are flags that are translated into annotations + +``` +#[fluvio_kf(min_version = 2, max_version = 4, ignorable)] +``` + + +#### Example + +Code generator for for Fetch Request (only top struct displayed) + +``` +kf_api!( + #[fluvio_kf(api_min_version = 0, api_max_version = 10, api_key = 1 response = "FetchResponse")] + pub struct KfFetchRequest { + pub replica_id: i32, + pub max_wait: i32, + pub min_bytes: i32, + + #[fluvio_kf(min_version = 3, ignorable)] + pub max_bytes: i32, + + #[fluvio_kf(min_version = 4)] + pub isolation_level: i8, + + #[fluvio_kf(min_version = 7)] + pub session_id: i32, + + #[fluvio_kf(min_version = 7)] + pub epoch: i32, + pub topics: Vec, + + #[fluvio_kf(min_version = 7)] + pub forgotten: Vec, + } +} +... +``` + +## Tips + +Grab parameters from **Kafka Json** files: + +``` +grep -ho "\"versions\": *.\+" input-json/* + +grep -ho "\"nullableVersions\": *.\+" input-json/* + +grep -ho "\"ignorable\": *.\+" input-json/* + +``` + + + + diff --git a/kf-protocol/kf-protocol-build/input/.DS_Store b/kf-protocol/kf-protocol-build/input/.DS_Store new file mode 100644 index 0000000000..5008ddfcf5 Binary files /dev/null and b/kf-protocol/kf-protocol-build/input/.DS_Store differ diff --git a/kf-protocol/kf-protocol-build/src/bin/cli.rs b/kf-protocol/kf-protocol-build/src/bin/cli.rs new file mode 100644 index 0000000000..349a03dfa5 --- /dev/null +++ b/kf-protocol/kf-protocol-build/src/bin/cli.rs @@ -0,0 +1,114 @@ +//! +//! # Root CLI +//! +//! CLI configurations at the top of the tree +//! + +use std::io::Error as IoError; + +use structopt::clap::AppSettings; +use structopt::StructOpt; + +use kf_protocol_build::format_code::TemplateFormat; + +use kf_protocol_build::check_keys::check_header_and_field_keys_are_known; +use kf_protocol_build::generate_code::gen_code_and_output_to_dir; + +macro_rules! 
print_cli_err { + ($x:expr) => { + println!("\x1B[1;31merror:\x1B[0m {}", $x); + }; +} + +#[derive(StructOpt)] +#[structopt( + about = "KafkaSpec2Code CLI", + author = "", + raw( + global_settings = "&[AppSettings::DisableVersion, AppSettings::VersionlessSubcommands, AppSettings::DeriveDisplayOrder]" + ) +)] + +enum Root { + #[structopt( + name = "generate", + author = "", + about = "Generate Rust APIs from Kafka json specs" + )] + Generate(GenerateOpt), + + #[structopt( + name = "check-keys", + author = "", + about = "Check if any new keys have been introduced" + )] + CheckKeys(CheckKeysOpt), +} + +fn run_cli() { + let result = match Root::from_args() { + Root::Generate(generate) => process_code_generator(generate), + Root::CheckKeys(check_keys) => process_check_keys(check_keys), + }; + + if result.is_err() { + print_cli_err!(result.unwrap_err()); + } +} + +#[derive(StructOpt)] +pub struct GenerateOpt { + /// Input directory + #[structopt(short = "i", long = "input", value_name = "string")] + pub input_dir: String, + + /// Directory to output each file pair + #[structopt(short = "d", long = "output-directory", value_name = "string")] + pub output_dir: String, + + /// Skip Rust formatter (useful for troubleshooting custom templates) + #[structopt(short = "s", long = "skip-formatter")] + pub skip_formatter: bool, + + /// Use custom template + #[structopt(short = "t", long = "custom-template", value_name = "string")] + pub template_dir: Option, +} + +#[derive(StructOpt)] +pub struct CheckKeysOpt { + /// Input directory + #[structopt(short = "i", long = "input", value_name = "string")] + pub input_dir: String, +} + +// ----------------------------------- +// Code Generator +// ----------------------------------- + +/// Process code generator +pub fn process_code_generator(opt: GenerateOpt) -> Result<(), IoError> { + let template = TemplateFormat::new(opt.template_dir)?; + gen_code_and_output_to_dir( + &opt.input_dir, + &opt.output_dir, + &template, + opt.skip_formatter, + ) +} + +// ----------------------------------- +// Check Keys +// ----------------------------------- + +/// Process check keys +pub fn process_check_keys(opt: CheckKeysOpt) -> Result<(), IoError> { + check_header_and_field_keys_are_known(&opt.input_dir)?; + println!("All fields are known."); + + Ok(()) +} + +fn main() { + run_cli(); +} diff --git a/kf-protocol/kf-protocol-build/src/check_keys.rs b/kf-protocol/kf-protocol-build/src/check_keys.rs new file mode 100644 index 0000000000..3e32254de2 --- /dev/null +++ b/kf-protocol/kf-protocol-build/src/check_keys.rs @@ -0,0 +1,104 @@ +//! +//! # Check Keys +//! +//! Takes a Kafka json spec and compares against known keys +//! 
+ +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::path::PathBuf; + +use serde_json::Value; + +use super::file_pairs::FilePairs; +use super::file_to_json::file_to_json; + +/// Check if all header and field keys are known +pub fn check_header_and_field_keys_are_known(dir: &String) -> Result<(), IoError> { + let file_pairs = FilePairs::new(dir)?; + + for file_pair in &file_pairs.pairs { + check_known_header_and_field_keys(&file_pair.req_file)?; + check_known_header_and_field_keys(&file_pair.res_file)?; + } + + Ok(()) +} + +/// Check if all Field keys are known (called recursively) +fn check_known_field_keys(fields: &Vec) -> Result<(), IoError> { + let known_field_keys = vec![ + "name".to_owned(), + "about".to_owned(), + "type".to_owned(), + "default".to_owned(), + "ignorable".to_owned(), + "mapKey".to_owned(), + "versions".to_owned(), + "nullableVersions".to_owned(), + "entityType".to_owned(), + "fields".to_owned(), + ]; + + for field in fields { + for (key, val) in field.as_object().unwrap().iter() { + if !known_field_keys.contains(key) { + return Err(IoError::new( + ErrorKind::InvalidData, + format!("unknown field key: '{}'", key), + )); + } + + if key == "fields" { + match val.as_array() { + Some(field_val) => check_known_field_keys(field_val)?, + None => { + return Err(IoError::new( + ErrorKind::InvalidData, + "key 'fields' must be array", + )); + } + } + } + } + } + + Ok(()) +} + +/// Check if all Header keys are known +fn check_known_header_and_field_keys(file_path: &PathBuf) -> Result<(), IoError> { + let known_header_keys = vec![ + "name".to_owned(), + "validVersions".to_owned(), + "type".to_owned(), + "fields".to_owned(), + "apiKey".to_owned(), + ]; + + match file_to_json(file_path) { + Ok(val) => { + for (key, val) in val.as_object().unwrap().iter() { + if !known_header_keys.contains(key) { + return Err(IoError::new( + ErrorKind::InvalidData, + format!("unknown header key: '{}'", key), + )); + } + if key == "fields" { + match val.as_array() { + Some(field_val) => check_known_field_keys(field_val)?, + None => { + return Err(IoError::new( + ErrorKind::InvalidData, + "key 'fields' must be array", + )); + } + } + } + } + Ok(()) + } + Err(err) => Err(IoError::new(ErrorKind::InvalidData, format!("{}", err))), + } +} diff --git a/kf-protocol/kf-protocol-build/src/constants.rs b/kf-protocol/kf-protocol-build/src/constants.rs new file mode 100644 index 0000000000..cbe3c10166 --- /dev/null +++ b/kf-protocol/kf-protocol-build/src/constants.rs @@ -0,0 +1,5 @@ +pub const WARNING: &'static str = r#"/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. + +"#; diff --git a/kf-protocol/kf-protocol-build/src/file_content.rs b/kf-protocol/kf-protocol-build/src/file_content.rs new file mode 100644 index 0000000000..14b675235b --- /dev/null +++ b/kf-protocol/kf-protocol-build/src/file_content.rs @@ -0,0 +1,382 @@ +//! +//! # Generate code from internal message +//! +//! Takes a message structure and generates a code file +//! 
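+//!
+//! Sketch of the expected flow (the spec messages come from `json_to_msg`,
+//! as in the tests below):
+//!
+//! ```ignore
+//! let content = build_file_content(&req_msg, &res_msg);
+//! let fields = content.all_fields(); // BTreeMap of field name -> seen types
+//! ```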
+ +use inflector::Inflector; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; +use textwrap::fill; + +use super::spec_msg::NullableVersions; +use super::spec_msg::SpecMessage; +use super::spec_msg::{SpecField, SpecFieldType, SpecFields}; + +// ----------------------------------- +// Structures +// ----------------------------------- + +#[derive(Debug, Serialize, Deserialize, PartialEq)] +pub struct FileContent { + pub request: Request, + pub response: Response, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq)] +pub struct Request { + pub name: String, + pub annotation: RequestAnnotation, + pub fields: Vec, + pub structures: Vec, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq)] +pub struct RequestAnnotation { + pub api_key: i16, + pub min_api_version: i16, + pub max_api_version: i16, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq)] +pub struct Response { + pub name: String, + pub fields: Vec, + pub structures: Vec, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq)] +pub struct Structure { + pub name: String, + pub fields: Vec, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq)] +pub struct Field { + pub name: String, + pub value: String, + + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + pub annotation: Option, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Default)] +pub struct FieldAnnotation { + #[serde(skip_serializing_if = "Option::is_none")] + pub min_version: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + pub max_version: Option, + + #[serde(skip_serializing_if = "Option::is_none")] + pub ignorable: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub default: Option, +} + +// ----------------------------------- +// Macros +// ----------------------------------- + +macro_rules! 
make_kf_name { + ($name:expr) => { + &format!("Kf{}", $name); + }; +} + +// ----------------------------------- +// Implement FileContent +// ----------------------------------- +impl FileContent { + /// build BTreeMap of all Fields in map format + pub fn all_fields(&self) -> BTreeMap> { + let mut all_fields = BTreeMap::new(); + + // requests + for field in &self.request.fields { + FileContent::add_field_if_unique(&field.name, &field.value, &mut all_fields); + } + for req_struct in &self.request.structures { + for field in &req_struct.fields { + FileContent::add_field_if_unique(&field.name, &field.value, &mut all_fields); + } + } + + // responses + for field in &self.response.fields { + FileContent::add_field_if_unique(&field.name, &field.value, &mut all_fields); + } + for res_struct in &self.response.structures { + for field in &res_struct.fields { + FileContent::add_field_if_unique(&field.name, &field.value, &mut all_fields); + } + } + + all_fields + } + + /// Add field to BTree map if not already there + fn add_field_if_unique(name: &String, value: &String, map: &mut BTreeMap>) { + if let Some(map_values) = map.get_mut(name) { + if !map_values.contains(value) { + map_values.push(value.clone()); + } + } else { + map.insert(name.clone(), vec![value.clone()]); + } + } +} + +// ----------------------------------- +// Functions +// ----------------------------------- + +/// Convert Request/Response message spec into +pub fn build_file_content(s_req: &SpecMessage, s_res: &SpecMessage) -> FileContent { + FileContent { + request: build_request(s_req), + response: build_response(s_res), + } +} + +/// Convert Request message Template friendly Message +fn build_request(s_msg: &SpecMessage) -> Request { + let mut structures: Vec = vec![]; + let name = make_kf_name!(&s_msg.name).clone(); + let annotation = request_annotation(s_msg); + let fields = build_fields_and_structs(&s_msg.fields, &mut structures); + structures.reverse(); + + Request { + name, + annotation, + fields, + structures, + } +} + +/// Convert Response to Template friendly Message +fn build_response(s_msg: &SpecMessage) -> Response { + let mut structures: Vec = vec![]; + let name = make_kf_name!(&s_msg.name).clone(); + let fields = build_fields_and_structs(&s_msg.fields, &mut structures); + structures.reverse(); + + Response { + name, + fields, + structures, + } +} + +/// Loop through SpecFields and generate GUI friendly Fields & Structs +pub fn build_fields_and_structs( + maybe_s_fields: &Option, + parent_structures: &mut Vec, +) -> Vec { + let mut fields: Vec = vec![]; + let mut collect_structures: Vec = vec![]; + + if let Some(s_fields) = maybe_s_fields { + for s_field in s_fields { + // check if field and subtree should be skipped + if skip_field(s_field) { + continue; + } + + // generate field + fields.push(generate_field(s_field)); + + // generate structs (if sub-fields) + if s_field.fields.is_some() { + let structure = generate_struct( + &s_field.typ.custom_value_name(), + &s_field.fields, + parent_structures, + ); + + collect_structures.insert(0, structure); + } + } + } + + parent_structures.append(&mut collect_structures); + + fields +} + +/// Generate Structure +pub fn generate_struct<'a>( + name: &String, + maybe_s_fields: &Option, + parent_structures: &mut Vec, +) -> Structure { + let name = name.clone(); + let fields = build_fields_and_structs(maybe_s_fields, parent_structures); + Structure { name, fields } +} + +/// Generate Field +pub fn generate_field(s_field: &SpecField) -> Field { + let name = 
field_name(&s_field.name, &s_field.map_key, &s_field.entity_type); + let value = field_value(&s_field.typ, &s_field.nullable_versions); + let comment = field_comment(&s_field.about); + let annotation = field_annotation(s_field); + + Field { + name, + value, + comment, + annotation, + } +} + +/// Generate annotation for request message +pub fn request_annotation(req: &SpecMessage) -> RequestAnnotation { + let (min_api_version, max_api_version) = req.api_versions.touples(); + + RequestAnnotation { + api_key: req.api_key as i16, + min_api_version, + max_api_version, + } +} + +/// Generate field derive based on versions, nullable and defaults +pub fn field_annotation(field: &SpecField) -> Option { + let mut annotation = FieldAnnotation::default(); + + // provision versions + if !field.versions.is_zero_plus() { + let (min_version, max_version) = field.versions.touples(); + annotation.min_version = Some(min_version); + annotation.max_version = max_version; + } + + // provision ignorable + if field.ignorable.unwrap_or(false) { + annotation.ignorable = Some(true); + } + + // provision default + if let Some(default) = &field.default { + annotation.default = Some(default.value()); + } + + if annotation.min_version.is_some() + || annotation.ignorable.is_some() + || annotation.default.is_some() + { + Some(annotation) + } else { + None + } +} + +/// Generate field name, replace with entity_type if map_key is set +pub fn field_name(name: &String, map_key: &Option, entity_type: &Option) -> String { + let new_name = if let Some(map_key) = map_key { + if *map_key && entity_type.is_some() { + &entity_type.as_ref().unwrap() + } else { + name + } + } else { + name + }; + + new_name.to_snake_case() +} + +/// Generate field value, if nullableVersion is set and Typpe is String or Array, make it an Option +pub fn field_value(field_type: &SpecFieldType, nullable_ver: &Option) -> String { + if nullable_ver.is_some() { + if field_type.is_string_or_array() { + return format!("Option<{}>", field_type.value()); + } + } + field_type.value() +} + +/// Converts about to sized (90 column) code comment +pub fn field_comment(about: &Option) -> Option { + if let Some(text) = about { + let data = fill(text, 92); + let mut comment = String::new(); + for line in data.lines() { + comment.push_str(&format!("/// {}\n", line)); + } + Some(comment) + } else { + None + } +} + +/// Skip fields if they match the following criteria +/// - version is exactly 0. 
+pub fn skip_field(field: &SpecField) -> bool { + field.versions.is_zero() +} + +// ----------------------------------- +// Test Cases +// ----------------------------------- + +#[cfg(test)] +mod test { + use std::fs::read_to_string; + use std::io::Error as IoError; + use std::io::ErrorKind; + use std::path::{Path, PathBuf}; + + use super::*; + + use crate::file_to_json::file_to_json; + use crate::json_to_msg::{parse_json_to_request, parse_json_to_response}; + + pub fn file_content_decode>(path: T) -> Result { + let file_str: String = read_to_string(path)?; + serde_json::from_str(&file_str) + .map_err(|err| IoError::new(ErrorKind::InvalidData, format!("{}", err))) + } + + // read files and generate messages + fn file_to_msgs( + req_file_str: &'static str, + res_file_str: &'static str, + ) -> (SpecMessage, SpecMessage) { + let mut req_file = PathBuf::new(); + req_file.push(req_file_str); + let req_json = file_to_json(&req_file); + assert!(req_json.is_ok()); + + let mut res_file = PathBuf::new(); + res_file.push(res_file_str); + let res_json = file_to_json(&res_file); + assert!(res_json.is_ok()); + + let req_msg = parse_json_to_request(req_json.unwrap()); + let res_msg = parse_json_to_response(res_json.unwrap()); + assert!(req_msg.is_ok()); + assert!(res_msg.is_ok()); + + (req_msg.unwrap(), res_msg.unwrap()) + } + + #[test] + fn test_convert_file_content() { + let (req_msg, res_msg) = file_to_msgs( + "./test-data/MetadataRequest.json", + "./test-data/MetadataResponse.json", + ); + let file_content_expected = + file_content_decode(Path::new("./test-data/metadata_file_content.json")).unwrap(); + let file_content = build_file_content(&req_msg, &res_msg); + + println!("{:#?}", file_content); + assert_eq!(file_content, file_content_expected); + } +} diff --git a/kf-protocol/kf-protocol-build/src/file_pairs.rs b/kf-protocol/kf-protocol-build/src/file_pairs.rs new file mode 100644 index 0000000000..73d5dcbe93 --- /dev/null +++ b/kf-protocol/kf-protocol-build/src/file_pairs.rs @@ -0,0 +1,233 @@ +//! +//! # File Pairs +//! +//! Data structure to cache a pair of request/response files +//! +use std::fs; +use std::io::Error; +use std::io::ErrorKind; +use std::path::PathBuf; + +#[derive(Debug, PartialEq)] +pub struct FilePair { + pub req_file: PathBuf, + pub res_file: PathBuf, + pub filename: String, +} + +#[derive(Debug, PartialEq)] +pub struct FilePairs { + pub pairs: Vec, +} + +// ----------------------------------- +// Implementation +// ----------------------------------- + +impl FilePairs { + /// Read directory and generate file pairs + pub fn new(dir: &str) -> Result { + let skip_files = vec!["RequestHeader".to_owned(), "ResponseHeader".to_owned()]; + let files = Self::files_in_dir(dir, &skip_files)?; + let pairs = Self::make_pairs(files); + Ok(FilePairs { pairs }) + } + + /// Read files in directory (subdirectories are skipped) + fn files_in_dir(dir: &str, skip_files: &Vec) -> Result, Error> { + let mut files: Vec = vec![]; + let dir_files = match fs::read_dir(dir) { + Ok(dir) => dir, + Err(err) => { + return Err(Error::new( + ErrorKind::InvalidData, + format!("{} - {}", dir, err), + )); + } + }; + + for file_path in dir_files { + let file = file_path?; + let path = file.path(); + if path.is_dir() { + println!("'{}' is directory... skipped", path.display()); + continue; + } + + if let Some(filename) = path.file_stem() { + let file_str = &filename + .to_os_string() + .into_string() + .unwrap_or("".to_owned()); + if skip_files.contains(file_str) { + println!("'{}' skipped... 
not implementeed", file_str); + continue; + } + } + + files.push(path); + } + + Ok(files) + } + + /// Take the files and make Request/Response file pairs + fn make_pairs(files: Vec) -> Vec { + let mut file_pairs: Vec = vec![]; + let mut temp = files.clone(); + + while temp.len() > 0 { + let first_file_path = temp.remove(0); + let first_file = if let Some(first) = first_file_path.file_stem() { + match first.to_str() { + Some(file) => file, + None => continue, + } + } else { + continue; + }; + let filename = message_from_filename(&first_file.to_owned()); + + if let Some((index, is_request)) = second_file_index(first_file.to_string(), &temp) { + let second_file_path = temp.remove(index); + + // generate file pair + let file_pair = if is_request { + FilePair { + req_file: second_file_path, + res_file: first_file_path, + filename: filename, + } + } else { + FilePair { + req_file: first_file_path, + res_file: second_file_path, + filename: filename, + } + }; + + file_pairs.push(file_pair); + } + } + + file_pairs + } +} + +/// If Request file, looks-up the index of the Response file (or the reverese) +/// Returns the index of the file, or -1 +fn second_file_index(first_file: String, files: &Vec) -> Option<(usize, bool)> { + let found_request: bool; + + // request or reponse + let file_len = first_file.len(); + let request_len = first_file.find("Request").unwrap_or(file_len); + let response_len = first_file.find("Response").unwrap_or(file_len); + + // generate second file + let second_file = if file_len != request_len { + found_request = false; + + if request_len == 0 { + // Some files begin with Response + let mut second_file = "Response".to_owned(); + second_file.push_str(&first_file["Request".len()..].to_owned()); + second_file + } else { + let mut second_file = first_file[..request_len].to_owned(); + second_file.push_str("Response"); + second_file + } + } else if file_len != response_len { + found_request = true; + + if response_len == 0 { + // Some files begin with Request + let mut second_file = "Request".to_owned(); + second_file.push_str(&first_file["Response".len()..].to_owned()); + second_file + } else { + let mut second_file = first_file[..response_len].to_owned(); + second_file.push_str("Request"); + second_file + } + } else { + println!("Invalid json file {}... 
skipped", first_file); + return None; + }; + + // find second file + let index = if let Some(index) = files.iter().position(|file| { + if let Some(file_stem) = file.file_stem() { + file_stem.to_str() == Some(&second_file) + } else { + false + } + }) { + Some((index, found_request)) + } else { + println!("{} - Cannot find >>> {}", first_file, second_file); + None + }; + + // return index + index +} + +/// Takes a filename and returns the message type without Request/Response +fn message_from_filename(filename: &String) -> String { + if let Some(req_idx) = filename.find("Request") { + filename[0..req_idx].to_string() + } else if let Some(res_idx) = filename.find("Response") { + filename[0..res_idx].to_string() + } else { + filename.clone() + } +} + +// ----------------------------------- +// Test Cases +// ----------------------------------- + +#[cfg(test)] +mod test { + use super::*; + use std::path::Path; + + #[test] + fn test_files_in_dir() { + let files = FilePairs::files_in_dir(&"./test-data".to_owned(), &vec![]); + assert_eq!(files.is_ok(), true); + } + + #[test] + fn test_file_pairs_new() { + let pairs = FilePairs::new(&"./test-data".to_owned()); + let expected_pairs = FilePairs { + pairs: vec![FilePair { + req_file: Path::new("./test-data/MetadataRequest.json").to_path_buf(), + res_file: Path::new("./test-data/MetadataResponse.json").to_path_buf(), + filename: "Metadata".to_owned(), + }], + }; + + assert_eq!(pairs.is_ok(), true); + assert_eq!(pairs.unwrap(), expected_pairs); + } + + #[test] + fn test_message_from_file() { + assert_eq!( + message_from_filename(&"MetadataRequest".to_string()), + "Metadata".to_owned() + ); + assert_eq!( + message_from_filename(&"MetadataResponse".to_string()), + "Metadata".to_owned() + ); + assert_eq!( + message_from_filename(&"SomeFile".to_string()), + "SomeFile".to_owned() + ); + } +} diff --git a/kf-protocol/kf-protocol-build/src/file_to_json.rs b/kf-protocol/kf-protocol-build/src/file_to_json.rs new file mode 100644 index 0000000000..efc2c99cd4 --- /dev/null +++ b/kf-protocol/kf-protocol-build/src/file_to_json.rs @@ -0,0 +1,47 @@ +//! +//! # File to Json +//! +//! Reads and validates that file is proper json +//! + +use std::fs::File; +use std::io::prelude::*; +use std::io::BufReader; +use std::path::PathBuf; +use std::process; + +use serde_json::Value; +use serde_json::Result as JsonResult; + +/// Convert string to json +pub fn file_to_json(file_name: &PathBuf) -> JsonResult { + let v = serde_json::from_str(&file_to_clean_json(&file_name))?; + Ok(v) +} + +/// Reads file and removes comments for clean json +fn file_to_clean_json(file_name: &PathBuf) -> String { + // Access file + let f = match File::open(file_name) { + Ok(f) => f, + Err(err) => { + eprintln!("Error: {}", err); + process::exit(1); + } + }; + let file = BufReader::new(&f); + + // strip comments & collect everything else + let mut result = String::new(); + for line in file.lines() { + if let Ok(text) = line { + let raw = text.trim_start(); + if raw.len() >= 2 && &raw[..2] == "//" { + continue; + } + result.push_str(&text); + } + } + + result +} diff --git a/kf-protocol/kf-protocol-build/src/format_code.rs b/kf-protocol/kf-protocol-build/src/format_code.rs new file mode 100644 index 0000000000..d54a9e2b62 --- /dev/null +++ b/kf-protocol/kf-protocol-build/src/format_code.rs @@ -0,0 +1,154 @@ +//! +//! # Format code based son Jinja2 templates +//! 
diff --git a/kf-protocol/kf-protocol-build/src/format_code.rs b/kf-protocol/kf-protocol-build/src/format_code.rs new file mode 100644 index 0000000000..d54a9e2b62 --- /dev/null +++ b/kf-protocol/kf-protocol-build/src/format_code.rs @@ -0,0 +1,154 @@ +//! +//! # Format code based on Jinja2 templates +//! + +use std::fs::metadata; +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::path::Path; +use std::str; + +use serde_json; +use serde_json::value::{from_value, to_value}; +use serde_json::Value; +use std::collections::BTreeMap; + +use tera::compile_templates; +use tera::Error as TeraError; +use tera::{Context, GlobalFn}; + +use ::rustfmt_nightly::Config; +use ::rustfmt_nightly::EmitMode; +use ::rustfmt_nightly::Input; +use ::rustfmt_nightly::Session; +use ::rustfmt_nightly::Verbosity; + +use crate::file_content::FileContent; + +const BASE_FILE: &str = "fluvio_base.j2"; +const DEFAULT_TEMPLATE: &str = "./templates/*"; + +#[derive(Debug, PartialEq)] +pub struct TemplateFormat { + template_dir: String, +} + +// ----------------------------------- +// Implement - Template +// ----------------------------------- + +impl TemplateFormat { + /// save template directory for custom templates + pub fn new(maybe_dir: Option<String>) -> Result<Self, IoError> { + let template_dir = if let Some(dir) = maybe_dir { + // if set, ensure directory exists + if let Err(err) = metadata(&dir) { + return Err(IoError::new( + ErrorKind::InvalidData, + format!("{} - {}", dir, err), + )); + } + + // ensure base file exists + let base_file = Path::new(&dir).join(BASE_FILE); + if let Err(err) = metadata(base_file) { + return Err(IoError::new( + ErrorKind::InvalidData, + format!("{} is mandatory - {}", BASE_FILE, err), + )); + } + + // concatenate wildcard /* to the directory + Path::new(&dir).join("*").to_string_lossy().to_string() + } else { + DEFAULT_TEMPLATE.to_string() + }; + + Ok(TemplateFormat { template_dir }) + } + + /// Template files are loaded from the user's CLI input or the internal project directory. + /// These templates take the Request and Response messages as parameters to generate code. 
+ /// File content is returned in String format + pub fn generate_code(&self, content: &FileContent) -> Result<String, IoError> { + let mut context = Context::new(); + context.insert("request", &content.request); + context.insert("response", &content.response); + + let mut tera = compile_templates!(&self.template_dir); + tera.register_function("contains_field", make_contains_field(content.all_fields())); + let code = match tera.render(BASE_FILE, &context) { + Ok(result) => result, + Err(err) => return Err(IoError::new(ErrorKind::InvalidInput, format!("{}", err))), + }; + + Ok(code) + } +} + +/// Takes an unformatted string of code and converts it to a rustfmt-formatted string +pub fn rustify_code(input: String) -> Result<String, IoError> { + // configuration + let mut config = Config::default(); + config.set().emit_mode(EmitMode::Stdout); + config.set().verbose(Verbosity::Quiet); + + // output buffer + let mut buf: Vec<u8> = vec![]; + + // create a session and transform string (enclose in a block, so the session gets dropped + // after the call; otherwise buf cannot be used) + { + let mut session = Session::new(config, Some(&mut buf)); + if let Err(err) = session.format(Input::Text(input)) { + return Err(IoError::new( + ErrorKind::InvalidInput, + format!("cannot format output: {}", err), + )); + } + } + + // convert to string + match str::from_utf8(&buf) { + Ok(result) => Ok(result.to_string()), + Err(err) => Err(IoError::new( + ErrorKind::InvalidInput, + format!("conversion error: {}", err), + )), + } +} + +// global function used by Tera templates to look up fields by name/value +fn make_contains_field(all_fields: BTreeMap<String, Vec<String>>) -> GlobalFn { + Box::new(move |args| -> Result<Value, TeraError> { + // lookup name + let req_name = match args.get("name") { + Some(some_name) => match from_value::<String>(some_name.clone()) { + Ok(name) => Some(name), + Err(_) => None, + }, + None => None, + }; + + // lookup value + let req_value = match args.get("value") { + Some(some_value) => match from_value::<String>(some_value.clone()) { + Ok(value) => Some(value), + Err(_) => None, + }, + None => None, + }; + + if let Some(name) = req_name { + if let Some(value) = req_value { + if let Some(map_values) = all_fields.get(&name) { + if map_values.contains(&value) { + return Ok(to_value(true).unwrap()); + } + } + } + } + + Ok(to_value(false).unwrap()) + }) +}
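The contains_field helper registered above is what lets templates such as header.j2 emit imports conditionally (e.g. contains_field(name="isolation_level", value="i8")). Stripped of the Tera plumbing, the lookup it performs is just a map query; a minimal sketch of that logic, assuming the same BTreeMap<String, Vec<String>> shape:

    use std::collections::BTreeMap;

    // Sketch of the lookup performed inside make_contains_field: true iff some
    // field named `name` occurs with Rust type `value` in the message pair.
    fn contains_field(all: &BTreeMap<String, Vec<String>>, name: &str, value: &str) -> bool {
        all.get(name).map_or(false, |vals| vals.iter().any(|v| v == value))
    }

    fn main() {
        let mut all = BTreeMap::new();
        all.insert("isolation_level".to_string(), vec!["i8".to_string()]);
        assert!(contains_field(&all, "isolation_level", "i8")); // header.j2 would emit the Isolation import
        assert!(!contains_field(&all, "error_code", "i16"));
    }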
diff --git a/kf-protocol/kf-protocol-build/src/generate_code.rs b/kf-protocol/kf-protocol-build/src/generate_code.rs new file mode 100644 index 0000000000..49efd6571b --- /dev/null +++ b/kf-protocol/kf-protocol-build/src/generate_code.rs @@ -0,0 +1,105 @@ +//! +//! # Write Code to File +//! +//! Takes file pairs, generates code, and writes to directory +//! + +use std::fs::metadata; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use crate::constants::WARNING; + +use crate::format_code::rustify_code; +use crate::format_code::TemplateFormat; + +use crate::file_pairs::{FilePair, FilePairs}; +use crate::json_to_msg::parse_json_to_request; +use crate::json_to_msg::parse_json_to_response; + +use crate::file_content::build_file_content; +use crate::file_to_json::file_to_json; +use crate::output_to_file::code_to_output_file; +use crate::output_to_file::make_file_from_dir; + +// ----------------------------------- +// Implementation +// ----------------------------------- + +/// Generate code and write to individual files in a directory +pub fn gen_code_and_output_to_dir( + input_dir: &String, + dir: &String, + template: &TemplateFormat, + skip_formatter: bool, +) -> Result<(), IoError> { + // ensure output directory exists + if let Err(err) = metadata(dir) { + return Err(IoError::new( + ErrorKind::InvalidData, + format!("{} - {}", dir, err), + )); + } + + // each generated message pair goes into its own file + let file_pairs = FilePairs::new(input_dir)?; + for file_pair in &file_pairs.pairs { + match generate_code_from_files(file_pair, template) { + Ok(content) => { + let code = augment_code(&content, skip_formatter)?; + let mut file = make_file_from_dir(dir, &file_pair.filename)?; + + // add code to file + match code_to_output_file(&mut file, code) { + Ok(()) => {} + Err(err) => { + return Err(IoError::new(ErrorKind::InvalidData, format!("{}", err))); + } + } + } + Err(err) => return Err(IoError::new(ErrorKind::InvalidData, format!("{}", err))), + } + } + + Ok(()) +} + +// ----------------------------------- +// Private functions +// ----------------------------------- + +/// Augment code +/// * add Warning, +/// * run Rust formatter (if not skipped) +fn augment_code(content: &String, skip_formatter: bool) -> Result<String, IoError> { + let with_warning = format!("{}{}", WARNING, content); + let new_content = if skip_formatter { + with_warning + } else { + rustify_code(with_warning)? + }; + + Ok(new_content) +} + +/// Take a (Request & Response) file pair and a template to generate code +fn generate_code_from_files( + file_pair: &FilePair, + template: &TemplateFormat, +) -> Result<String, IoError> { + let req_json = match file_to_json(&file_pair.req_file) { + Ok(json) => json, + Err(err) => return Err(IoError::new(ErrorKind::InvalidData, format!("{}", err))), + }; + let res_json = match file_to_json(&file_pair.res_file) { + Ok(json) => json, + Err(err) => return Err(IoError::new(ErrorKind::InvalidData, format!("{}", err))), + }; + + // json to request/response message + let req_msg = parse_json_to_request(req_json)?; + let res_msg = parse_json_to_response(res_json)?; + + // use template and file content to generate code + template.generate_code(&build_file_content(&req_msg, &res_msg)) +}
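Taken together, gen_code_and_output_to_dir is the crate's main entry point. A hypothetical driver, not part of this patch (the directory names and wiring here are illustrative only), could look roughly like:

    // Hypothetical driver for the generator; paths below are made up.
    use std::io::Error as IoError;

    use kf_protocol_build::format_code::TemplateFormat;
    use kf_protocol_build::generate_code::gen_code_and_output_to_dir;

    fn main() -> Result<(), IoError> {
        // None selects the built-in ./templates; Some(dir) selects a custom
        // template set, which must contain fluvio_base.j2
        let template = TemplateFormat::new(None)?;
        gen_code_and_output_to_dir(
            &"./kafka-specs".to_string(),  // *Request.json / *Response.json pairs
            &"./src/kf/api".to_string(),   // output directory (must already exist)
            &template,
            false,                         // false = run rustfmt on the output
        )
    }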
diff --git a/kf-protocol/kf-protocol-build/src/json_to_msg.rs b/kf-protocol/kf-protocol-build/src/json_to_msg.rs new file mode 100644 index 0000000000..5f574b0642 --- /dev/null +++ b/kf-protocol/kf-protocol-build/src/json_to_msg.rs @@ -0,0 +1,467 @@ +//! +//! # Parse Json and generate Message Structure +//! +//! Takes a JSON value object and generates a data structure +//! +use std::io::Error; +use std::io::ErrorKind; + +use serde_json; +use serde_json::Value; + +use super::spec_msg::SpecMessage; +use super::spec_msg::SpecMessageType; +use super::spec_msg::{ApiVersions, DefaultType, NullableVersions, Versions}; +use super::spec_msg::{SpecField, SpecFieldType, SpecFields}; + +/// Convert Json to Request Msg +pub fn parse_json_to_request(val: Value) -> Result<SpecMessage, Error> { + let msg_type = get_msg_type(&val)?; + + match msg_type { + SpecMessageType::Request => parse_message(&val, msg_type), + SpecMessageType::Response => { + return Err(Error::new( + ErrorKind::InvalidData, + format!("expected 'Request', found '{}'", msg_type), + )); + } + } +} + +/// Convert Json to Response Msg +pub fn parse_json_to_response(val: Value) -> Result<SpecMessage, Error> { + let msg_type = get_msg_type(&val)?; + + match msg_type { + SpecMessageType::Request => { + return Err(Error::new( + ErrorKind::InvalidData, + format!("expected 'Response', found '{}'", msg_type), + )); + } + SpecMessageType::Response => parse_message(&val, msg_type), + } +} + +/// Json object into Request/Response message +fn parse_message(val: &Value, typ: SpecMessageType) -> Result<SpecMessage, Error> { + let name = get_name(&val)?; + let api_key = get_api_key(&val)?; + let api_versions = get_api_versions(&val)?; + let fields = match maybe_get_fields(&val)? { + Some(vals) => Some(parse_fields(vals)?), + None => None, + }; + + // build message + Ok(SpecMessage { + name, + api_key, + api_versions, + typ, + fields, + }) +} + +/// Json object into fields +fn parse_fields<'a>(vals: &'a Vec<Value>) -> Result<SpecFields, Error> { + let mut fields: SpecFields = vec![]; + + for val in vals { + fields.push(parse_field(val)?); + } + + Ok(fields) +} + +/// Json object into a single field +fn parse_field(val: &Value) -> Result<SpecField, Error> { + let name = get_name(&val)?; + let typ = get_field_type(&val)?; + let versions = get_versions(&val)?; + let about = maybe_string(&val, "about")?; + let entity_type = maybe_string(&val, "entityType")?; + let default = maybe_default(&val)?; + let ignorable = maybe_bool(&val, "ignorable")?; + let map_key = maybe_bool(&val, "mapKey")?; + let nullable_versions = maybe_get_nullable_versions(&val)?; + let fields = match maybe_get_fields(&val)? 
{ + Some(vals) => Some(parse_fields(vals)?), + None => None, + }; + + Ok(SpecField { + name, + typ, + versions, + about, + default, + nullable_versions, + entity_type, + ignorable, + map_key, + fields, + }) +} + +/// Decode 'value' at key or error +fn get_key<'a>(val: &'a Value, key: &str) -> Result<&'a Value, Error> { + match val.get(key) { + Some(v) => Ok(v), + None => Err(Error::new( + ErrorKind::InvalidData, + format!("expected '{}', found none", key), + )), + } +} + +/// Decode 'name' string or error +fn get_name(val: &Value) -> Result<String, Error> { + let name = get_string(val, "name")?; + // rename "Type" to avoid a clash in the generated code + match name.as_str() { + "Type" => Ok("Typ".to_string()), + _ => Ok(name), + } +} + +/// Decode 'apiKey' number or error +fn get_api_key(val: &Value) -> Result<i64, Error> { + get_i64(val, "apiKey") +} + +/// Decode 'type' at key to SpecMessageType or error +pub fn get_msg_type(val: &Value) -> Result<SpecMessageType, Error> { + let key = "type"; + + match get_key(&val, key)?.as_str() { + Some(v) => SpecMessageType::decode(key, v), + None => Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', not message type", key), + )), + } +} + +/// Decode 'i64' at key or error +fn get_i64(val: &Value, key: &str) -> Result<i64, Error> { + match get_key(&val, key)?.as_i64() { + Some(v) => Ok(v), + None => Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', not i64 number", key), + )), + } +} + +#[allow(dead_code)] +/// Decode 'bool' at key or error +fn get_bool(val: &Value, key: &str) -> Result<bool, Error> { + match get_key(&val, key)?.as_bool() { + Some(v) => Ok(v), + None => Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', not boolean", key), + )), + } +} + +/// Decode 'bool' if available at key or error +fn maybe_bool(val: &Value, key: &str) -> Result<Option<bool>, Error> { + match get_key(&val, key) { + Ok(v) => match v.as_bool() { + Some(v) => Ok(Some(v)), + None => Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', not boolean", key), + )), + }, + Err(_) => Ok(None), + } +} + +/// Decode 'default' if available at key or error +fn maybe_default(val: &Value) -> Result<Option<DefaultType>, Error> { + let key = "default"; + match get_key(&val, key) { + Ok(val) => match DefaultType::decode(key, val) { + Ok(v) => Ok(Some(v)), + Err(err) => Err(err), + }, + Err(_) => Ok(None), + } +} + +/// Decode 'String' at key or error +fn get_string(val: &Value, key: &str) -> Result<String, Error> { + match get_key(&val, key)?.as_str() { + Some(v) => Ok(v.to_string()), + None => Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', not string", key), + )), + } +} + +/// Decode 'String' if available at key or error +fn maybe_string(val: &Value, key: &str) -> Result<Option<String>, Error> { + match get_key(&val, key) { + Ok(v) => match v.as_str() { + Some(v) => Ok(Some(v.to_string())), + None => Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', not string", key), + )), + }, + Err(_) => Ok(None), + } +} + +/// Decode 'versions' at key to Version enum or error +fn get_versions(val: &Value) -> Result<Versions, Error> { + let key = "versions"; + match get_key(&val, key)?.as_str() { + Some(v) => Versions::decode(key, v), + None => Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', incorrect versions format", key), + )), + } +} + +/// Decode 'validVersions' at key to Version enum or error +fn get_api_versions(val: &Value) -> Result<ApiVersions, Error> { + let key = "validVersions"; + match get_key(&val, key)?.as_str() { + Some(v) => ApiVersions::decode(key, v), + None => Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', not validVersions", key), + )), + } +} + +/// Decode 'nullableVersions' if 
available +fn maybe_get_nullable_versions(val: &Value) -> Result<Option<NullableVersions>, Error> { + let key = "nullableVersions"; + match get_key(&val, key) { + Ok(v_raw) => match v_raw.as_str() { + Some(v) => match NullableVersions::decode(key, v) { + Ok(ver) => Ok(Some(ver)), + Err(err) => Err(err), + }, + None => Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', not version string", key), + )), + }, + Err(_) => Ok(None), + } +} + +/// Decode 'fields' if available +fn maybe_get_fields(val: &Value) -> Result<Option<&Vec<Value>>, Error> { + match get_key(&val, "fields") { + Ok(v) => match v.as_array() { + Some(v) => Ok(Some(v)), + None => Err(Error::new( + ErrorKind::InvalidData, + "key 'fields', not an array", + )), + }, + Err(_) => Ok(None), + } +} + +/// Decode field 'type' message at key to SpecFieldType or error +fn get_field_type(val: &Value) -> Result<SpecFieldType, Error> { + let key = "type"; + match get_key(&val, key)?.as_str() { + Some(v) => SpecFieldType::decode(key, v), + None => Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', not field type", key), + )), + } +} + +// ----------------------------------- +// Test Cases +// ----------------------------------- + +#[cfg(test)] +mod test { + use super::*; + use crate::spec_msg::PType; + + use serde_json; + use serde_json::Value; + use std::io::Error as IoError; + use std::fs::File; + use std::io::BufReader; + use std::path::Path; + + fn from_file<P: AsRef<Path>>(path: P) -> Result<Value, IoError> { + Ok(serde_json::from_reader(BufReader::new(File::open(path)?))?) + } + + #[test] + fn test_parse_read_json() { + let value = from_file("./test-data/MetadataRequest_clean.json"); + assert!(value.is_ok()); + } + + #[test] + fn test_get_i64() { + let val = from_file("./test-data/MetadataRequest_clean.json").unwrap(); + let api_key = get_api_key(&val); + assert!(api_key.is_ok()); + assert_eq!(api_key.unwrap(), 3); + } + + #[test] + fn test_get_string() { + let val = from_file("./test-data/MetadataRequest_clean.json").unwrap(); + let name = get_name(&val); + assert!(name.is_ok()); + assert_eq!(name.unwrap(), "MetadataRequest".to_owned()); + + // maybe string + let none = maybe_string(&val, "ttt"); + assert!(none.is_ok()); + assert!(none.unwrap().is_none()); + } + + #[test] + fn test_get_msg_type() { + let val = from_file("./test-data/MetadataRequest_clean.json").unwrap(); + let msg_type = get_msg_type(&val); + assert!(msg_type.is_ok()); + assert_eq!(msg_type.unwrap(), SpecMessageType::Request); + + let val = serde_json::from_str("{\"notValid\": \"na\"}").unwrap(); + let msg_type = get_msg_type(&val); + assert!(msg_type.is_err()); + assert_eq!( + msg_type.unwrap_err().to_string(), + "expected 'type', found none".to_owned() + ); + } + + #[test] + fn test_get_version() { + // Invalid + let val = serde_json::from_str("{\"versions\": \"test\"}").unwrap(); + let version = get_versions(&val); + assert!(version.is_err()); + assert_eq!( + version.unwrap_err().to_string(), + "key 'versions', incorrect version 'test'".to_owned() + ); + + // Range + let val = serde_json::from_str("{\"versions\": \"0-2\"}").unwrap(); + let version = get_versions(&val); + assert!(version.is_ok()); + assert_eq!(version.unwrap(), Versions::Range(0, 2)); + + // Exact + let val = serde_json::from_str("{\"versions\": \"11\"}").unwrap(); + let version = get_versions(&val); + assert!(version.is_ok()); + assert_eq!(version.unwrap(), Versions::Exact(11)); + + // Exact + let val = serde_json::from_str("{\"versions\": \"0\"}").unwrap(); + let version = get_versions(&val); + assert!(version.is_ok()); + assert_eq!(version.unwrap(), 
Versions::Exact(0)); + + // GreaterOrEqualTo + let val = serde_json::from_str("{\"versions\": \"0+\"}").unwrap(); + let version = get_versions(&val); + assert!(version.is_ok()); + assert_eq!(version.unwrap(), Versions::GreaterOrEqualTo(0)); + } + + #[test] + fn test_parse_fields() { + let val = from_file("./test-data/MetadataRequest_clean.json").unwrap(); + let ok_fields = maybe_get_fields(&val); + assert!(ok_fields.is_ok()); + + let some_fields = ok_fields.unwrap(); + assert!(some_fields.is_some()); + assert!(!some_fields.unwrap().is_empty()); + } + + #[test] + fn test_parse_field_type() { + // Invalid + let val = serde_json::from_str("{\"notType\": \"not valid\"}").unwrap(); + let typ = get_field_type(&val); + assert!(typ.is_err()); + assert_eq!( + typ.unwrap_err().to_string(), + "expected 'type', found none".to_owned() + ); + + // Primitive + let val = serde_json::from_str("{\"type\": \"int8\"}").unwrap(); + let typ = get_field_type(&val); + assert!(typ.is_ok()); + assert_eq!(typ.unwrap(), SpecFieldType::PRIMITIVE(PType::INT8)); + + // Primitive Array + let val = serde_json::from_str("{\"type\": \"[]int32\"}").unwrap(); + let typ = get_field_type(&val); + assert!(typ.is_ok()); + assert_eq!(typ.unwrap(), SpecFieldType::PRIMITIVE_ARRAY(PType::INT32)); + + // Custom Array + let val = serde_json::from_str("{\"type\": \"[]CreatableTopic\"}").unwrap(); + let typ = get_field_type(&val); + assert!(typ.is_ok()); + assert_eq!( + typ.unwrap(), + SpecFieldType::CUSTOM_ARRAY("CreatableTopic".to_owned()) + ); + } + + #[test] + fn test_parse_maybe_default() { + // No default + let val = serde_json::from_str("{\"notDefault\": \"not valid\"}").unwrap(); + let default = maybe_default(&val); + assert!(default.is_ok()); + assert!(default.unwrap().is_none()); + + // negative number (string) + let val = serde_json::from_str("{\"default\": \"-1\"}").unwrap(); + let default = maybe_default(&val); + + assert!(default.is_ok()); + assert_eq!( + default.unwrap(), + Some(DefaultType::STRING("-1".to_string())) + ); + + // negative number + let val = serde_json::from_str("{\"default\": -1}").unwrap(); + let default = maybe_default(&val); + + assert!(default.is_ok()); + assert_eq!(default.unwrap(), Some(DefaultType::INT64(-1))); + + // boolean-looking string stays a string + let val = serde_json::from_str("{\"default\": \"true\"}").unwrap(); + let default = maybe_default(&val); + + assert!(default.is_ok()); + assert_eq!( + default.unwrap(), + Some(DefaultType::STRING("true".to_string())) + ); + } +} diff --git a/kf-protocol/kf-protocol-build/src/lib.rs b/kf-protocol/kf-protocol-build/src/lib.rs new file mode 100644 index 0000000000..5807c65758 --- /dev/null +++ b/kf-protocol/kf-protocol-build/src/lib.rs @@ -0,0 +1,17 @@ +//! +//! # Kafka Json Spec to Rust code +//! +//! Takes a Kafka json spec file and generates Rust data structures +//! + +mod constants; +mod file_content; +mod file_pairs; +mod file_to_json; +mod json_to_msg; +mod spec_msg; + +pub mod check_keys; +pub mod format_code; +pub mod generate_code; +pub mod output_to_file; diff --git a/kf-protocol/kf-protocol-build/src/output_to_file.rs b/kf-protocol/kf-protocol-build/src/output_to_file.rs new file mode 100644 index 0000000000..ae1b0fcb07 --- /dev/null +++ b/kf-protocol/kf-protocol-build/src/output_to_file.rs @@ -0,0 +1,93 @@ +//! +//! # Output to file +//! +//! Takes code and pushes to an output directory. +//! Any existing file with a colliding name will be replaced. +//! 
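The filename convention used below is worth spelling out: the shared message stem (e.g. "Metadata") is snake_cased by the make_rust_filename! macro to produce the output module name. A small sketch of the mapping, assuming the Inflector crate's to_snake_case:

    use inflector::Inflector;

    // Sketch of the output-filename mapping performed by make_rust_filename!.
    fn rust_filename(message: &str) -> String {
        format!("{}.rs", message.to_snake_case())
    }

    fn main() {
        assert_eq!(rust_filename("Metadata"), "metadata.rs");
        assert_eq!(rust_filename("OffsetFetch"), "offset_fetch.rs"); // hypothetical message stem
    }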
+ +use std::fs::metadata; +use std::fs::{File, OpenOptions}; +use std::io::prelude::*; +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::path::Path; +use std::path::PathBuf; + +use inflector::Inflector; + +// ----------------------------------- +// Macros +// ----------------------------------- + +macro_rules! make_rust_filename { + ($filename:expr) => { + &format!("{}.rs", $filename.to_snake_case()); + }; +} + +// ----------------------------------- +// Implementation +// ----------------------------------- + +/// Take Request/Response code and write it to the output file +pub fn code_to_output_file(file: &mut File, code: String) -> Result<(), IoError> { + file.write_all(code.as_bytes())?; + + Ok(()) +} + +// Generate a file pointer from a filename +pub fn file_from_name<P>(filename: &P) -> Result<File, IoError> +where + P: AsRef<Path>, +{ + match open_file(filename) { + Ok(file) => Ok(file), + Err(err) => Err(IoError::new( + ErrorKind::InvalidData, + format!("{} - {}", filename.as_ref().display(), err), + )), + } +} + +/// Create and open file +pub fn open_file<P>(file_path: P) -> Result<File, IoError> +where + P: AsRef<Path>, +{ + Ok(OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .open(file_path)?) +} + +/// Construct the output file path from a directory and filename +pub fn make_file_path(dir: &str, filename: &str) -> Result<String, IoError> { + // check if exists + metadata(dir)?; + + // construct file + let mut target_file = PathBuf::new(); + target_file.push(dir); + target_file.push(make_rust_filename!(filename)); + + Ok(target_file.to_string_lossy().to_string()) +} + +// Generate a file pointer from directory and filename +pub fn make_file_from_dir(dir: &str, filename: &str) -> Result<File, IoError> { + match make_file_path(dir, filename) { + Ok(file_path) => match open_file(&file_path) { + Ok(file) => Ok(file), + Err(err) => Err(IoError::new( + ErrorKind::InvalidData, + format!("{} - {}", filename, err), + )), + }, + Err(err) => Err(IoError::new( + ErrorKind::InvalidData, + format!("{} - {}", filename, err), + )), + } +} diff --git a/kf-protocol/kf-protocol-build/src/spec_msg.rs b/kf-protocol/kf-protocol-build/src/spec_msg.rs new file mode 100644 index 0000000000..f10d3ef19a --- /dev/null +++ b/kf-protocol/kf-protocol-build/src/spec_msg.rs @@ -0,0 +1,364 @@ +//! +//! # SpecMessage Structure +//! +//! SpecMessage structures +//! +use std::fmt; +use std::io::Error; +use std::io::ErrorKind; + +use serde_json::Value; + +#[derive(Debug, PartialEq)] +pub struct SpecMessage { + pub name: String, + pub api_key: i64, + pub api_versions: ApiVersions, + pub typ: SpecMessageType, + pub fields: Option<SpecFields>, +} + +#[derive(Debug, PartialEq)] +pub struct SpecField { + pub name: String, + pub typ: SpecFieldType, + pub versions: Versions, + pub about: Option<String>, + pub default: Option<DefaultType>, + pub nullable_versions: Option<NullableVersions>, + pub entity_type: Option<String>, + pub ignorable: Option<bool>, + pub map_key: Option<bool>, + pub fields: Option<SpecFields>, +} +pub type SpecFields = Vec<SpecField>; + +#[derive(Debug, PartialEq)] +pub enum SpecMessageType { + Request, + Response, +} + +pub type StructName = String; + +#[allow(non_camel_case_types)] +#[derive(Debug, PartialEq)] +pub enum SpecFieldType { + PRIMITIVE(PType), + PRIMITIVE_ARRAY(PType), + CUSTOM_ARRAY(StructName), +} + +#[derive(Debug, PartialEq)] +pub enum PType { + BOOL, + INT8, + INT16, + INT32, + INT64, + STRING, + BYTES, +} + +#[derive(Debug, PartialEq)] +pub enum DefaultType { + INT64(i64), + STRING(String), +} + +#[derive(Debug, PartialEq)] +pub enum Versions { + Exact(i16), + Range(i16, i16), + GreaterOrEqualTo(i16), +} + +#[derive(Debug, PartialEq)] +pub enum ApiVersions { + Exact(i16), + Range(i16, i16), +} + +#[derive(Debug, PartialEq)] +pub struct NullableVersions { + min_ver: i16, +} + +// ----------------------------------- +// Implement - SpecMessageType +// ----------------------------------- + +impl SpecMessageType { + pub fn decode(key: &str, val: &str) -> Result<Self, Error> { + match val { + "request" => Ok(SpecMessageType::Request), + "response" => Ok(SpecMessageType::Response), + _ => Err(Error::new( + ErrorKind::InvalidData, + format!( + "key '{}', expected: 'request', 'response' - found '{}'", + key, val + ), + )), + } + } +} + +impl fmt::Display for SpecMessageType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + SpecMessageType::Request => write!(f, "request"), + SpecMessageType::Response => write!(f, "response"), + } + } +} + +// ----------------------------------- +// Implement - PType +// ----------------------------------- +impl PType { + pub fn new(val: &str) -> Result<Self, Error> { + match val { + "bool" => Ok(PType::BOOL), + "int8" => Ok(PType::INT8), + "int16" => Ok(PType::INT16), + "int32" => 
Ok(PType::INT32), + "int64" => Ok(PType::INT64), + "string" => Ok(PType::STRING), + "bytes" => Ok(PType::BYTES), + _ => Err(Error::new( + ErrorKind::InvalidData, + format!("invalid primitive type '{}'", val), + )), + } + } + + pub fn from(&self) -> &str { + match self { + PType::BOOL => "bool", + PType::INT8 => "i8", + PType::INT16 => "i16", + PType::INT32 => "i32", + PType::INT64 => "i64", + PType::STRING => "String", + PType::BYTES => "Vec<u8>", + } + } +} + +// ----------------------------------- +// Implement - SpecFieldType +// ----------------------------------- + +impl SpecFieldType { + /// Decode field type, primitives, primitive arrays, and custom arrays + pub fn decode(key: &str, val: &str) -> Result<Self, Error> { + match PType::new(val) { + Ok(primitive) => Ok(SpecFieldType::PRIMITIVE(primitive)), + Err(_) => SpecFieldType::decode_array(key, val), + } + } + + /// Decode primitive or custom arrays + pub fn decode_array(key: &str, val: &str) -> Result<Self, Error> { + if val.len() < 3 { + return Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', incorrect array type '{}'", key, val), + )); + } + + // ensure array "[]..." + let arr_str = &val[..2]; + if arr_str != "[]" { + return Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', not array type '{}'", key, val), + )); + } + + // parse content + let content = &val[2..]; + match PType::new(content) { + Ok(primitive) => Ok(SpecFieldType::PRIMITIVE_ARRAY(primitive)), + Err(_) => Ok(SpecFieldType::CUSTOM_ARRAY(content.to_string())), + } + } + + /// True if primitive String or Array + pub fn is_string_or_array(&self) -> bool { + match self { + SpecFieldType::PRIMITIVE(p_type) => match p_type { + PType::BYTES => true, //array of u8 + PType::STRING => true, + _ => false, + }, + + SpecFieldType::PRIMITIVE_ARRAY(_) => true, + SpecFieldType::CUSTOM_ARRAY(_) => true, + } + } + + /// Convert into string value + pub fn value(&self) -> String { + match self { + SpecFieldType::PRIMITIVE(p_type) => p_type.from().to_owned(), + SpecFieldType::PRIMITIVE_ARRAY(p_type) => format!("Vec<{}>", p_type.from().to_owned()), + SpecFieldType::CUSTOM_ARRAY(struct_name) => format!("Vec<{}>", struct_name), + } + } + + /// Return value name + pub fn custom_value_name(&self) -> String { + match self { + SpecFieldType::CUSTOM_ARRAY(struct_name) => struct_name.clone(), + _ => "unknown".to_owned(), + } + } +} + +// ----------------------------------- +// Implement - DefaultType +// ----------------------------------- + +impl DefaultType { + pub fn decode(key: &str, val: &Value) -> Result<Self, Error> { + if let Some(v) = val.as_i64() { + Ok(DefaultType::INT64(v)) + } else if let Some(v) = val.as_str() { + Ok(DefaultType::STRING(v.to_string())) + } else { + Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', unknown default type", key), + )) + } + } + + /// Convert into string value + pub fn value(&self) -> String { + match self { + DefaultType::INT64(v) => v.to_string(), + DefaultType::STRING(s) => s.clone(), + } + } +} + +// ----------------------------------- +// Implement - Version +// ----------------------------------- + +impl Versions { + pub fn decode(key: &str, val: &str) -> Result<Self, Error> { + let versions: Vec<&str> = val.split('-').collect(); + // Parse Range + if versions.len() == 2 { + let min_range = match versions[0].parse::<i16>() { + Ok(min_ver) => min_ver, + Err(_) => { + return Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', incorrect version range '{}'", key, versions[0]), + )); + } + }; + + let max_range = match versions[1].parse::<i16>() { + Ok(max_ver) => 
max_ver, + Err(_) => { + return Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', incorrect version range '{}'", key, versions[1]), + )); + } + }; + + Ok(Versions::Range(min_range, max_range)) + } else { + let versions: Vec<&str> = val.split('+').collect(); + + // Parse exact value + let ver_num = match versions[0].parse::<i16>() { + Ok(ver_num) => ver_num, + Err(_) => { + return Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', incorrect version '{}'", key, val), + )); + } + }; + + // Exact or Greater + if val.contains('+') { + Ok(Versions::GreaterOrEqualTo(ver_num)) + } else { + Ok(Versions::Exact(ver_num)) + } + } + } + + /// Check if version is Exactly zero "0" + pub fn is_zero(&self) -> bool { + match self { + Versions::Exact(ver) => *ver == 0, + _ => false, + } + } + + /// Check if version is Greater or equal to zero "0+" + pub fn is_zero_plus(&self) -> bool { + match self { + Versions::GreaterOrEqualTo(min_ver) => *min_ver == 0, + _ => false, + } + } + + /// Retrieve versions as a pair tuple, where the second element is optional + pub fn touples(&self) -> (i16, Option<i16>) { + match self { + Versions::Exact(ver) => (*ver, Some(*ver)), + Versions::Range(min_ver, max_ver) => (*min_ver, Some(*max_ver)), + Versions::GreaterOrEqualTo(min_ver) => (*min_ver, None) + } + } +} + +// ----------------------------------- +// Implement - ApiVersions +// ----------------------------------- + +impl ApiVersions { + pub fn decode(key: &str, val: &str) -> Result<Self, Error> { + match Versions::decode(key, val)? { + Versions::Exact(ver) => Ok(ApiVersions::Exact(ver)), + Versions::Range(min, max) => Ok(ApiVersions::Range(min, max)), + _ => Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', incorrect validVersions", key), + )), + } + } + + /// Retrieve API versions as a pair tuple + pub fn touples(&self) -> (i16, i16) { + match self { + ApiVersions::Exact(ver) => (*ver, *ver), + ApiVersions::Range(min_ver, max_ver) => (*min_ver, *max_ver) + } + } +} + +// ----------------------------------- +// Implement - NullableVersions +// ----------------------------------- + +impl NullableVersions { + pub fn decode(key: &str, val: &str) -> Result<Self, Error> { + match Versions::decode(key, val)? 
{ + Versions::GreaterOrEqualTo(min_ver) => Ok(NullableVersions { min_ver }), + _ => Err(Error::new( + ErrorKind::InvalidData, + format!("key '{}', incorrect nullableVersions", key), + )), + } + } +} diff --git a/kf-protocol/kf-protocol-build/templates/content.j2 b/kf-protocol/kf-protocol-build/templates/content.j2 new file mode 100644 index 0000000000..27ecf20728 --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates/content.j2 @@ -0,0 +1,34 @@ + +// ----------------------------------- +// {{request_name}} +// ----------------------------------- + +{% set structure_name = request_name -%} +{% set fields = request.fields -%} +{% include "struct.j2" %} + +{% for structure in request.structures -%} + {% set structure_name = structure.name -%} + {% set fields = structure.fields -%} + {% include "struct.j2" %} +{% endfor %} + +// ----------------------------------- +// {{response_name}} +// ----------------------------------- + +{% set structure_name = response_name -%} +{% set fields = response.fields -%} +{% include "struct.j2" %} + +{% for structure in response.structures -%} + {% set structure_name = structure.name -%} + {% set fields = structure.fields -%} + {% include "struct.j2" %} +{% endfor %} + +// ----------------------------------- +// Implementation - {{request_name}} +// ----------------------------------- + + {% include "impl_request.j2" %} \ No newline at end of file diff --git a/kf-protocol/kf-protocol-build/templates/field.j2 b/kf-protocol/kf-protocol-build/templates/field.j2 new file mode 100644 index 0000000000..6742a45fcf --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates/field.j2 @@ -0,0 +1,35 @@ +{# Translate Field Values #} +{%- set field_name = field.name -%} +{%- if field.name == "error_code" and field.value == "i16" -%} + {%- set field_value = "ErrorCode" -%} +{%- elif field.name == "isolation_level" and field.value == "i8" -%} + {%- set field_value = "Isolation" -%} +{%- elif field.name == "records" and field.value == "Option>" -%} + {%- set field_value = "R" -%} +{%- elif field.name == "metadata" and field.value == "Vec" -%} + {%- set field_value = "ProtocolMetadata" -%} +{%- elif field.name == "assignment" and field.value == "Vec" -%} + {%- set field_value = "GroupAssignment" -%} +{%- elif field.name == "topics" and field.value == "Vec" -%} + {%- set field_value = "Vec>" -%} +{%- elif field.name == "partitions" and field.value == "Vec" -%} + {%- set field_value = "Vec>" -%} +{%- elif field.name == "topics" and field.value == "Vec" -%} + {%- set field_value = " Vec>" -%} +{%- elif field.name == "partitions" and field.value == "Vec" -%} + {%- set field_value = " Vec>" -%} +{%- else -%} + {%- set field_value = field.value -%} +{%- endif -%} + +{# Generate Field #} +{%if field.comment %} + {{field.comment}} +{%- endif %} +{%- if field.annotation %} + {%- include "field_annotation.j2" -%} + pub {{ field_name }}: {{ field_value }}, +{% else %} + pub {{ field_name }}: {{ field_value }}, +{%- endif -%} + diff --git a/kf-protocol/kf-protocol-build/templates/field_annotation.j2 b/kf-protocol/kf-protocol-build/templates/field_annotation.j2 new file mode 100644 index 0000000000..e8b15eb7f3 --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates/field_annotation.j2 @@ -0,0 +1,17 @@ +#[fluvio_kf( + {%- if field.annotation.min_version -%} + min_version = {{field.annotation.min_version}}, + {%- endif -%} + + {%- if field.annotation.max_version -%} + max_version = {{field.annotation.max_version}}, + {%- endif -%} + + {%- if field.annotation.ignorable -%} + 
ignorable, + {%- endif -%} + + {%- if false -%} + default = "{{field.annotation.default}}", + {%- endif -%} +)] diff --git a/kf-protocol/kf-protocol-build/templates/fluvio_base.j2 b/kf-protocol/kf-protocol-build/templates/fluvio_base.j2 new file mode 100644 index 0000000000..22079204dc --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates/fluvio_base.j2 @@ -0,0 +1,21 @@ +{# Set Request (request_name) variable #} +{%- set request_name = request.name -%} +{%- set request_trait = "" -%} +{%- set request_trait_clause = "" -%} + +{%- if request.name == "KfFetchRequest" or + request.name == "KfProduceRequest" -%} + {%- set request_name = request_name ~ "" -%} + {%- set request_trait = "" -%} + {%- set request_trait_clause = " where R: Debug + Decoder + Encoder " -%} +{%- endif -%} + +{# Set Response (response_name) variable #} +{%- set response_name = response.name -%} +{%- if response.name == "KfFetchResponse" -%} + {%- set response_name = "KfFetchResponse" -%} +{%- endif -%} + +{# Generate Code #} +{% include "header.j2" %} +{% include "content.j2" %} \ No newline at end of file diff --git a/kf-protocol/kf-protocol-build/templates/header.j2 b/kf-protocol/kf-protocol-build/templates/header.j2 new file mode 100644 index 0000000000..4e8f7e2061 --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates/header.j2 @@ -0,0 +1,29 @@ +{% + if request_name == "KfFetchRequest" or + request_name == "KfProduceRequest" +-%} +use std::fmt::Debug; +use std::marker::PhantomData; + +use kf_protocol::Decoder; +use kf_protocol::Encoder; +{% endif %} + +use serde::{Serialize, Deserialize}; + +use kf_protocol_api::ErrorCode; +{% if contains_field(name="isolation_level", value="i8") == true -%} +use kf_protocol_api::Isolation; +{% endif -%} +{% if contains_field(name="metadata", value="Vec") == true -%} +use kf_protocol_api::ProtocolMetadata; +{% endif -%} +{% if contains_field(name="assignment", value="Vec") == true -%} +use kf_protocol_api::GroupAssignment; +{% endif -%} +use kf_protocol_api::Request; + +use kf_protocol_derive::KfDefault; +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; + diff --git a/kf-protocol/kf-protocol-build/templates/impl_request.j2 b/kf-protocol/kf-protocol-build/templates/impl_request.j2 new file mode 100644 index 0000000000..e5e9c03e31 --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates/impl_request.j2 @@ -0,0 +1,9 @@ +impl {{request_trait}}Request for {{request_name}}{{request_trait_clause}}{ + const API_KEY: u16 = {{request.annotation.api_key}}; + + const MIN_API_VERSION: i16 = {{request.annotation.min_api_version}}; + const MAX_API_VERSION: i16 = {{request.annotation.max_api_version}}; + const DEFAULT_API_VERSION: i16 = {{request.annotation.max_api_version}}; + + type Response = {{response_name}}; +} diff --git a/kf-protocol/kf-protocol-build/templates/struct.j2 b/kf-protocol/kf-protocol-build/templates/struct.j2 new file mode 100644 index 0000000000..6914cb64e9 --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates/struct.j2 @@ -0,0 +1,48 @@ +{%- set structure_where_clause = "" -%} +{%- set clause_content = "where R: Encoder + Decoder + Default + Debug" -%} + +{# Structure Name - augment with #} +{%- + if structure_name == "FetchableTopicResponse" or + structure_name == "FetchablePartitionResponse" or + structure_name == "TopicProduceData" or + structure_name == "PartitionProduceData" +-%} + {%- set structure_name = structure_name ~ "" -%} +{%- endif -%} + +{# Structure Header - augment with Where clause #} +{%- + if structure_name == 
"KfFetchRequest" or + structure_name == "KfProduceRequest" or + structure_name == "KfFetchResponse" or + structure_name == "FetchableTopicResponse" or + structure_name == "FetchablePartitionResponse" or + structure_name == "TopicProduceData" or + structure_name == "PartitionProduceData" +-%} + {%- set structure_where_clause = clause_content -%} +{%- endif -%} + +{# Fields - add PhantomData #} +{%- + if structure_name == "KfFetchRequest" or + structure_name == "KfProduceRequest" or + structure_name == "KfFetchResponse" or + structure_name == "FetchableTopicResponse" or + structure_name == "TopicProduceData" +-%} + {%- set add_phantom_data = true -%} +{%- endif -%} + +{# Generate Structure #} +#[derive(Encode,Decode,Serialize,Deserialize,KfDefault,Debug)] +pub struct {{structure_name}} {{structure_where_clause}} { + {% for field in fields -%} + {% include "field.j2" -%} + {% endfor -%} + + {%- if add_phantom_data %} + pub data: PhantomData, + {%- endif %} +} diff --git a/kf-protocol/kf-protocol-build/templates_custom/basic/field.j2 b/kf-protocol/kf-protocol-build/templates_custom/basic/field.j2 new file mode 100644 index 0000000000..70130cc00c --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/basic/field.j2 @@ -0,0 +1,6 @@ +{% if field.annotation %} + {{ field.annotation }} + {{ field.name }}: {{ field.value }}, +{% else -%} + {{ field.name }}: {{ field.value }}, +{%- endif -%} diff --git a/kf-protocol/kf-protocol-build/templates_custom/basic/fluvio_base.j2 b/kf-protocol/kf-protocol-build/templates_custom/basic/fluvio_base.j2 new file mode 100644 index 0000000000..7d4b600107 --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/basic/fluvio_base.j2 @@ -0,0 +1,33 @@ +// ----------------------------------- +// {{request.name}} +// ----------------------------------- + +{{ request.annotation }} +struct {{request.name}} +{% set structure_name = request.name -%} +{ + {% for field in request.fields -%} + {% include "field.j2" -%} + {% endfor %} +} + +{% for structure in request.structures -%} + {% set structure_name = structure.name -%} + {% include "structure.j2" %} +{% endfor %} + +// ----------------------------------- +// {{response.name}} +// ----------------------------------- + +struct {{response.name}} +{% set structure_name = response.name -%} +{ + {% for field in response.fields -%} + {% include "field.j2" -%} + {% endfor %} +} +{% for structure in response.structures -%} + {% set structure_name = structure.name -%} + {% include "structure.j2" %} +{% endfor %} \ No newline at end of file diff --git a/kf-protocol/kf-protocol-build/templates_custom/basic/structure.j2 b/kf-protocol/kf-protocol-build/templates_custom/basic/structure.j2 new file mode 100644 index 0000000000..f3382d751c --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/basic/structure.j2 @@ -0,0 +1,6 @@ +struct {{structure_name}} +{ + {% for field in structure.fields -%} + {% include "field.j2" -%} + {% endfor %} +} diff --git a/kf-protocol/kf-protocol-build/templates_custom/v1/content.j2 b/kf-protocol/kf-protocol-build/templates_custom/v1/content.j2 new file mode 100644 index 0000000000..27ecf20728 --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/v1/content.j2 @@ -0,0 +1,34 @@ + +// ----------------------------------- +// {{request_name}} +// ----------------------------------- + +{% set structure_name = request_name -%} +{% set fields = request.fields -%} +{% include "struct.j2" %} + +{% for structure in request.structures -%} + {% set structure_name = 
structure.name -%} + {% set fields = structure.fields -%} + {% include "struct.j2" %} +{% endfor %} + +// ----------------------------------- +// {{response_name}} +// ----------------------------------- + +{% set structure_name = response_name -%} +{% set fields = response.fields -%} +{% include "struct.j2" %} + +{% for structure in response.structures -%} + {% set structure_name = structure.name -%} + {% set fields = structure.fields -%} + {% include "struct.j2" %} +{% endfor %} + +// ----------------------------------- +// Implementation - {{request_name}} +// ----------------------------------- + + {% include "impl_request.j2" %} \ No newline at end of file diff --git a/kf-protocol/kf-protocol-build/templates_custom/v1/field.j2 b/kf-protocol/kf-protocol-build/templates_custom/v1/field.j2 new file mode 100644 index 0000000000..8834f2a99d --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/v1/field.j2 @@ -0,0 +1,24 @@ +{# Translate Field Values #} +{%- set field_name = field.name -%} +{%- if field.name == "error_code" and field.value == "i16" -%} + {%- set field_value = "ErrorCode" -%} +{%- elif field.name == "isolation_level" and field.value == "i8" -%} + {%- set field_value = "Isolation" -%} +{%- elif field.name == "records" and field.value == "Option" -%} + {%- set field_value = "R" -%} +{%- elif field.name == "topics" and field.value == "Vec" -%} + {%- set field_value = "Vec>" -%} +{%- elif field.name == "partitions" and field.value == "Vec" -%} + {%- set field_value = "Vec>" -%} +{%- else -%} + {%- set field_value = field.value -%} +{%- endif -%} + +{# Generate Field #} +{%- if field.annotation %} + {% include "field_annotation.j2" -%} + pub {{ field_name }}: {{ field_value }}, +{% else %} + pub {{ field_name }}: {{ field_value }}, +{%- endif -%} + diff --git a/kf-protocol/kf-protocol-build/templates_custom/v1/field_annotation.j2 b/kf-protocol/kf-protocol-build/templates_custom/v1/field_annotation.j2 new file mode 100644 index 0000000000..bbe0ff1344 --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/v1/field_annotation.j2 @@ -0,0 +1,18 @@ + +#[fluvio_kf( + {%- if field.annotation.min_version -%} + min_version = {{field.annotation.min_version}}, + {%- endif -%} + + {%- if field.annotation.max_version -%} + max_version = {{field.annotation.max_version}}, + {%- endif -%} + + {%- if field.annotation.ignorable -%} + ignorable, + {%- endif -%} + + {%- if field.annotation.default -%} + default = "{{field.annotation.default}}", + {%- endif -%} +)] diff --git a/kf-protocol/kf-protocol-build/templates_custom/v1/fluvio_base.j2 b/kf-protocol/kf-protocol-build/templates_custom/v1/fluvio_base.j2 new file mode 100644 index 0000000000..9e8e93e5d8 --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/v1/fluvio_base.j2 @@ -0,0 +1,20 @@ +{# Set Request (request_name) variable #} +{%- set request_name = request.name -%} +{%- set request_trait = "" -%} +{%- set request_trait_clause = "" -%} + +{%- if request.name == "KfFetchRequest" -%} + {%- set request_name = "KfFetchRequest" -%} + {%- set request_trait = "" -%} + {%- set request_trait_clause = " where R: Debug + Decoder + Encoder " -%} +{%- endif -%} + +{# Set Response (response_name) variable #} +{%- set response_name = response.name -%} +{%- if response.name == "KfFetchResponse" -%} + {%- set response_name = "KfFetchResponse" -%} +{%- endif -%} + +{# Generate Code #} +{% include "header.j2" %} +{% include "content.j2" %} \ No newline at end of file diff --git 
a/kf-protocol/kf-protocol-build/templates_custom/v1/header.j2 b/kf-protocol/kf-protocol-build/templates_custom/v1/header.j2 new file mode 100644 index 0000000000..b9e244733b --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/v1/header.j2 @@ -0,0 +1,18 @@ +{% if request_name == "KfFetchRequest" -%} +use std::fmt::Debug; +use std::marker::PhantomData; + +use kf_protocol::Decoder; +use kf_protocol::Encoder; +{% endif %} + +use kf_protocol_api::ErrorCode; +{% if contains_field(name="isolation_level", value="i8") == true -%} +use kf_protocol_api::Isolation; +{% endif -%} +use kf_protocol_api::Request; + +use kf_protocol_derive::KfDefault; +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; + diff --git a/kf-protocol/kf-protocol-build/templates_custom/v1/impl_request.j2 b/kf-protocol/kf-protocol-build/templates_custom/v1/impl_request.j2 new file mode 100644 index 0000000000..e5e9c03e31 --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/v1/impl_request.j2 @@ -0,0 +1,9 @@ +impl {{request_trait}}Request for {{request_name}}{{request_trait_clause}}{ + const API_KEY: u16 = {{request.annotation.api_key}}; + + const MIN_API_VERSION: i16 = {{request.annotation.min_api_version}}; + const MAX_API_VERSION: i16 = {{request.annotation.max_api_version}}; + const DEFAULT_API_VERSION: i16 = {{request.annotation.max_api_version}}; + + type Response = {{response_name}}; +} diff --git a/kf-protocol/kf-protocol-build/templates_custom/v1/request_annotation.j2 b/kf-protocol/kf-protocol-build/templates_custom/v1/request_annotation.j2 new file mode 100644 index 0000000000..a54a55a51f --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/v1/request_annotation.j2 @@ -0,0 +1,5 @@ +#[fluvio_kf( + min_api_version = {{request.annotation.min_api_version}}, + max_api_version = {{request.annotation.max_api_version}}, + api_key = {{request.annotation.api_key}}, + response = "{{response_name}}" )] \ No newline at end of file diff --git a/kf-protocol/kf-protocol-build/templates_custom/v1/struct.j2 b/kf-protocol/kf-protocol-build/templates_custom/v1/struct.j2 new file mode 100644 index 0000000000..75679795fb --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/v1/struct.j2 @@ -0,0 +1,35 @@ +{%- set structure_where_clause = "" -%} +{%- set clause_content = "where R: Encoder + Decoder + Default + Debug" -%} + +{# Request Handling (special case) #} +{%- if structure_name == "KfFetchRequest" -%} + {%- set add_phantom_data = true -%} + {%- set structure_where_clause = clause_content -%} +{%- endif -%} + +{# Response Handling (special case) #} +{%- if structure_name == "KfFetchResponse" -%} + {% set structure_where_clause = clause_content -%} +{%- endif -%} + +{# Translate Stucture Name #} +{%- if structure_name == "FetchableTopicResponse" -%} + {%- set structure_name = "FetchableTopicResponse" -%} + {%- set structure_where_clause = clause_content -%} +{%- elif structure_name == "FetchablePartitionResponse" -%} + {%- set structure_name = "FetchablePartitionResponse" -%} + {%- set structure_where_clause = clause_content -%} +{%- endif -%} + + +{# Generate Structure #} +#[derive(Encode,Decode,KfDefault,Debug)] +pub struct {{structure_name}} {{structure_where_clause}} { + {% for field in fields -%} + {% include "field.j2" -%} + {% endfor -%} + + {%- if add_phantom_data %} + pub data: PhantomData, + {%- endif %} +} diff --git a/kf-protocol/kf-protocol-build/templates_custom/v2/content.j2 b/kf-protocol/kf-protocol-build/templates_custom/v2/content.j2 new file mode 100644 
index 0000000000..27ecf20728 --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/v2/content.j2 @@ -0,0 +1,34 @@ + +// ----------------------------------- +// {{request_name}} +// ----------------------------------- + +{% set structure_name = request_name -%} +{% set fields = request.fields -%} +{% include "struct.j2" %} + +{% for structure in request.structures -%} + {% set structure_name = structure.name -%} + {% set fields = structure.fields -%} + {% include "struct.j2" %} +{% endfor %} + +// ----------------------------------- +// {{response_name}} +// ----------------------------------- + +{% set structure_name = response_name -%} +{% set fields = response.fields -%} +{% include "struct.j2" %} + +{% for structure in response.structures -%} + {% set structure_name = structure.name -%} + {% set fields = structure.fields -%} + {% include "struct.j2" %} +{% endfor %} + +// ----------------------------------- +// Implementation - {{request_name}} +// ----------------------------------- + + {% include "impl_request.j2" %} \ No newline at end of file diff --git a/kf-protocol/kf-protocol-build/templates_custom/v2/field.j2 b/kf-protocol/kf-protocol-build/templates_custom/v2/field.j2 new file mode 100644 index 0000000000..4be828ec28 --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/v2/field.j2 @@ -0,0 +1,28 @@ +{# Translate Field Values #} +{%- set field_name = field.name -%} +{%- if field.name == "error_code" and field.value == "i16" -%} + {%- set field_value = "ErrorCode" -%} +{%- elif field.name == "isolation_level" and field.value == "i8" -%} + {%- set field_value = "Isolation" -%} +{%- elif field.name == "records" and field.value == "Option" -%} + {%- set field_value = "R" -%} +{%- elif field.name == "topics" and field.value == "Vec" -%} + {%- set field_value = "Vec>" -%} +{%- elif field.name == "partitions" and field.value == "Vec" -%} + {%- set field_value = "Vec>" -%} +{%- elif field.name == "topics" and field.value == "Vec" -%} + {%- set field_value = " Vec>" -%} +{%- elif field.name == "partitions" and field.value == "Vec" -%} + {%- set field_value = " Vec>" -%} +{%- else -%} + {%- set field_value = field.value -%} +{%- endif -%} + +{# Generate Field #} +{%- if field.annotation %} + {% include "field_annotation.j2" -%} + pub {{ field_name }}: {{ field_value }}, +{% else %} + pub {{ field_name }}: {{ field_value }}, +{%- endif -%} + diff --git a/kf-protocol/kf-protocol-build/templates_custom/v2/field_annotation.j2 b/kf-protocol/kf-protocol-build/templates_custom/v2/field_annotation.j2 new file mode 100644 index 0000000000..30ace8bc25 --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/v2/field_annotation.j2 @@ -0,0 +1,18 @@ + +#[fluvio_kf( + {%- if field.annotation.min_version -%} + min_version = {{field.annotation.min_version}}, + {%- endif -%} + + {%- if field.annotation.max_version -%} + max_version = {{field.annotation.max_version}}, + {%- endif -%} + + {%- if field.annotation.ignorable -%} + ignorable, + {%- endif -%} + + {%- if false -%} + default = "{{field.annotation.default}}", + {%- endif -%} +)] diff --git a/kf-protocol/kf-protocol-build/templates_custom/v2/fluvio_base.j2 b/kf-protocol/kf-protocol-build/templates_custom/v2/fluvio_base.j2 new file mode 100644 index 0000000000..22079204dc --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/v2/fluvio_base.j2 @@ -0,0 +1,21 @@ +{# Set Request (request_name) variable #} +{%- set request_name = request.name -%} +{%- set request_trait = "" -%} +{%- set request_trait_clause 
= "" -%} + +{%- if request.name == "KfFetchRequest" or + request.name == "KfProduceRequest" -%} + {%- set request_name = request_name ~ "" -%} + {%- set request_trait = "" -%} + {%- set request_trait_clause = " where R: Debug + Decoder + Encoder " -%} +{%- endif -%} + +{# Set Response (response_name) variable #} +{%- set response_name = response.name -%} +{%- if response.name == "KfFetchResponse" -%} + {%- set response_name = "KfFetchResponse" -%} +{%- endif -%} + +{# Generate Code #} +{% include "header.j2" %} +{% include "content.j2" %} \ No newline at end of file diff --git a/kf-protocol/kf-protocol-build/templates_custom/v2/header.j2 b/kf-protocol/kf-protocol-build/templates_custom/v2/header.j2 new file mode 100644 index 0000000000..7c4d708b7f --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/v2/header.j2 @@ -0,0 +1,21 @@ +{% + if request_name == "KfFetchRequest" or + request_name == "KfProduceRequest" +-%} +use std::fmt::Debug; +use std::marker::PhantomData; + +use kf_protocol::Decoder; +use kf_protocol::Encoder; +{% endif %} + +use kf_protocol_api::ErrorCode; +{% if contains_field(name="isolation_level", value="i8") == true -%} +use kf_protocol_api::Isolation; +{% endif -%} +use kf_protocol_api::Request; + +use kf_protocol_derive::KfDefault; +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; + diff --git a/kf-protocol/kf-protocol-build/templates_custom/v2/impl_request.j2 b/kf-protocol/kf-protocol-build/templates_custom/v2/impl_request.j2 new file mode 100644 index 0000000000..e5e9c03e31 --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/v2/impl_request.j2 @@ -0,0 +1,9 @@ +impl {{request_trait}}Request for {{request_name}}{{request_trait_clause}}{ + const API_KEY: u16 = {{request.annotation.api_key}}; + + const MIN_API_VERSION: i16 = {{request.annotation.min_api_version}}; + const MAX_API_VERSION: i16 = {{request.annotation.max_api_version}}; + const DEFAULT_API_VERSION: i16 = {{request.annotation.max_api_version}}; + + type Response = {{response_name}}; +} diff --git a/kf-protocol/kf-protocol-build/templates_custom/v2/struct.j2 b/kf-protocol/kf-protocol-build/templates_custom/v2/struct.j2 new file mode 100644 index 0000000000..66ba5cb11e --- /dev/null +++ b/kf-protocol/kf-protocol-build/templates_custom/v2/struct.j2 @@ -0,0 +1,48 @@ +{%- set structure_where_clause = "" -%} +{%- set clause_content = "where R: Encoder + Decoder + Default + Debug" -%} + +{# Structure Name - augment with #} +{%- + if structure_name == "FetchableTopicResponse" or + structure_name == "FetchablePartitionResponse" or + structure_name == "TopicProduceData" or + structure_name == "PartitionProduceData" +-%} + {%- set structure_name = structure_name ~ "" -%} +{%- endif -%} + +{# Structure Header - augment with Where clause #} +{%- + if structure_name == "KfFetchRequest" or + structure_name == "KfProduceRequest" or + structure_name == "KfFetchResponse" or + structure_name == "FetchableTopicResponse" or + structure_name == "FetchablePartitionResponse" or + structure_name == "TopicProduceData" or + structure_name == "PartitionProduceData" +-%} + {%- set structure_where_clause = clause_content -%} +{%- endif -%} + +{# Fields - add PhantomData #} +{%- + if structure_name == "KfFetchRequest" or + structure_name == "KfProduceRequest" or + structure_name == "KfFetchResponse" or + structure_name == "FetchableTopicResponse" or + structure_name == "TopicProduceData" +-%} + {%- set add_phantom_data = true -%} +{%- endif -%} + +{# Generate Structure #} 
+#[derive(Encode,Decode,KfDefault,Debug)] +pub struct {{structure_name}} {{structure_where_clause}} { + {% for field in fields -%} + {% include "field.j2" -%} + {% endfor -%} + + {%- if add_phantom_data %} + pub data: PhantomData, + {%- endif %} +} diff --git a/kf-protocol/kf-protocol-build/test-data/FetchResponse.json b/kf-protocol/kf-protocol-build/test-data/FetchResponse.json new file mode 100644 index 0000000000..451cf645bb --- /dev/null +++ b/kf-protocol/kf-protocol-build/test-data/FetchResponse.json @@ -0,0 +1,115 @@ +{ + "apiKey": 1, + "type": "response", + "name": "FetchResponse", + "validVersions": "0-10", + "fields": [ + { + "name": "ThrottleTimeMs", + "type": "int32", + "versions": "1+", + "ignorable": true, + "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." + }, + { + "name": "ErrorCode", + "type": "int16", + "versions": "7+", + "ignorable": false, + "about": "The top level response error code." + }, + { + "name": "SessionId", + "type": "int32", + "versions": "7+", + "default": "0", + "ignorable": false, + "about": "The fetch session ID, or 0 if this is not part of a fetch session." + }, + { + "name": "Topics", + "type": "[]FetchableTopicResponse", + "versions": "0+", + "about": "The response topics.", + "fields": [ + { + "name": "Name", + "type": "string", + "versions": "0+", + "about": "The topic name." + }, + { + "name": "Partitions", + "type": "[]FetchablePartitionResponse", + "versions": "0+", + "about": "The topic partitions.", + "fields": [ + { + "name": "PartitionIndex", + "type": "int32", + "versions": "0+", + "about": "The partiiton index." + }, + { + "name": "ErrorCode", + "type": "int16", + "versions": "0+", + "about": "The error code, or 0 if there was no fetch error." + }, + { + "name": "HighWatermark", + "type": "int64", + "versions": "0+", + "about": "The current high water mark." + }, + { + "name": "LastStableOffset", + "type": "int64", + "versions": "4+", + "default": "-1", + "ignorable": true, + "about": "The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED)" + }, + { + "name": "LogStartOffset", + "type": "int64", + "versions": "5+", + "default": "-1", + "ignorable": true, + "about": "The current log start offset." + }, + { + "name": "Aborted", + "type": "[]AbortedTransaction", + "versions": "4+", + "nullableVersions": "4+", + "ignorable": false, + "about": "The aborted transactions.", + "fields": [ + { + "name": "ProducerId", + "type": "int64", + "versions": "4+", + "about": "The producer id associated with the aborted transaction." + }, + { + "name": "FirstOffset", + "type": "int64", + "versions": "4+", + "about": "The first offset in the aborted transaction." + } + ] + }, + { + "name": "Records", + "type": "bytes", + "versions": "0+", + "nullableVersions": "0+", + "about": "The record data." + } + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/kf-protocol/kf-protocol-build/test-data/MetadataRequest.json b/kf-protocol/kf-protocol-build/test-data/MetadataRequest.json new file mode 100644 index 0000000000..cbb3489bf7 --- /dev/null +++ b/kf-protocol/kf-protocol-build/test-data/MetadataRequest.json @@ -0,0 +1,42 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 3, + "type": "request", + "name": "MetadataRequest", + "validVersions": "0-8", + "fields": [ + // In version 0, an empty array indicates "request metadata for all topics." In version 1 and + // higher, an empty array indicates "request metadata for no topics," and a null array is used to + // indicate "request metadata for all topics." + // + // Versions 2 and 3 are the same as version 1. + // + // Version 4 adds AllowAutoTopicCreation. + // Starting in version 8, authorized operations can be requested for cluster and topic resources. + { "name": "Topics", "type": "[]MetadataRequestTopic", "versions": "0+", "nullableVersions": "1+", + "about": "The topics to fetch metadata for.", "fields": [ + { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", + "about": "The topic name." } + ]}, + { "name": "AllowAutoTopicCreation", "type": "bool", "versions": "4+", "default": "true", "ignorable": false, + "about": "If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so." }, + { "name": "IncludeClusterAuthorizedOperations", "type": "bool", "versions": "8+", + "about": "Whether to include cluster authorized operations." }, + { "name": "IncludeTopicAuthorizedOperations", "type": "bool", "versions": "8+", + "about": "Whether to include topic authorized operations." } + ] +} diff --git a/kf-protocol/kf-protocol-build/test-data/MetadataRequest_clean.json b/kf-protocol/kf-protocol-build/test-data/MetadataRequest_clean.json new file mode 100644 index 0000000000..07fa7baa16 --- /dev/null +++ b/kf-protocol/kf-protocol-build/test-data/MetadataRequest_clean.json @@ -0,0 +1,44 @@ +{ + "apiKey": 3, + "type": "request", + "name": "MetadataRequest", + "validVersions": "0-8", + "fields": [ + { + "name": "Topics", + "type": "[]MetadataRequestTopic", + "versions": "0+", + "nullableVersions": "1+", + "about": "The topics to fetch metadata for.", + "fields": [ + { + "name": "Name", + "type": "string", + "versions": "0+", + "entityType": "topicName", + "about": "The topic name." + } + ] + }, + { + "name": "AllowAutoTopicCreation", + "type": "bool", + "versions": "4+", + "default": "true", + "ignorable": false, + "about": "If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so." + }, + { + "name": "IncludeClusterAuthorizedOperations", + "type": "bool", + "versions": "8+", + "about": "Whether to include cluster authorized operations." + }, + { + "name": "IncludeTopicAuthorizedOperations", + "type": "bool", + "versions": "8+", + "about": "Whether to include topic authorized operations."
+ } + ] +} \ No newline at end of file diff --git a/kf-protocol/kf-protocol-build/test-data/MetadataResponse.json b/kf-protocol/kf-protocol-build/test-data/MetadataResponse.json new file mode 100644 index 0000000000..f54ef2805f --- /dev/null +++ b/kf-protocol/kf-protocol-build/test-data/MetadataResponse.json @@ -0,0 +1,86 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 3, + "type": "response", + "name": "MetadataResponse", + // Version 1 adds fields for the rack of each broker, the controller id, and + // whether or not the topic is internal. + // + // Version 2 adds the cluster ID field. + // + // Version 3 adds the throttle time. + // + // Version 4 is the same as version 3. + // + // Version 5 adds a per-partition offline_replicas field. This field specifies + // the list of replicas that are offline. + // + // Starting in version 6, on quota violation, brokers send out responses before throttling. + // + // Version 7 adds the leader epoch to the partition metadata. + // Starting in version 8, brokers can send authorized operations for topic and cluster. + "validVersions": "0-8", + "fields": [ + { "name": "ThrottleTimeMs", "type": "int32", "versions": "3+", + "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, + { "name": "Brokers", "type": "[]MetadataResponseBroker", "versions": "0+", + "about": "Each broker in the response.", "fields": [ + { "name": "NodeId", "type": "int32", "versions": "0+", "mapKey": true, "entityType": "brokerId", + "about": "The broker ID." }, + { "name": "Host", "type": "string", "versions": "0+", + "about": "The broker hostname." }, + { "name": "Port", "type": "int32", "versions": "0+", + "about": "The broker port." }, + { "name": "Rack", "type": "string", "versions": "1+", "nullableVersions": "1+", "ignorable": true, "default": "null", + "about": "The rack of the broker, or null if it has not been assigned to a rack." } + ]}, + { "name": "ClusterId", "type": "string", "nullableVersions": "2+", "versions": "2+", "ignorable": true, "default": "null", + "about": "The cluster ID that the responding broker belongs to." }, + { "name": "ControllerId", "type": "int32", "versions": "1+", "default": "-1", "ignorable": true, "entityType": "brokerId", + "about": "The ID of the controller broker." }, + { "name": "Topics", "type": "[]MetadataResponseTopic", "versions": "0+", + "about": "Each topic in the response.", "fields": [ + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The topic error, or 0 if there was no error." }, + { "name": "Name", "type": "string", "versions": "0+", "mapKey": true, "entityType": "topicName", + "about": "The topic name."
}, + { "name": "IsInternal", "type": "bool", "versions": "1+", "default": "false", "ignorable": true, + "about": "True if the topic is internal." }, + { "name": "Partitions", "type": "[]MetadataResponsePartition", "versions": "0+", + "about": "Each partition in the topic.", "fields": [ + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The partition error, or 0 if there was no error." }, + { "name": "PartitionIndex", "type": "int32", "versions": "0+", + "about": "The partition index." }, + { "name": "LeaderId", "type": "int32", "versions": "0+", "entityType": "brokerId", + "about": "The ID of the leader broker." }, + { "name": "LeaderEpoch", "type": "int32", "versions": "7+", "default": "-1", "ignorable": true, + "about": "The leader epoch of this partition." }, + { "name": "ReplicaNodes", "type": "[]int32", "versions": "0+", "entityType": "brokerId", + "about": "The set of all nodes that host this partition." }, + { "name": "IsrNodes", "type": "[]int32", "versions": "0+", + "about": "The set of nodes that are in sync with the leader for this partition." }, + { "name": "OfflineReplicas", "type": "[]int32", "versions": "5+", "ignorable": true, + "about": "The set of offline replicas of this partition." } + ]}, + { "name": "TopicAuthorizedOperations", "type": "int32", "versions": "8+", + "about": "32-bit bitfield to represent authorized operations for this topic." } + ]}, + { "name": "ClusterAuthorizedOperations", "type": "int32", "versions": "8+", + "about": "32-bit bitfield to represent authorized operations for this cluster." } + ] +} diff --git a/kf-protocol/kf-protocol-build/test-data/dummy/noop.json b/kf-protocol/kf-protocol-build/test-data/dummy/noop.json new file mode 100644 index 0000000000..e69de29bb2 diff --git a/kf-protocol/kf-protocol-build/test-data/metadata_file_content.json b/kf-protocol/kf-protocol-build/test-data/metadata_file_content.json new file mode 100644 index 0000000000..f4013a8904 --- /dev/null +++ b/kf-protocol/kf-protocol-build/test-data/metadata_file_content.json @@ -0,0 +1,224 @@ +{ + "request": { + "name": "KfMetadataRequest", + "annotation": { + "api_key": 3, + "min_api_version": 0, + "max_api_version": 8 + }, + "fields": [ + { + "name": "topics", + "value": "Option>", + "comment": "/// The topics to fetch metadata for.\n" + }, + { + "name": "allow_auto_topic_creation", + "value": "bool", + "comment": "/// If this is true, the broker may auto-create topics that we requested which do not already\n/// exist, if it is configured to do so.\n", + "annotation": { + "min_version": 4, + "default": "true" + } + }, + { + "name": "include_cluster_authorized_operations", + "value": "bool", + "comment": "/// Whether to include cluster authorized operations.\n", + "annotation": { + "min_version": 8 + } + }, + { + "name": "include_topic_authorized_operations", + "value": "bool", + "comment": "/// Whether to include topic authorized operations.\n", + "annotation": { + "min_version": 8 + } + } + ], + "structures": [ + { + "name": "MetadataRequestTopic", + "fields": [ + { + "name": "name", + "value": "String", + "comment": "/// The topic name.\n" + } + ] + } + ] + }, + "response": { + "name": "KfMetadataResponse", + "fields": [ + { + "name": "throttle_time_ms", + "value": "i32", + "comment": "/// The duration in milliseconds for which the request was throttled due to a quota violation,\n/// or zero if the request did not violate any quota.\n", + "annotation": { + "min_version": 3 + } + }, + { + "name": "brokers", + "value": "Vec", + "comment": "/// Each 
broker in the response.\n" + }, + { + "name": "cluster_id", + "value": "Option<String>", + "comment": "/// The cluster ID that the responding broker belongs to.\n", + "annotation": { + "min_version": 2, + "ignorable": true, + "default": "null" + } + }, + { + "name": "controller_id", + "value": "i32", + "comment": "/// The ID of the controller broker.\n", + "annotation": { + "min_version": 1, + "ignorable": true, + "default": "-1" + } + }, + { + "name": "topics", + "value": "Vec<MetadataResponseTopic>", + "comment": "/// Each topic in the response.\n" + }, + { + "name": "cluster_authorized_operations", + "value": "i32", + "comment": "/// 32-bit bitfield to represent authorized operations for this cluster.\n", + "annotation": { + "min_version": 8 + } + } + ], + "structures": [ + { + "name": "MetadataResponseBroker", + "fields": [ + { + "name": "broker_id", + "value": "i32", + "comment": "/// The broker ID.\n" + }, + { + "name": "host", + "value": "String", + "comment": "/// The broker hostname.\n" + }, + { + "name": "port", + "value": "i32", + "comment": "/// The broker port.\n" + }, + { + "name": "rack", + "value": "Option<String>", + "comment": "/// The rack of the broker, or null if it has not been assigned to a rack.\n", + "annotation": { + "min_version": 1, + "ignorable": true, + "default": "null" + } + } + ] + }, + { + "name": "MetadataResponseTopic", + "fields": [ + { + "name": "error_code", + "value": "i16", + "comment": "/// The topic error, or 0 if there was no error.\n" + }, + { + "name": "topic_name", + "value": "String", + "comment": "/// The topic name.\n" + }, + { + "name": "is_internal", + "value": "bool", + "comment": "/// True if the topic is internal.\n", + "annotation": { + "min_version": 1, + "ignorable": true, + "default": "false" + } + }, + { + "name": "partitions", + "value": "Vec<MetadataResponsePartition>", + "comment": "/// Each partition in the topic.\n" + }, + { + "name": "topic_authorized_operations", + "value": "i32", + "comment": "/// 32-bit bitfield to represent authorized operations for this topic.\n", + "annotation": { + "min_version": 8 + } + } + ] + }, + { + "name": "MetadataResponsePartition", + "fields": [ + { + "name": "error_code", + "value": "i16", + "comment": "/// The partition error, or 0 if there was no error.\n" + }, + { + "name": "partition_index", + "value": "i32", + "comment": "/// The partition index.\n" + }, + { + "name": "leader_id", + "value": "i32", + "comment": "/// The ID of the leader broker.\n" + }, + { + "name": "leader_epoch", + "value": "i32", + "comment": "/// The leader epoch of this partition.\n", + "annotation": { + "min_version": 7, + "ignorable": true, + "default": "-1" + } + }, + { + "name": "replica_nodes", + "value": "Vec<i32>", + "comment": "/// The set of all nodes that host this partition.\n" + }, + { + "name": "isr_nodes", + "value": "Vec<i32>", + "comment": "/// The set of nodes that are in sync with the leader for this partition.\n" + }, + { + "name": "offline_replicas", + "value": "Vec<i32>", + "comment": "/// The set of offline replicas of this partition.\n", + "annotation": { + "min_version": 5, + "ignorable": true + } + } + ] + } + ] + } +} \ No newline at end of file diff --git a/kf-protocol/kf-protocol-core/Cargo.toml b/kf-protocol/kf-protocol-core/Cargo.toml new file mode 100644 index 0000000000..8f05ca3525 --- /dev/null +++ b/kf-protocol/kf-protocol-core/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "kf-protocol-core" +edition = "2018" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + + +[dependencies] +bytes = "0.4.12" +log = "0.4.6" +num-traits = "0.2.6" +num-derive = "0.2.4" +utils =
{ path= "../../utils"} +serde = { version ="1.0.82", features = ['derive'] } \ No newline at end of file diff --git a/kf-protocol/kf-protocol-core/src/buffer.rs b/kf-protocol/kf-protocol-core/src/buffer.rs new file mode 100644 index 0000000000..f1fd0782c6 --- /dev/null +++ b/kf-protocol/kf-protocol-core/src/buffer.rs @@ -0,0 +1,38 @@ + + +#[derive(Default)] +struct MyMessage { + + value1: [u8; 2], + value2: [u8; 4], // u32 +} + +impl MyMessage { + + fn value1(&self) -> i16 { + i16::from_be_bytes(self.value1) + } + + fn set_value1(&mut self,val: i16) { + self.value1 = val.to_be_bytes() + } +} + + + +#[cfg(test)] +mod test { + + use super::MyMessage; + + #[test] + fn test_message() { + let mut m = MyMessage::default(); + + m.set_value1(10); + let bytes = m.value1; + assert_eq!(bytes[0],0); + assert_eq!(bytes[1],10); + } + +} \ No newline at end of file diff --git a/kf-protocol/kf-protocol-core/src/decoder.rs b/kf-protocol/kf-protocol-core/src/decoder.rs new file mode 100644 index 0000000000..02f6be2dc6 --- /dev/null +++ b/kf-protocol/kf-protocol-core/src/decoder.rs @@ -0,0 +1,853 @@ +use std::cmp::Ord; +use std::collections::BTreeMap; +use std::io::Error; +use std::io::ErrorKind; +use std::io::Read; +use std::marker::PhantomData; + +use bytes::Buf; +use bytes::BufMut; +use log::trace; + +use crate::Version; +use super::varint::varint_decode; + +// trait for encoding and decoding using Kafka Protocol +pub trait Decoder: Sized + Default { + + /// decode Kafka compliant protocol values from buf + fn decode_from(src: &mut T,version: Version) -> Result + where T: Buf, + Self: Default + { + let mut decoder = Self::default(); + decoder.decode(src,version)?; + Ok(decoder) + } + + + fn decode(&mut self, src: &mut T, version: Version) -> Result<(), Error> where T: Buf; +} + +pub trait DecoderVarInt { + + fn decode_varint(&mut self, src: &mut T) -> Result<(), Error> where T: Buf; +} + +impl Decoder for Vec +where + M: Default + Decoder, +{ + default fn decode(&mut self, src: &mut T, version: Version) -> Result<(), Error> + where + T: Buf, + { + let mut len: i32 = 0; + len.decode(src,version)?; + + trace!("decoding Vec len:{}", len); + + if len < 1 { + trace!("negative length, skipping"); + return Ok(()); + } + + decode_vec(len,self,src,version)?; + + Ok(()) + } +} + +fn decode_vec(len: i32,item: &mut Vec,src: &mut T, version: Version) -> Result<(),Error> + where T:Buf, M:Default + Decoder { + + for _ in 0..len { + let mut value = ::default(); + value.decode(src,version)?; + item.push(value); + } + + Ok(()) + +} + +impl Decoder for Option> + + where M: Default + Decoder { + + fn decode(&mut self, src: &mut T, version: Version) -> Result<(), Error> + where + T: Buf, + { + let mut len: i32 = 0; + len.decode(src,version)?; + + trace!("decoding Vec len:{}", len); + + if len < 0 { + *self = None; + return Ok(()); + } + + let mut item: Vec = vec![]; + + decode_vec(len,&mut item,src,version)?; + *self = Some(item); + + Ok(()) + } + + +} + +impl Decoder for Option +where + M: Default + Decoder, +{ + default fn decode(&mut self, src: &mut T, version: Version) -> Result<(), Error> + where + T: Buf, + { + let mut some = false; + some.decode(src,version)?; + if some { + let mut value = ::default(); + value.decode(src,version)?; + *self = Some(value) + } else { + *self = None + } + Ok(()) + } +} + + + +impl Decoder for PhantomData +where + M: Default + Decoder, +{ + default fn decode(&mut self, _src: &mut T, _version: Version) -> Result<(), Error> + where + T: Buf, + { + Ok(()) + } +} + +impl Decoder for BTreeMap 
+where + K: Decoder + Ord, + V: Decoder, +{ + fn decode<T>(&mut self, src: &mut T, version: Version) -> Result<(), Error> + where + T: Buf, + { + let mut len: u16 = 0; + len.decode(src,version)?; + + let mut map: BTreeMap<K, V> = BTreeMap::new(); + for _i in 0..len { + let mut key = K::default(); + key.decode(src,version)?; + let mut value = V::default(); + value.decode(src,version)?; + map.insert(key, value); + } + + *self = map; + Ok(()) + } +} + +impl Decoder for bool { + fn decode<T>(&mut self, src: &mut T,_version: Version) -> Result<(), Error> + where + T: Buf, + { + if src.remaining() < 1 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough buf for bool", + )); + } + let value = src.get_u8(); + + match value { + 0 => *self = false, + 1 => *self = true, + _ => { + return Err(Error::new(ErrorKind::InvalidData, "not valid bool value")); + } + }; + + Ok(()) + } +} + +impl Decoder for i8 { + fn decode<T>(&mut self, src: &mut T,_version: Version) -> Result<(), Error> + where + T: Buf, + { + if src.remaining() < 1 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough buf for i8", + )); + } + let value = src.get_i8(); + *self = value; + Ok(()) + } +} + +impl Decoder for u8 { + fn decode<T>(&mut self, src: &mut T,_version: Version) -> Result<(), Error> + where + T: Buf, + { + if src.remaining() < 1 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough buf for u8", + )); + } + let value = src.get_u8(); + *self = value; + Ok(()) + } +} + +impl Decoder for i16 { + fn decode<T>(&mut self, src: &mut T,_version: Version) -> Result<(), Error> + where + T: Buf, + { + if src.remaining() < 2 { + return Err(Error::new(ErrorKind::UnexpectedEof, "can't read i16")); + } + let value = src.get_i16_be(); + *self = value; + Ok(()) + } +} + +impl Decoder for u16 { + fn decode<T>(&mut self, src: &mut T,_version: Version) -> Result<(), Error> + where + T: Buf, + { + if src.remaining() < 2 { + return Err(Error::new(ErrorKind::UnexpectedEof, "can't read u16")); + } + let value = src.get_u16_be(); + *self = value; + Ok(()) + } +} + +impl Decoder for Option<u16> { + fn decode<T>(&mut self, src: &mut T,_version: Version) -> Result<(), Error> + where + T: Buf, + { + if src.remaining() < 1 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "can't read option flag for u16", + )); + } + let some_or_none = src.get_i8(); + if some_or_none == 0 { + *self = None; + return Ok(()); + } + + if src.remaining() < 2 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "can't read Option<u16>", + )); + } + let value = src.get_u16_be(); + *self = Some(value); + Ok(()) + } +} + +impl Decoder for i32 { + fn decode<T>(&mut self, src: &mut T,_version: Version) -> Result<(), Error> + where + T: Buf, + { + if src.remaining() < 4 { + return Err(Error::new(ErrorKind::UnexpectedEof, "can't read i32")); + } + let value = src.get_i32_be(); + trace!("i32: {:#x} => {}", &value, &value); + *self = value; + Ok(()) + } +} + +impl Decoder for u32 { + fn decode<T>(&mut self, src: &mut T,_version: Version) -> Result<(), Error> + where + T: Buf, + { + if src.remaining() < 4 { + return Err(Error::new(ErrorKind::UnexpectedEof, "can't read u32")); + } + let value = src.get_u32_be(); + trace!("u32: {:#x} => {}", &value, &value); + *self = value; + Ok(()) + } +} + +impl Decoder for i64 { + fn decode<T>(&mut self, src: &mut T,_version: Version) -> Result<(), Error> + where + T: Buf, + { + if src.remaining() < 8 { + return Err(Error::new(ErrorKind::UnexpectedEof, "can't read i64")); + } + let value = src.get_i64_be(); + trace!("i64: {:#x} => {}",
&value, &value); + *self = value; + Ok(()) + } +} + +impl DecoderVarInt for i64 { + fn decode_varint<T>(&mut self, src: &mut T) -> Result<(), Error> + where + T: Buf, + { + let (value, _) = varint_decode(src)?; + *self = value; + Ok(()) + } +} + +impl Decoder for Option<String> { + fn decode<T>(&mut self, src: &mut T,version: Version) -> Result<(), Error> + where + T: Buf, + { + let mut len: i16 = 0; + len.decode(src,version)?; + + if len < 0 { + *self = None; + return Ok(()); + } + + if len == 0 { + *self = Some(String::default()); + } + + let value = decode_string(len,src)?; + *self = Some(value); + Ok(()) + } +} + +fn decode_string<T>(len: i16, src: &mut T) -> Result<String, Error> where T: Buf { + let mut value = String::default(); + let read_size = src.take(len as usize).reader().read_to_string(&mut value)?; + + if read_size != len as usize { + return Err(Error::new(ErrorKind::UnexpectedEof, "not enough string")); + } + Ok(value) +} + +impl Decoder for String { + fn decode<T>(&mut self, src: &mut T,_version: Version) -> Result<(), Error> + where + T: Buf, + { + if src.remaining() < 2 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "can't read string length", + )); + } + let len = src.get_i16_be(); + if len <= 0 { + return Ok(()); + } + + let value = decode_string(len,src)?; + *self = value; + Ok(()) + } +} + + +impl Decoder for Vec<u8> { + + fn decode<T>(&mut self, src: &mut T,version: Version) -> Result<(), Error> + where + T: Buf, + { + let mut len: i32 = 0; + len.decode(src,version)?; + + trace!("decoding Vec<u8> len:{}", len); + + if len < 0 { + trace!("negative length, treat as empty values"); + return Ok(()); + } + + if src.remaining() < len as usize { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough bytes", + )); + } + + let mut buf = src.take(len as usize); + self.put(&mut buf); + if self.len() != len as usize { + return Err(Error::new( + ErrorKind::UnexpectedEof, + format!( + "Vec<u8>, expecting {} but received: {}", + len, + self.len() + ), + )); + } + + Ok(()) + + } +} + +impl DecoderVarInt for Vec<u8> { + fn decode_varint<T>(&mut self, src: &mut T) -> Result<(), Error> + where + T: Buf, + { + let mut len: i64 = 0; + len.decode_varint(src)?; + + if len < 1 { + return Ok(()); + } + + let mut buf = src.take(len as usize); + self.put(&mut buf); + if self.len() != len as usize { + return Err(Error::new( + ErrorKind::UnexpectedEof, + format!( + "varint: Vec<u8>, expecting {} but received: {}", + len, + self.len() + ), + )); + } + + Ok(()) + } +} + +fn decode_option_vec_u<T>(array: &mut Option<Vec<u8>>, src: &mut T, len: isize) -> Result<(), Error> +where + T: Buf, +{ + + if len < 0 { + *array = None; + return Ok(()); + } + + if len == 0 { + *array = Some(Vec::new()); + return Ok(()); + } + + let mut buf = src.take(len as usize); + let mut value: Vec<u8> = Vec::new(); + value.put(&mut buf); + if value.len() != len as usize { + return Err(Error::new( + ErrorKind::UnexpectedEof, + format!( + "Option<Vec<u8>>, expecting {} but received: {}", + len, + value.len() + ), + )); + } + + *array = Some(value); + + Ok(()) +} + +impl DecoderVarInt for Option<Vec<u8>> { + fn decode_varint<T>(&mut self, src: &mut T) -> Result<(), Error> + where + T: Buf, + { + let mut len: i64 = 0; + len.decode_varint(src)?; + + decode_option_vec_u(self, src, len as isize) + } +} + +#[cfg(test)] +mod test { + + use crate::Decoder; + use crate::DecoderVarInt; + use crate::Version; + + use bytes::Buf; + use std::io::Cursor; + use std::io::Error; + + #[test] + fn test_decode_i8_not_enough() { + let data = []; // no values + let mut value: i8 = 0; + let result =
value.decode(&mut Cursor::new(&data),0); + assert!(result.is_err()); + } + + #[test] + fn test_decode_i8() { + let data = [0x12]; + + let mut value: i8 = 0; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_ok()); + assert_eq!(value, 18); + } + + #[test] + fn test_decode_u8_not_enough() { + let data = []; // no values + let mut value: u8 = 0; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_err()); + } + + #[test] + fn test_decode_u8() { + let data = [0x12]; + + let mut value: u8 = 0; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_ok()); + assert_eq!(value, 18); + } + + #[test] + fn test_decode_i16_not_enough() { + let data = [0x11]; // only one value + + let mut value: i16 = 0; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_err()); + } + + #[test] + fn test_decode_i16() { + let data = [0x00, 0x05]; + + let mut value: i16 = 0; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_ok()); + assert_eq!(value, 5); + } + + #[test] + fn test_decode_u16_not_enough() { + let data = [0x11]; // only one value + + let mut value: u16 = 0; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_err()); + } + + #[test] + fn test_decode_u16() { + let data = [0x00, 0x05]; + + let mut value: u16 = 0; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_ok()); + assert_eq!(value, 5); + } + + #[test] + fn test_decode_option_u16_none() { + let data = [0x00]; + + let mut value: Option<u16> = None; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_ok()); + assert_eq!(value, None); + } + + #[test] + fn test_decode_option_u16_val() { + let data = [0x01, 0x00, 0x10]; + + let mut value: Option<u16> = None; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_ok()); + assert_eq!(value, Some(16)); + } + + #[test] + fn test_decode_i32_not_enough() { + let data = [0x11, 0x11, 0x00]; // still need one more + + let mut value: i32 = 0; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_err()); + } + + #[test] + fn test_decode_i32() { + let data = [0x00, 0x00, 0x00, 0x10]; + + let mut value: i32 = 0; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_ok()); + assert_eq!(value, 16); + } + + #[test] + fn test_decode_i32_2() { + let data = [0x00, 0x00, 0x00, 0x01]; + + let mut value: i32 = 0; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_ok()); + assert_eq!(value, 1); + } + + #[test] + fn test_decode_i64_not_enough() { + let data = [0x11, 0x11, 0x00]; // still need five more + + let mut value: i64 = 0; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_err()); + } + + #[test] + fn test_decode_i64() { + let data = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20]; + + let mut value: i64 = 0; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_ok()); + assert_eq!(value, 32); + } + + #[test] + fn test_decode_invalid_string_not_len() { + let data = [0x11]; // doesn't have right bytes + + let mut value = String::from(""); + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_err()); + } + + #[test] + fn test_decode_invalid_string() { + let data = [0x00, 0x0a, 0x63]; // len and string doesn't match + + let mut value = String::from(""); + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_err()); + } + + #[test] + fn test_decode_null_option_string() {
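+ // Kafka encodes a null string as a length of -1 (0xff, 0xff); the + // decoder is expected to map that back to None.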
+ let data = [0xff, 0xff]; + + let mut value: Option<String> = Some(String::from("test")); + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_ok()); + assert!(value.is_none()); + } + + #[test] + fn test_decode_some_option_string() { + let data = [0x00, 0x02, 0x77, 0x6f]; // len 2 followed by "wo" + + let mut value: Option<String> = None; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_ok()); + assert!(value.is_some()); + assert_eq!(value.unwrap(), "wo"); + } + + #[test] + fn test_decode_string_existing_value() { + let src = [0x0, 0x7, 0x30, 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x30]; + let mut decode_target = "123".to_string(); + let result = decode_target.decode(&mut Cursor::new(&src),0); + assert!(result.is_ok()); + assert_eq!(decode_target, "0.0.0.0".to_string()); + } + + #[test] + fn test_decode_string() { + let data = [ + 0x00, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x2d, 0x31, + ]; + + let mut value = String::from(""); + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_ok()); + assert_eq!(value, "consumer-1"); + } + + #[test] + fn test_decode_bool_not_enough() { + let data = []; // no values + + let mut value: bool = false; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_err()); + } + + #[test] + fn test_decode_bool() { + let data = [0x1]; + + let mut value: bool = false; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_ok()); + assert_eq!(value, true); + } + + #[test] + fn test_decode_bool_invalid_value() { + let data = [0x23]; // not bool + + let mut value: bool = false; + let result = value.decode(&mut Cursor::new(&data),0); + assert!(result.is_err()); + } + + #[test] + fn test_decode_valid_string_vectors() { + // array of strings with "test" + let data = [0x00, 0x00, 0x00, 0x01, 0x00, 0x04, 0x74, 0x65, 0x73, 0x74]; + + let mut values: Vec<String> = Vec::new(); + let result = values.decode(&mut Cursor::new(&data),0); + assert!(result.is_ok()); + assert_eq!(values.len(), 1); + let first_str = &values[0]; + assert_eq!(first_str, "test"); + } + + #[test] + fn test_decode_varint_trait() { + let data = [0x7e]; + + let mut value: i64 = 0; + let result = value.decode_varint(&mut Cursor::new(&data)); + assert!(result.is_ok()); + assert_eq!(value, 63); + } + + #[test] + fn test_decode_varint_vec8() { + let data = [0x06, 0x64, 0x6f, 0x67]; + + let mut value: Vec<u8> = Vec::new(); + let result = value.decode_varint(&mut Cursor::new(&data)); + assert!(result.is_ok()); + assert_eq!(value.len(), 3); + assert_eq!(value[0], 0x64); + } + + #[test] + fn test_decode_varint_vec8_fail() { + let data = [0x06, 0x64, 0x6f]; + + let mut value: Vec<u8> = Vec::new(); + let result = value.decode_varint(&mut Cursor::new(&data)); + assert!(result.is_err()); + } + + #[test] + fn test_decode_varint_array_option_vec8_null() { + let data = [0x01]; + + let mut value: Option<Vec<u8>> = Some(Vec::new()); + let result = value.decode_varint(&mut Cursor::new(&data)); + assert!(result.is_ok()); + assert!(value.is_none()); + } + + #[test] + fn test_varint_decode_array_option_vec8_simple_array() { + let data = [0x06, 0x64, 0x6f, 0x67, 0x00]; // should only read first 3 + + let mut value: Option<Vec<u8>> = Some(Vec::new()); + let result = value.decode_varint(&mut Cursor::new(&data)); + assert!(result.is_ok()); + assert!(value.is_some()); + let array = value.unwrap(); + assert_eq!(array.len(), 3); + assert_eq!(array[0], 0x64); + } + + #[derive(Default)] + struct TestRecord { + value: i8, + value2:
i8 + } + + impl Decoder for TestRecord { + fn decode<T>(&mut self, src: &mut T,version: Version) -> Result<(), Error> + where + T: Buf, + { + self.value.decode(src,version)?; + if version > 1 { + self.value2.decode(src,version)?; + } + Ok(()) + } + } + + #[test] + fn test_decoding_struct() { + let data = [0x06]; + + // v1 + let result = TestRecord::decode_from(&mut Cursor::new(&data),0); + assert!(result.is_ok()); + let record = result.unwrap(); + assert_eq!(record.value, 6); + assert_eq!(record.value2, 0); + + // v2 + let data2 = [0x06,0x09]; + let record2 = TestRecord::decode_from(&mut Cursor::new(&data2),2).expect("decode"); + assert_eq!(record2.value, 6); + assert_eq!(record2.value2, 9); + + } + +} diff --git a/kf-protocol/kf-protocol-core/src/encoder.rs b/kf-protocol/kf-protocol-core/src/encoder.rs new file mode 100644 index 0000000000..352d5e2283 --- /dev/null +++ b/kf-protocol/kf-protocol-core/src/encoder.rs @@ -0,0 +1,792 @@ +// encode values +use std::io::Error; +use std::io::ErrorKind; +use std::io::Write; +use std::collections::BTreeMap; +use std::marker::PhantomData; + +use bytes::BufMut; +use bytes::BytesMut; +use bytes::Bytes; +use log::trace; + +use crate::Version; + +use super::varint::variant_encode; +use super::varint::variant_size; + + +// trait for encoding using Kafka Protocol +pub trait Encoder { + + /// size of this object in bytes + fn write_size(&self,version: Version) -> usize; + + + /// encoding contents for buffer + fn encode<T>(&self, dest: &mut T,version: Version) -> Result<(), Error> where T: BufMut; + + fn as_bytes(&self, version: Version) -> Result<Bytes, Error> { + trace!("encoding as bytes"); + let mut out = vec![]; + self.encode(&mut out,version)?; + let mut buf = BytesMut::with_capacity(out.len()); + buf.put_slice(&out); + Ok(buf.freeze()) + } +} + + +pub trait EncoderVarInt { + + fn var_write_size(&self) -> usize; + + /// encoding contents for buffer + fn encode_varint<T>(&self, dest: &mut T) -> Result<(), Error> where T: BufMut; +} + +impl<M> Encoder for Vec<M> where M: Encoder, +{ + + fn write_size(&self,version: Version) -> usize { + self.iter().fold(4, |sum, val| sum + val.write_size(version) ) + } + + fn encode<T>(&self, dest: &mut T,version: Version) -> Result<(), Error> + where + T: BufMut, + { + if dest.remaining_mut() < 4 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough capacity for vec", + )); + } + + dest.put_u32_be(self.len() as u32); + + for ref v in self { + v.encode(dest,version)?; + } + + Ok(()) + } +} + +impl<M> Encoder for Option<M> where M: Encoder { + + default fn write_size(&self,version: Version) -> usize { + + match *self { + Some(ref value) => { + true.write_size(version) + value.write_size(version) + }, + None => false.write_size(version) + } + } + + default fn encode<T>(&self, dest: &mut T,version: Version) -> Result<(), Error> where T: BufMut { + + match *self { + Some(ref value) => { + true.encode(dest,version)?; + value.encode(dest,version) + }, + None => false.encode(dest,version) + } + } +} + + +impl<M> Encoder for PhantomData<M> where M: Encoder { + + fn write_size(&self,_version: Version) -> usize { + + 0 + } + + fn encode<T>(&self, _dest: &mut T,_version: Version) -> Result<(), Error> where T: BufMut { + Ok(()) + } +} + + +impl<M> Encoder for Option<Vec<M>> where M: Encoder, +{ + + fn write_size(&self,version: Version) -> usize { + match self { + Some(inner) => inner.write_size(version), + None => 4 + } + } + + fn encode<T>(&self, dest: &mut T,version: Version) -> Result<(), Error> + where + T: BufMut, + { + if self.is_none() { + let len: i32 = -1;
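+ // a null array is written as a length of -1; the decoder side + // treats any negative length as None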
len.encode(dest,version)?; + trace!("Option: None encode as: {:X?}",-1 as i32); + return Ok(()); + } + + let inner = self.as_ref().unwrap(); + inner.encode(dest,version) + } +} + + +impl<K, V> Encoder for BTreeMap<K, V> where K: Encoder, V: Encoder { + + fn write_size(&self,version: Version) -> usize { + + let mut len: usize = (0 as u16).write_size(version); + + for (key, value) in self.iter() { + len += key.write_size(version); + len += value.write_size(version); + } + + len + + } + + fn encode<T>(&self, dest: &mut T,version: Version) -> Result<(), Error> where T: BufMut { + + let len = self.len() as u16; + len.encode(dest,version)?; + + for (key, value) in self.iter() { + key.encode(dest,version)?; + value.encode(dest,version)?; + } + + Ok(()) + + } + +} + + +impl Encoder for bool { + + fn write_size(&self,_version: Version) -> usize { + 1 + } + + fn encode<T>(&self, dest: &mut T,_version: Version) -> Result<(), Error> + where + T: BufMut, + { + if dest.remaining_mut() < 1 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough capacity for bool", + )); + } + if *self { + dest.put_i8(1); + } else { + dest.put_i8(0); + } + Ok(()) + } +} + +impl Encoder for i8 { + + fn write_size(&self,_version: Version) -> usize { + 1 + } + + fn encode<T>(&self, dest: &mut T,_version: Version) -> Result<(), Error> + where + T: BufMut, + { + if dest.remaining_mut() < 1 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough capacity for i8", + )); + } + dest.put_i8(*self); + Ok(()) + } +} + +impl Encoder for u8 { + + fn write_size(&self,_version: Version) -> usize { + 1 + } + + fn encode<T>(&self, dest: &mut T,_version: Version) -> Result<(), Error> + where + T: BufMut, + { + if dest.remaining_mut() < 1 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough capacity for u8", + )); + } + dest.put_u8(*self); + Ok(()) + } +} + +impl Encoder for i16 { + + fn write_size(&self,_version: Version) -> usize { + 2 + } + + fn encode<T>(&self, dest: &mut T,_version: Version) -> Result<(), Error> + where + T: BufMut, + { + if dest.remaining_mut() < 2 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough capacity for i16", + )); + } + dest.put_i16_be(*self); + trace!("encoding i16: {:#x}",*self); + Ok(()) + } +} + +impl Encoder for u16 { + + fn write_size(&self,_version: Version) -> usize { + 2 + } + + fn encode<T>(&self, dest: &mut T,_version: Version) -> Result<(), Error> + where + T: BufMut, + { + if dest.remaining_mut() < 2 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough capacity for u16", + )); + } + dest.put_u16_be(*self); + trace!("encoding u16: {:#x}",*self); + Ok(()) + } +} + +impl Encoder for Option<u16> { + + fn write_size(&self,_version: Version) -> usize { + + if self.is_none() { + 1 + } else { + 3 + } + } + + + fn encode<T>(&self, dest: &mut T,_version: Version) -> Result<(), Error> + where + T: BufMut, + { + if dest.remaining_mut() < 1 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough capacity for len of 1", + )); + } + if self.is_none() { + dest.put_i8(0); + return Ok(()); + } + + dest.put_i8(1); + if dest.remaining_mut() < 2 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough capacity for u16", + )); + } + let u16_value = self.as_ref().unwrap(); + dest.put_u16_be(*u16_value); + + Ok(()) + } +} + +impl Encoder for i32 { + + fn write_size(&self,_version: Version) -> usize { + 4 + } + + fn encode<T>(&self, dest: &mut T,_version: Version) -> Result<(), Error> + where + T: BufMut, + { + if dest.remaining_mut() < 4 { + return
Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough capacity for i32", + )); + } + dest.put_i32_be(*self); + trace!("encoding i32: {:#x}",*self); + Ok(()) + } +} + +impl Encoder for u32 { + + fn write_size(&self,_version: Version) -> usize { + 4 + } + + fn encode<T>(&self, dest: &mut T,_version: Version) -> Result<(), Error> + where + T: BufMut, + { + if dest.remaining_mut() < 4 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough capacity for u32", + )); + } + dest.put_u32_be(*self); + Ok(()) + } +} + +impl Encoder for i64 { + + fn write_size(&self,_version: Version) -> usize { + 8 + } + + fn encode<T>(&self, dest: &mut T,_version: Version) -> Result<(), Error> + where + T: BufMut, + { + if dest.remaining_mut() < 8 { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough capacity for i64", + )); + } + dest.put_i64_be(*self); + Ok(()) + } +} + +impl EncoderVarInt for i64 { + + fn var_write_size(&self) -> usize { + variant_size(*self) + } + + fn encode_varint<T>(&self, dest: &mut T) -> Result<(), Error> + where + T: BufMut, + { + variant_encode(dest, *self)?; + Ok(()) + } +} + +impl Encoder for Option<String> { + + fn write_size(&self,_version: Version) -> usize { + + if self.is_none() { + 2 + } else { + 2 + self.as_ref().unwrap().len() + } + } + + fn encode<T>(&self, dest: &mut T,version: Version) -> Result<(), Error> + where + T: BufMut, + { + + if self.is_none() { + let len: i16 = -1; + len.encode(dest,version)?; + trace!("Option: None encode as: {:X?}",-1 as i16); + return Ok(()); + } + + let str_value = self.as_ref().unwrap(); + + str_value.encode(dest,version) + } +} + +impl Encoder for String { + + fn write_size(&self,_version: Version) -> usize { + + 2 + self.len() + } + + + fn encode<T>(&self, dest: &mut T,_version: Version) -> Result<(), Error> + where + T: BufMut, + { + if dest.remaining_mut() < 2 + self.len() { + return Err(Error::new( + ErrorKind::UnexpectedEof, + "not enough capacity for string", + )); + } + + dest.put_u16_be(self.len() as u16); + + let mut writer = dest.writer(); + let bytes_written = writer.write(self.as_bytes())?; + + if bytes_written != self.len() { + return Err(Error::new( + ErrorKind::UnexpectedEof, + format!( + "out of {} bytes, {} not written", + self.len(), + self.len() - bytes_written + ), + )); + } + + Ok(()) + } +} + +impl EncoderVarInt for Option<Vec<u8>> { + + fn var_write_size(&self) -> usize { + + if self.is_none() { + let len: i64 = -1; + return variant_size(len) + } + + let b_values = self.as_ref().unwrap(); + + let len: i64 = b_values.len() as i64; + let bytes = variant_size(len); + + bytes + b_values.len() + } + + + fn encode_varint<T>(&self, dest: &mut T) -> Result<(), Error> + where + T: BufMut, + { + if self.is_none() { + let len: i64 = -1; + variant_encode(dest, len)?; + return Ok(()); + } + + let b_values = self.as_ref().unwrap(); + + let len: i64 = b_values.len() as i64; + len.encode_varint(dest)?; + + if dest.remaining_mut() < b_values.len() { + return Err(Error::new( + ErrorKind::UnexpectedEof, + format!("not enough capacity for byte array: {}", b_values.len()), + )); + } + + dest.put_slice(b_values); + + Ok(()) + } +} + + +#[cfg(test)] +mod test { + + use bytes::BytesMut; + use bytes::BufMut; + use std::io::Cursor; + use std::io::Error as IoError; + + use crate::Encoder; + use crate::EncoderVarInt; + use crate::Version; + + + #[test] + fn test_encode_i8() { + let mut dest = vec![]; + let value: i8 = 5; + let result = value.encode(&mut dest,0); + assert!(result.is_ok()); + assert_eq!(dest.len(), 1); + assert_eq!(dest[0], 0x05);
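+ // write_size should agree with the number of bytes encode produced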
assert_eq!(value.write_size(0),1); + } + + #[test] + fn test_encode_u8() { + let mut dest = vec![]; + let value: u8 = 8; + let result = value.encode(&mut dest,0); + assert!(result.is_ok()); + assert_eq!(dest.len(), 1); + assert_eq!(dest[0], 0x08); + assert_eq!(value.write_size(0),1); + } + + #[test] + fn test_encode_i16_not_enough() { + let mut buf = BytesMut::with_capacity(1); + let mut dest = Cursor::new(&mut buf); + let value: i16 = 0; + let result = value.encode(&mut dest,0); + assert!(result.is_err()); + } + + #[test] + fn test_encode_i16() { + let mut dest = vec![]; + let value: i16 = 5; + let result = value.encode(&mut dest,0); + assert!(result.is_ok()); + assert_eq!(dest.len(), 2); + assert_eq!(dest[0], 0x00); + assert_eq!(dest[1], 0x05); + assert_eq!(value.write_size(0),2); + } + + #[test] + fn test_encode_u16() { + let mut dest = vec![]; + let value: u16 = 16; + let result = value.encode(&mut dest,0); + assert!(result.is_ok()); + assert_eq!(dest.len(), 2); + assert_eq!(dest[0], 0x00); + assert_eq!(dest[1], 0x10); + assert_eq!(value.write_size(0),2); + } + + #[test] + fn test_encode_option_u16_none() { + let mut dest = vec![]; + let value: Option<u16> = None; + let result = value.encode(&mut dest,0); + assert!(result.is_ok()); + assert_eq!(dest.len(), 1); + assert_eq!(dest[0], 0x00); + assert_eq!(value.write_size(0),1); + } + + #[test] + fn test_encode_option_u16_with_val() { + let mut dest = vec![]; + let value: Option<u16> = Some(16); + let result = value.encode(&mut dest,0); + assert!(result.is_ok()); + assert_eq!(dest.len(), 3); + assert_eq!(dest[0], 0x01); + assert_eq!(dest[1], 0x00); + assert_eq!(dest[2], 0x10); + assert_eq!(value.write_size(0),3); + } + + #[test] + fn test_encode_i32() { + let mut dest = vec![]; + let value: i32 = 5; + let result = value.encode(&mut dest,0); + assert!(result.is_ok()); + assert_eq!(dest.len(), 4); + assert_eq!(dest[3], 0x05); + assert_eq!(value.write_size(0),4); + } + + #[test] + fn test_encode_i64() { + let mut dest = vec![]; + let value: i64 = 5; + let result = value.encode(&mut dest,0); + assert!(result.is_ok()); + assert_eq!(dest.len(), 8); + assert_eq!(dest[0], 0x00); + assert_eq!(dest[7], 0x05); + assert_eq!(value.write_size(0),8); + } + + #[test] + fn test_encode_string_option_none() { + let mut dest = vec![]; + let value: Option<String> = None; + let result = value.encode(&mut dest,0); + assert!(result.is_ok()); + assert_eq!(dest.len(), 2); + assert_eq!(dest[0], 0xff); + assert_eq!(dest[1], 0xff); + assert_eq!(value.write_size(0),2); + } + + #[test] + fn test_encode_string_option_some() { + let mut dest = vec![]; + let value: Option<String> = Some(String::from("wo")); + let result = value.encode(&mut dest,0); + assert!(result.is_ok()); + assert_eq!(dest.len(), 4); + assert_eq!(dest[0], 0x00); + assert_eq!(dest[1], 0x02); + assert_eq!(dest[2], 0x77); + assert_eq!(dest[3], 0x6f); + assert_eq!(value.write_size(0),4); + } + + #[test] + fn test_encode_string() { + let mut dest = vec![]; + let value = String::from("wo"); + let result = value.encode(&mut dest,0); + assert!(result.is_ok()); + assert_eq!(dest.len(), 4); + assert_eq!(dest[0], 0x00); + assert_eq!(dest[1], 0x02); + assert_eq!(dest[2], 0x77); + assert_eq!(dest[3], 0x6f); + assert_eq!(value.write_size(0),4); + } + + + #[test] + fn test_encode_bool() { + let mut dest = vec![]; + let value = true; + let result = value.encode(&mut dest,0); + assert!(result.is_ok()); + assert_eq!(dest.len(), 1); + assert_eq!(dest[0], 0x01); + assert_eq!(value.write_size(0),1); + } + + #[test] + fn test_encode_string_vectors()
{ + let mut dest = vec![]; + let value: Vec<String> = vec![String::from("test")]; + let result = value.encode(&mut dest,0); + assert!(result.is_ok()); + assert_eq!(dest.len(), 10); + assert_eq!(dest[3], 0x01); + assert_eq!(dest[9], 0x74); + assert_eq!(value.write_size(0),10); // vec len 4: string len: 2, string 4 + } + + #[test] + fn test_encode_u8_vectors() { + let mut dest = vec![]; + let value: Vec<u8> = vec![0x10, 0x11]; + let result = value.encode(&mut dest,0); + assert!(result.is_ok()); + assert_eq!(dest.len(), 6); + assert_eq!(dest[3], 0x02); + assert_eq!(dest[5], 0x11); + assert_eq!(value.write_size(0),6); + } + + #[test] + fn test_varint_encode_array_option_vec8_none() { + let mut dest = vec![]; + let value: Option<Vec<u8>> = None; + let result = value.encode_varint(&mut dest); + assert!(result.is_ok()); + assert_eq!(dest.len(), 1); + assert_eq!(dest[0], 0x01); + } + + #[test] + fn test_varint_encode_array_option_vec8_simple_array() { + let mut dest = vec![]; + let value: Option<Vec<u8>> = Some(vec![0x64, 0x6f, 0x67]); + let result = value.encode_varint(&mut dest); + assert!(result.is_ok()); + assert_eq!(dest.len(), 4); + } + + + + #[derive(Default)] + struct TestRecord { + value: i8, + value2: i8 + } + + impl Encoder for TestRecord { + + fn write_size(&self,version: Version) -> usize { + self.value.write_size(version) + + { + if version > 1 { + self.value2.write_size(version) + } else { + 0 + } + } + + } + + fn encode<T>(&self, dest: &mut T,version: Version) -> Result<(), IoError> + where T: BufMut + { + self.value.encode(dest,version)?; + if version > 1 { + self.value2.encode(dest,version)?; + } + Ok(()) + } + } + + #[test] + fn test_encoding_struct() { + + // v1 + let mut dest = vec![]; + let mut record = TestRecord::default(); + record.value = 20; + record.value2 = 10; + record.encode(&mut dest,0).expect("encode"); + assert_eq!(dest.len(),1); + assert_eq!(dest[0],20); + assert_eq!(record.write_size(0),1); + + // v2 + let mut dest2 = vec![]; + record.encode(&mut dest2,2).expect("v2 encode"); + assert_eq!(dest2.len(),2); + assert_eq!(dest2[1],10); + + } + + +} diff --git a/kf-protocol/kf-protocol-core/src/lib.rs b/kf-protocol/kf-protocol-core/src/lib.rs new file mode 100644 index 0000000000..8a5421ff0e --- /dev/null +++ b/kf-protocol/kf-protocol-core/src/lib.rs @@ -0,0 +1,20 @@ +#![feature(specialization)] + +mod decoder; +mod encoder; +mod varint; +mod zerocopy; +// mod buffer; + +pub use self::decoder::Decoder; +pub use self::decoder::DecoderVarInt; +pub use self::encoder::Encoder; +pub use self::encoder::EncoderVarInt; + + +pub mod bytes { + pub use bytes::Buf; + pub use bytes::BufMut; +} + +pub type Version = i16; diff --git a/kf-protocol/kf-protocol-core/src/varint.rs b/kf-protocol/kf-protocol-core/src/varint.rs new file mode 100644 index 0000000000..4afbe10a85 --- /dev/null +++ b/kf-protocol/kf-protocol-core/src/varint.rs @@ -0,0 +1,150 @@ +// varint decoder +// Google Protocol Buffers.
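+// Zigzag encoding maps signed integers to unsigned so small magnitudes stay small: +// n -> (n << 1) ^ (n >> 63), i.e. 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ... +// The zigzagged value is then written 7 bits at a time, least significant group +// first, with the high bit of each byte set when more bytes follow. For example +// (matching the tests below): 63 zigzags to 126 and fits in one byte, 0x7e, +// while 8191 zigzags to 16382 and is written as two bytes, 0xfe 0x7f.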
+// also kafka: ByteUtils.java + +use std::io::Error; +use std::io::ErrorKind; + +use log::trace; +use bytes::Buf; +use bytes::BufMut; + + + +// zigzag decoding +pub fn varint_decode<T>(buf: &mut T) -> Result<(i64,usize),Error> where T: Buf { + + let mut num: i64 = 0; + let mut shift: usize = 0; + + loop { + if buf.remaining() == 0 { + return Err(Error::new(ErrorKind::UnexpectedEof,"no more bytes left")); + } + + let b = buf.get_u8(); + trace!("var byte: {:#X}",b); + + num |= ((b & 0x7f) as i64) << shift; + shift += 7; + + if b & 0x80 == 0 { + break; + } + + } + + Ok(((num >> 1) ^ -(num & 1), shift / 7)) +} + +// store varint, zigzag encoded +pub fn variant_encode<T>(buf: &mut T, num: i64) -> Result<(),Error> where T: BufMut { + + let mut v = ((num << 1) ^ (num >> 63)) as u64; + + while (v & !0x7f) != 0 { + let b: u8 = ((v & 0x7f) | 0x80) as u8; + if buf.remaining_mut() == 0 { + return Err(Error::new(ErrorKind::UnexpectedEof,"no more bytes left")); + } + buf.put_u8(b); + v >>= 7; + } + if buf.remaining_mut() == 0 { + return Err(Error::new(ErrorKind::UnexpectedEof,"no more bytes left")); + } + buf.put_u8(v as u8); + Ok(()) +} + +pub fn variant_size(num: i64) -> usize { + + let mut v = ((num << 1) ^ (num >> 63)) as u64; + let mut bytes = 1; + + while (v & !0x7f) != 0 { + bytes += 1; + v >>= 7; + } + + bytes +} + + + + + +#[cfg(test)] +mod test { + + use std::io::Cursor; + use bytes::{BytesMut, BufMut}; + use super::varint_decode; + use super::variant_encode; + use super::variant_size; + + + #[test] + fn test_varint_decode_with_test_set() { + let test_set = vec![ + (0, vec![0x00]), + (-1, vec![0x1]), + (1, vec![0x2]), + (63, vec![0x7e]), + (7, vec![14]), + (10, vec![0x14]), + (4, vec![0x08]), + (8191,vec![0xfe,0x7f]), + (-134217729, vec![0x81, 0x80, 0x80, 0x80, 0x01]) + ]; + + for (expected,input) in test_set { + let mut buf = BytesMut::with_capacity(1024); + buf.put_slice(&input); + + let mut src = Cursor::new(&buf); + let result = varint_decode(&mut src); + assert!(result.is_ok()); + let (value,shift) = result.unwrap(); + assert_eq!(value,expected); + assert_eq!(shift,input.len()); + } + + } + + + #[test] + fn test_varint_encode_with_test_set() { + let test_set = vec![ + (0, vec![0x00]), + (-1, vec![0x1]), + (1, vec![0x2]), + (63, vec![0x7e]), + (7, vec![14]), + (10, vec![0x14]), + (4, vec![0x08]), + (8191,vec![0xfe,0x7f]), + (-134217729, vec![0x81, 0x80, 0x80, 0x80, 0x01]) + ]; + + for (input,output) in test_set { + let mut src = vec![]; + let result = variant_encode(&mut src,input); + assert!(result.is_ok()); + assert_eq!(src.len(),output.len()); + assert_eq!(variant_size(input),output.len()); + for i in 0..src.len() { + assert_eq!(src[i],output[i]); + } + } + + } + + + + + + + + +} \ No newline at end of file diff --git a/kf-protocol/kf-protocol-core/src/zerocopy.rs b/kf-protocol/kf-protocol-core/src/zerocopy.rs new file mode 100644 index 0000000000..b8ea1b412e --- /dev/null +++ b/kf-protocol/kf-protocol-core/src/zerocopy.rs @@ -0,0 +1,39 @@ +#[cfg(test)] +mod test { + + use std::slice::from_raw_parts; + use std::mem::size_of; + + //use super::BigU32; + + #[repr(C)] + struct Buffer { + i1: u16, + i2: u16 + } + + // doing basic test of big endian integer conversion and so forth + #[test] + fn test_zero_copy() { + + let b = Buffer { + i1: (10 as u16).to_be(), + i2: 11 + }; + + let p: *const Buffer = &b; + let p: *const u8 = p as *const u8; + let bytes = unsafe { from_raw_parts(p,size_of::<Buffer>())}; + + println!("{:X}{:X}{:X}{:X}",bytes[0],bytes[1],bytes[2],bytes[3]); + + // should print out as 0AB0 + + let z: *const Buffer = p as
*const Buffer; + let k: &Buffer = unsafe { &*z}; + assert_eq!(k.i2,11); + assert_eq!(k.i1.to_be(),10); + } + + +} \ No newline at end of file diff --git a/kf-protocol/kf-protocol-derive/Cargo.toml b/kf-protocol/kf-protocol-derive/Cargo.toml new file mode 100644 index 0000000000..1d98a974be --- /dev/null +++ b/kf-protocol/kf-protocol-derive/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "kf-protocol-derive" +version = "0.1.0-alpha.1" +edition = "2018" +authors = ["fluvio.io"] + +[lib] +proc-macro = true +doctest = false + +[dependencies] +proc-macro2 = "0.4.30" +quote = "0.6.12" +syn = { version = "0.15.34", features = ["extra-traits"] } +log = "0.4.6" diff --git a/kf-protocol/kf-protocol-derive/README.md b/kf-protocol/kf-protocol-derive/README.md new file mode 100644 index 0000000000..57c2f1d319 --- /dev/null +++ b/kf-protocol/kf-protocol-derive/README.md @@ -0,0 +1,11 @@ +# Procedural macro library + +## Debugging + +Install `cargo expand` from +https://github.com/dtolnay/cargo-expand + +To debug a macro: +```cargo expand --test varint``` + +This will generate the expansion of the macro. \ No newline at end of file diff --git a/kf-protocol/kf-protocol-derive/rust-toolchain b/kf-protocol/kf-protocol-derive/rust-toolchain new file mode 120000 index 0000000000..9327ba4034 --- /dev/null +++ b/kf-protocol/kf-protocol-derive/rust-toolchain @@ -0,0 +1 @@ +../rust-toolchain \ No newline at end of file diff --git a/kf-protocol/kf-protocol-derive/src/api.rs b/kf-protocol/kf-protocol-derive/src/api.rs new file mode 100644 index 0000000000..8872148334 --- /dev/null +++ b/kf-protocol/kf-protocol-derive/src/api.rs @@ -0,0 +1,185 @@ +use quote::quote; +use proc_macro2::TokenStream; +use syn::Attribute; +use syn::Data; +use syn::DataStruct; +use syn::DeriveInput; +use syn::Fields; +use syn::spanned::Spanned; +use syn::Ident; +use proc_macro2::Span; + +use super::util::find_attr; +use super::util::find_int_name_value; +use super::util::find_string_name_value; + + +pub(crate) fn generate_request_traits(input: &DeriveInput) -> TokenStream { + + let name = &input.ident; + + let api_trait = generate_request_trait_impl(name,&input.attrs); + + quote! { + #api_trait + } +} + + + +/// generate implementation for decoding kf protocol +pub(crate) fn parse_and_generate_api(input: &DeriveInput) -> TokenStream { + + let name = &input.ident; + + match input.data { + Data::Struct(ref data) => generate_api(data,name), + _ => unimplemented!() + } +} + +fn generate_api(data: &DataStruct,name: &Ident) -> TokenStream { + + let encoder = generate_encoder(data,name); + + + quote! { + #encoder + } +} + +fn generate_encoder(data: &DataStruct,name: &Ident) -> TokenStream { + + + match data.fields { + Fields::Named(ref fields) => { + + let fields_code = fields.named.iter().map(|f| { + + quote! { + #f, + } + + }); + + let definition = quote! { + + #[derive(Encode,Decode,RequestApi,Debug)] + #[fluvio_kf(default)] + pub struct #name { + #(#fields_code)* + } + + }; + + + let methods = fields.named.iter().map(|f| { + + let fname = &f.ident.as_ref().unwrap(); + let ty = &f.ty; + + let new_name = format!("set_{}",fname); + let setter_name = Ident::new(&new_name, Span::call_site()); + + quote! { + + #[allow(dead_code)] + #[inline] + pub fn #fname(&self) -> &#ty { + &self.#fname + } + + #[allow(dead_code)] + #[inline] + pub fn #setter_name(&mut self, val: #ty) { + self.#fname = val; + } + } + + }); + + let accessor = quote! { + + impl #name { + + #(#methods)* + + } + }; + + quote!
{ + #definition + + #accessor + } + }, + _ => unimplemented!() + } + +} + + + +fn generate_request_trait_impl(name: &Ident,attrs: &Vec<Attribute>) -> TokenStream { + + // check if we have api version + let version_meta = if let Some(version) = find_attr(attrs, "fluvio_kf") { + version + } else { + return quote!{} + }; + + let api_key = if let Some(version) = find_int_name_value(&version_meta,"api_key") { version } + else { + return quote!{} + }; + + + let min_version = if let Some(version) = find_int_name_value(&version_meta,"api_min_version") { version } + else { + return syn::Error::new(version_meta.span(),"no min version found").to_compile_error() + }; + + + + let response = if let Some(version) = find_string_name_value(&version_meta,"response") { version } + else { + return syn::Error::new(version_meta.span(),"no response found").to_compile_error() + }; + + let response_type = Ident::new(&response.value(), Span::call_site()); + + let max_version = if let Some(max_version) = find_int_name_value(&version_meta,"api_max_version") { + + if max_version < min_version { + syn::Error::new(version_meta.span(),"max version must be greater than or equal to min version").to_compile_error() + } else { + quote! { + const MAX_API_VERSION: i16 = #max_version as i16; + } + + } + + } else { + quote!{} + }; + + + quote! { + + impl Request for #name { + + const API_KEY: u16 = #api_key as u16; + + const MIN_API_VERSION: i16 = #min_version as i16; + + #max_version + + type Response = #response_type; + + } + + } + + +} \ No newline at end of file diff --git a/kf-protocol/kf-protocol-derive/src/de.rs b/kf-protocol/kf-protocol-derive/src/de.rs new file mode 100644 index 0000000000..757f8146f4 --- /dev/null +++ b/kf-protocol/kf-protocol-derive/src/de.rs @@ -0,0 +1,355 @@ +use quote::quote; +use proc_macro2::TokenStream; +use proc_macro2::Span; +use syn::Attribute; +use syn::Data; +use syn::DataStruct; +use syn::DataEnum; +use syn::DeriveInput; +use syn::Fields; +use syn::Ident; +use syn::LitInt; +use syn::IntSuffix; +use syn::Expr; +use syn::Lit; +use syn::UnOp; + + +use crate::default_int_type; +use super::version::Version; +use super::util::find_attr; +use super::util::find_string_name_value; + +/// generate implementation for decoding kf protocol +pub fn generate_decode_traits(input: &DeriveInput) -> TokenStream { + + let name = &input.ident; + + let int_type = default_int_type(&input.attrs); + + let decoded_field_tokens = decode_fields(&input.data,&int_type,name); + let generics = &input.generics; + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + + let try_enum = generate_try_enum_if(&input.data,&int_type,name); + + quote!
+/// generate implementation for decoding kf protocol
+pub fn generate_decode_traits(input: &DeriveInput) -> TokenStream {
+
+    let name = &input.ident;
+
+    let int_type = default_int_type(&input.attrs);
+
+    let decoded_field_tokens = decode_fields(&input.data, &int_type, name);
+    let generics = &input.generics;
+    let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
+
+    let try_enum = generate_try_enum_if(&input.data, &int_type, name);
+
+    quote! {
+
+        impl #impl_generics kf_protocol::Decoder for #name #ty_generics #where_clause {
+            fn decode<T>(&mut self, src: &mut T, version: kf_protocol::Version) -> Result<(), std::io::Error> where T: kf_protocol::bytes::Buf {
+                log::trace!("decoding struct: {}", stringify!(#name));
+                #decoded_field_tokens
+                Ok(())
+            }
+
+        }
+
+        #try_enum
+
+    }
+}
+
+/// generate syntax for decoding
+fn decode_fields(data: &Data, int_type: &Ident, name: &Ident) -> TokenStream {
+    match *data {
+        Data::Struct(ref data) => parse_struct(name, data),
+        Data::Enum(ref enum_data) => parse_enum(enum_data, int_type, name),
+        _ => unimplemented!()
+    }
+}
+
+fn parse_struct(struct_name: &Ident, data: &DataStruct) -> TokenStream {
+
+    match data.fields {
+        Fields::Named(ref fields) => {
+            let recurse = fields.named.iter().map(|f| {
+                let fname = &f.ident;
+                if f.attrs
+                    .iter()
+                    .flat_map(Attribute::interpret_meta)
+                    .find(|meta| meta.name() == "varint")
+                    .is_some()
+                {
+                    quote! {
+
+                        log::trace!("start decoding varint field <{}>", stringify!(#fname));
+                        let result = self.#fname.decode_varint(src);
+                        if result.is_ok() {
+                            log::trace!("decoding ok varint <{}> => {:?}", stringify!(#fname), &self.#fname);
+                        } else {
+                            log::trace!("decoding varint error <{}> ==> {}", stringify!(#fname), result.as_ref().unwrap_err());
+                            return result;
+                        }
+
+                    }
+                } else {
+
+                    let base = quote! {
+
+                        log::trace!("start decoding struct: <{}> field: <{}>", stringify!(#struct_name), stringify!(#fname));
+                        let result = self.#fname.decode(src, version);
+                        if result.is_ok() {
+                            log::trace!("decoding struct: <{}> field: <{}> => {:#?}", stringify!(#struct_name), stringify!(#fname), &self.#fname);
+                        } else {
+                            log::trace!("error decoding <{}> ==> {}", stringify!(#fname), result.as_ref().unwrap_err());
+                            return result;
+                        }
+                    };
+
+                    if let Some(version) = Version::find_version(&f.attrs) {
+                        match fname {
+                            Some(field_name) => version.expr(base, field_name),
+                            None => base
+                        }
+                    } else {
+                        base
+                    }
+
+                }
+            });
+
+            quote! {
+                #(#recurse)*
+            }
+        },
+        _ => unimplemented!()
+    }
+
+}
+
+fn parse_enum(_data: &DataEnum, int_type: &Ident, _name: &Ident) -> TokenStream {
+
+    quote! {
+        use std::convert::TryInto;
+
+        let mut value: #int_type = 0;
+        value.decode(src, version)?;
+
+        let convert: Self = value.try_into()?;
+        *self = convert;
+    }
+}
+
+/// generate the TryFrom conversion for enums, if applicable
+fn generate_try_enum_if(data: &Data, int_type: &Ident, name: &Ident) -> TokenStream {
+    match *data {
+        Data::Struct(ref _data) => quote! {},
+        Data::Enum(ref enum_data) => generate_try_enum(enum_data, int_type, name),
+        _ => unimplemented!()
+    }
+}
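+// Sketch (illustrative, for a hypothetical `#[repr(u8)] enum Mode { A = 1, B = 2 }`):
+// the conversion built below expands to approximately
+//
+//     impl std::convert::TryFrom<u8> for Mode {
+//         type Error = std::io::Error;
+//         fn try_from(value: u8) -> Result<Self, Self::Error> {
+//             match value {
+//                 1 => Ok(Mode::A),
+//                 2 => Ok(Mode::B),
+//                 _ => Err(std::io::Error::new(
+//                     std::io::ErrorKind::UnexpectedEof,
+//                     format!("invalid value: {}", value)))
+//             }
+//         }
+//     }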
+fn generate_try_enum(data: &DataEnum, int_type: &Ident, name: &Ident) -> TokenStream {
+
+    let mut variant_expr = vec![];
+
+    for (idx, variant) in data.variants.iter().enumerate() {
+        let id = &variant.ident;
+
+        match &variant.fields {
+            Fields::Unit => {
+
+                if let Some(expr) = &variant.discriminant {
+
+                    let int_expr_result = match &expr.1 {
+
+                        Expr::Lit(lit) => {
+
+                            match &lit.lit {
+                                Lit::Int(int_lit) => quote! {
+                                    #int_lit => Ok(#name::#id),
+                                },
+                                _ => quote! {
+                                    compile_error!("unsupported")
+                                }
+                            }
+
+                        },
+                        Expr::Unary(t) => {
+                            match t.op {
+                                UnOp::Neg(_) => {
+                                    quote! {
+                                        #t => Ok(#name::#id),
+                                    }
+                                },
+                                _ => quote! {
+                                    compile_error!("unsupported")
+                                }
+                            }
+
+                        },
+
+                        _ => {
+                            quote! {
+                                compile_error!("unsupported")
+                            }
+                        }
+                    };
+
+                    variant_expr.push(int_expr_result);
+
+                } else {
+
+                    let idx_val = LitInt::new(idx as u64, IntSuffix::None, Span::call_site());
+
+                    variant_expr.push(quote! {
+                        #idx_val => Ok(#name::#id),
+                    });
+                }
+
+            },
+            Fields::Named(_named_fields) => {
+                variant_expr.push(quote! {
+                    compile_error!("named fields are not supported");
+                });
+            },
+            Fields::Unnamed(_unnamed) => {
+                variant_expr.push(quote! {
+                    compile_error!("unnamed fields are not supported");
+                });
+            }
+        }
+    }
+
+    variant_expr.push(quote! {
+        _ => return Err(std::io::Error::new(
+            std::io::ErrorKind::UnexpectedEof,
+            format!("invalid value: {}", value)
+        ))
+    });
+
+    quote! {
+        impl std::convert::TryFrom<#int_type> for #name {
+            type Error = std::io::Error;
+
+            fn try_from(value: #int_type) -> Result<Self, Self::Error> {
+
+                match value {
+
+                    #(#variant_expr)*
+                }
+
+            }
+        }
+    }
+
+}
+
+/// generate a Default implementation
+pub fn generate_default_traits(input: &DeriveInput) -> TokenStream {
+
+    let name = &input.ident;
+
+    let default_impl = generate_default_impl(input, name);
+
+    quote! {
+
+        #default_impl
+
+    }
+}
+
+/// generate syntax for the Default impl
+fn generate_default_impl(input: &DeriveInput, name: &Ident) -> TokenStream {
+
+    let data = &input.data;
+    match *data {
+        Data::Struct(ref data) => impl_default_impl(input, data, name),
+        Data::Enum(_) => quote! {},
+        _ => unimplemented!()
+    }
+}
+
+// generates the body of the impl, e.g.:
+//
+//     impl Default for TestRequest {
+//         fn default() -> Self {
+//             Self {
+//                 field: 10,
+//                 field2: 20,
+//             }
+//         }
+//     }
+fn impl_default_impl(input: &DeriveInput, data: &DataStruct, name: &Ident) -> TokenStream {
+
+    let generics = &input.generics;
+    let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
+
+    match data.fields {
+        Fields::Named(ref fields) => {
+            let recurse = fields.named.iter().map(|f| {
+                let fname = &f.ident;
+
+                if let Some(default_attr) = find_attr(&f.attrs, "fluvio_kf") {
+
+                    if let Some(expr_str) = find_string_name_value(&default_attr, "default") {
+
+                        use std::str::FromStr;
+                        use syn::spanned::Spanned;
+
+                        match TokenStream::from_str(&expr_str.value()) {
+                            Err(_err) => syn::Error::new(f.span(), "can't parse default value").to_compile_error(),
+                            Ok(liter) => {
+                                quote! {
+                                    #fname: #liter,
+                                }
+                            }
+                        }
+
+                    } else {
+                        quote! {
+                            #fname: std::default::Default::default(),
+                        }
+                    }
+                } else {
+                    quote! {
+                        #fname: std::default::Default::default(),
+                    }
+                }
+
+            });
+
+            quote! {
+
+                impl #impl_generics Default for #name #ty_generics #where_clause {
+
+                    fn default() -> Self {
+                        Self {
+                            #(#recurse)*
+                        }
+                    }
+                }
+
+            }
+        },
+        _ => unimplemented!()
+    }
+
+}
\ No newline at end of file
diff --git a/kf-protocol/kf-protocol-derive/src/lib.rs b/kf-protocol/kf-protocol-derive/src/lib.rs
new file mode 100644
index 0000000000..09c5075cdf
--- /dev/null
+++ b/kf-protocol/kf-protocol-derive/src/lib.rs
@@ -0,0 +1,214 @@
+#![recursion_limit = "128"]
+
+extern crate proc_macro;
+
+mod de;
+mod ser;
+mod util;
+mod api;
+mod version;
+
+pub(crate) use self::util::default_int_type;
+use self::de::generate_decode_traits;
+use self::ser::generate_encode_traits;
+use self::api::generate_request_traits;
+use self::api::parse_and_generate_api;
+use self::de::generate_default_traits;
+
+use proc_macro::TokenStream;
+use syn::DeriveInput;
+
+/// Custom derive for decoding a structure or enum from bytes using the Kafka protocol format.
+/// This assumes all fields implement the kafka decode traits.
+///
+/// # Examples
+///
+/// ```
+/// use std::io::Cursor;
+///
+/// use kf_protocol::Decoder;
+/// use kf_protocol::derive::Decode;
+///
+/// #[derive(Decode)]
+/// pub struct SimpleRecord {
+///     val: u8
+/// }
+///
+/// fn main() {
+///
+///     let data = [
+///         0x04
+///     ];
+///
+///     let record = SimpleRecord::decode_from(&mut Cursor::new(&data), 0).expect("decode");
+///     assert_eq!(record.val, 4);
+/// }
+///
+/// ```
+///
+/// Decode applies to either a Struct or an Enum. For an enum, it implements the `TryFrom` trait.
+/// Currently it only supports integer variants.
+///
+/// So this works:
+///
+/// ```
+/// #[derive(Decode)]
+/// pub enum ThreeChoice {
+///     First = 1,
+///     Second = 2,
+///     Third = 3
+/// }
+/// ```
+///
+/// An enum without integer literals works as well:
+/// ```
+/// #[derive(Decode)]
+/// pub enum ThreeChoice {
+///     First,
+///     Second,
+///     Third
+/// }
+/// ```
+///
+/// In this case, 0 is decoded as First, 1 as Second, and 2 as Third.
+///
+/// Currently, mixed enum variants are not supported.
+///
+/// Decode supports container level and field level attributes.
+/// Container level attributes apply to the struct.
+/// Field attributes:
+/// * `#[varint]` forces decoding in varint format.
+/// * `#[fluvio_kf(min_version = <version>)]` decodes only if the version is greater than or equal to min_version.
+/// * `#[fluvio_kf(max_version = <version>)]` decodes only if the version is less than or equal to max_version.
+///
+#[proc_macro_derive(Decode, attributes(varint, fluvio_kf))]
+pub fn kf_decode(input: TokenStream) -> TokenStream {
+    let ast: DeriveInput = syn::parse(input).unwrap();
+
+    let expanded = generate_decode_traits(&ast);
+    expanded.into()
+}
+
+/// Custom derive for encoding a structure or enum to bytes using the Kafka protocol format.
+/// This assumes all fields (or enum variants) implement the kafka encode traits.
+///
+/// # Examples
+///
+/// ```
+/// use kf_protocol::Encoder;
+/// use kf_protocol::derive::Encode;
+///
+/// #[derive(Encode)]
+/// pub struct SimpleRecord {
+///     val: u8
+/// }
+///
+/// fn main() {
+///
+///     let mut data = vec![];
+///
+///     let record = SimpleRecord { val: 4 };
+///     record.encode(&mut data, 0).expect("encode");
+///
+///     assert_eq!(data[0], 4);
+/// }
+///
+/// ```
+///
+/// Encode applies to either a Struct or an Enum.
+///
+/// Encode respects version attributes. See the Decode derive.
+///
+#[proc_macro_derive(Encode, attributes(varint, fluvio_kf))]
+pub fn kf_encode(input: TokenStream) -> TokenStream {
+    let ast: DeriveInput = syn::parse(input).unwrap();
+
+    let expanded = generate_encode_traits(&ast);
+    expanded.into()
+}
+
+#[proc_macro]
+pub fn kf_api(input: TokenStream) -> TokenStream {
+    let ast: DeriveInput = syn::parse(input).unwrap();
+
+    let expanded = parse_and_generate_api(&ast);
+    expanded.into()
+}
+
+/// Custom derive for implementing the Request trait.
+/// This derive requires the `fluvio_kf` attribute.
+///
+/// # Examples
+///
+/// ```
+/// use kf_protocol::derive::Decode;
+/// use kf_protocol::derive::Encode;
+/// use kf_protocol::api::Request;
+/// use kf_protocol::derive::RequestApi;
+///
+/// #[fluvio_kf(default, api_min_version = 5, api_max_version = 6, api_key = 10, response = "TestResponse")]
+/// #[derive(RequestApi, Encode, Decode, Default)]
+/// pub struct SimpleRequest {
+///     val: u8
+/// }
+///
+/// #[derive(Encode, Decode, Default)]
+/// #[fluvio_kf(default)]
+/// pub struct TestResponse {
+///     pub value: i8,
+/// }
+///
+/// ```
+///
+/// The RequestApi derive respects the following attributes in `fluvio_kf`:
+///
+/// * `api_min_version`: min version that the API supports. This is required.
+/// * `api_max_version`: max version that the API supports. This is optional.
+/// * `api_key`: API number. This is required.
+/// * `response`: Response struct. This is required.
+///
+#[proc_macro_derive(RequestApi, attributes(varint, fluvio_kf))]
+pub fn kf_request(input: TokenStream) -> TokenStream {
+    let ast: DeriveInput = syn::parse(input).unwrap();
+
+    let expanded = generate_request_traits(&ast);
+    expanded.into()
+}
+
+/// Custom derive for generating a Default implementation
+///
+/// Example:
+///
+/// ```
+/// #[derive(KfDefault)]
+/// #[fluvio_kf(default)]
+/// pub struct SimpleRecord {
+///     #[fluvio_kf(default = "-1")]
+///     val: i8
+/// }
+///
+/// fn main() {
+///
+///     let record = SimpleRecord::default();
+///     assert_eq!(record.val, -1);
+/// }
+/// ```
+///
+/// The `default` assignment can be any Rust expression.
+#[proc_macro_derive(KfDefault, attributes(fluvio_kf))]
+pub fn kf_default(input: TokenStream) -> TokenStream {
+    let ast: DeriveInput = syn::parse(input).unwrap();
+
+    let expanded = generate_default_traits(&ast);
+    expanded.into()
+}
\ No newline at end of file
diff --git a/kf-protocol/kf-protocol-derive/src/ser.rs b/kf-protocol/kf-protocol-derive/src/ser.rs
new file mode 100644
index 0000000000..9da185328c
--- /dev/null
+++ b/kf-protocol/kf-protocol-derive/src/ser.rs
@@ -0,0 +1,303 @@
+use quote::quote;
+use syn::DeriveInput;
+use syn::Ident;
+use syn::Fields;
+use syn::Attribute;
+use syn::Data;
+use syn::DataStruct;
+use syn::DataEnum;
+use syn::Expr;
+use syn::LitInt;
+use syn::Lit;
+use syn::IntSuffix;
+use proc_macro2::Span;
+use proc_macro2::TokenStream;
+use syn::spanned::Spanned;
+use syn::UnOp;
+
+use crate::default_int_type;
+use super::version::Version;
+
+/// generate implementation for encoding kf protocol
+pub fn generate_encode_traits(input: &DeriveInput) -> TokenStream {
+
+    let name = &input.ident;
+
+    let encoded_field_tokens = encode_fields_for_writing(&input.data, &input.attrs, name);
+    let size_field_tokens = encode_field_sizes(&input.data, &input.attrs, name);
+    let generics = &input.generics;
+    let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
+
+    quote! {
+
+        impl #impl_generics kf_protocol::Encoder for #name #ty_generics #where_clause {
+
+            fn encode<T>(&self, src: &mut T, version: kf_protocol::Version) -> Result<(), std::io::Error> where T: kf_protocol::bytes::BufMut {
+                log::trace!("encoding struct: {} version: {}", stringify!(#name), version);
+                #encoded_field_tokens
+                Ok(())
+            }
+
+            fn write_size(&self, version: kf_protocol::Version) -> usize {
+
+                log::trace!("write size for struct: {} version {}", stringify!(#name), version);
+                let mut len: usize = 0;
+                #size_field_tokens
+                len
+            }
+
+        }
+    }
+}
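+// Sketch (illustrative, for a hypothetical `struct Foo { a: i16 }`): the impl assembled
+// above expands to approximately
+//
+//     impl kf_protocol::Encoder for Foo {
+//         fn encode<T>(&self, src: &mut T, version: kf_protocol::Version) -> Result<(), std::io::Error>
+//         where T: kf_protocol::bytes::BufMut {
+//             self.a.encode(src, version)?;
+//             Ok(())
+//         }
+//         fn write_size(&self, version: kf_protocol::Version) -> usize {
+//             self.a.write_size(version)
+//         }
+//     }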
+/// generate syntax for encoding
+fn encode_fields_for_writing(data: &Data, attrs: &Vec<Attribute>, name: &Ident) -> TokenStream {
+
+    match *data {
+        Data::Struct(ref struct_data) => parse_structf_encoding(name, struct_data),
+        Data::Enum(ref enum_data) => parse_enum_for_writing(enum_data, attrs, name),
+        _ => unimplemented!()
+    }
+
+}
+
+fn parse_structf_encoding(struct_name: &Ident, data: &DataStruct) -> TokenStream {
+
+    match data.fields {
+        Fields::Named(ref fields) => {
+
+            let recurse = fields.named.iter().map(|f| {
+                let fname = &f.ident;
+
+                if f.attrs.iter().flat_map(Attribute::interpret_meta).find(|meta| meta.name() == "varint").is_some() {
+                    quote! {
+                        log::trace!("encoding varint struct: <{}> field <{}> => {:?}", stringify!(#struct_name), stringify!(#fname), &self.#fname);
+                        let result = self.#fname.encode_varint(src);
+                        if result.is_err() {
+                            log::error!("error varint encoding <{}> ==> {}", stringify!(#fname), result.as_ref().unwrap_err());
+                            return result;
+                        }
+
+                    }
+                } else {
+
+                    let base = quote! {
+                        log::trace!("encoding struct: <{}>, field <{}> => {:?}", stringify!(#struct_name), stringify!(#fname), &self.#fname);
+                        let result = self.#fname.encode(src, version);
+                        if result.is_err() {
+                            log::error!("error encoding <{}> ==> {}", stringify!(#fname), result.as_ref().unwrap_err());
+                            return result;
+                        }
+
+                    };
+
+                    if let Some(version) = Version::find_version(&f.attrs) {
+                        if let Some(msg) = version.validation_msg() {
+                            syn::Error::new(f.span(), msg).to_compile_error()
+                        } else {
+                            match fname {
+                                Some(field_name) => version.expr(base, field_name),
+                                None => base
+                            }
+                        }
+                    } else {
+                        base
+                    }
+                }
+
+            });
+
+            quote! {
+                #(#recurse)*
+            }
+        },
+        _ => unimplemented!()
+    }
+}
+
+fn parse_enum_for_writing(data: &DataEnum, attrs: &Vec<Attribute>, name: &Ident) -> TokenStream {
+
+    // find the int representation (from the repr attribute)
+    let int_type = default_int_type(attrs);
+    let mut variant_expr = vec![];
+    for (idx, variant) in data.variants.iter().enumerate() {
+        let id = &variant.ident;
+
+        match &variant.fields {
+            Fields::Unit => {
+                if let Some(expr) = &variant.discriminant {
+
+                    let expr = match &expr.1 {
+                        Expr::Lit(lit) => {
+                            match &lit.lit {
+                                Lit::Int(int_lit) => quote! {
+                                    #name::#id => {
+                                        let val = #int_lit as #int_type;
+                                        val.encode(src, version)?;
+                                    }
+
+                                },
+                                _ => quote! {
+                                    compile_error!("unsupported")
+                                }
+                            }
+                        },
+                        Expr::Unary(t) => {
+                            match t.op {
+                                UnOp::Neg(_) => quote! {
+                                    #name::#id => {
+                                        let val = #t as #int_type;
+                                        val.encode(src, version)?;
+                                    }
+                                },
+                                _ => quote! {
+                                    compile_error!("unsupported")
+                                }
+                            }
+
+                        },
+                        _ => quote! {
+                            compile_error!("unsupported")
+                        }
+                    };
+                    variant_expr.push(expr);
+
+                } else {
+                    let idx_val = LitInt::new(idx as u64, IntSuffix::None, Span::call_site());
+                    variant_expr.push(quote! {
+                        #name::#id => {
+                            let val = #idx_val as #int_type;
+                            val.encode(src, version)?;
+                        },
+                    });
+                }
+
+            },
+            Fields::Named(_named_fields) => {
+                variant_expr.push(quote! {
+                    compile_error!("named fields are not supported");
+                });
+            },
+            Fields::Unnamed(_) => {
+                variant_expr.push(quote! {
+                    #name::#id(val) => val.encode(src, version)?,
+                });
+            }
+        }
+    }
+
+    quote! {
+        match self {
+            #(#variant_expr)*
+        }
+    }
+}
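+// Sketch (illustrative, for a hypothetical `#[repr(u8)] enum Mode { A = 1, B(i32) }`):
+// the match emitted above expands to approximately
+//
+//     match self {
+//         Mode::A => {
+//             let val = 1 as u8;
+//             val.encode(src, version)?;
+//         }
+//         Mode::B(val) => val.encode(src, version)?,
+//     }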
+/// generate syntax for computing write sizes
+fn encode_field_sizes(data: &Data, attrs: &Vec<Attribute>, name: &Ident) -> TokenStream {
+
+    match *data {
+        Data::Struct(ref struct_data) => parse_structf_size(name, struct_data),
+        Data::Enum(ref enum_data) => parse_enum_for_size(enum_data, attrs, name),
+        _ => unimplemented!()
+    }
+
+}
+
+fn parse_structf_size(struct_name: &Ident, data: &DataStruct) -> TokenStream {
+
+    match data.fields {
+        Fields::Named(ref fields) => {
+
+            let recurse = fields.named.iter().map(|f| {
+                let fname = &f.ident;
+                if f.attrs.iter().flat_map(Attribute::interpret_meta).find(|meta| meta.name() == "varint").is_some() {
+                    quote! {
+                        let write_size = self.#fname.var_write_size();
+                        log::trace!("varint write size: <{}>, field: <{}> is: {}", stringify!(#struct_name), stringify!(#fname), write_size);
+                        len = len + write_size;
+                    }
+                } else {
+                    let base = quote! {
+                        let write_size = self.#fname.write_size(version);
+                        log::trace!("write size: <{}> field: <{}> => {}", stringify!(#struct_name), stringify!(#fname), write_size);
+                        len = len + write_size;
+                    };
+
+                    if let Some(version) = Version::find_version(&f.attrs) {
+                        if let Some(msg) = version.validation_msg() {
+                            syn::Error::new(f.span(), msg).to_compile_error()
+                        } else {
+                            match fname {
+                                Some(field_name) => version.expr(base, field_name),
+                                None => base
+                            }
+                        }
+                    } else {
+                        base
+                    }
+                }
+
+            });
+
+            quote! {
+                #(#recurse)*
+            }
+        },
+        _ => unimplemented!()
+    }
+}
+
+fn parse_enum_for_size(data: &DataEnum, attrs: &Vec<Attribute>, name: &Ident) -> TokenStream {
+
+    let int_type = default_int_type(attrs);
+
+    let mut variant_expr = vec![];
+
+    for (_, variant) in data.variants.iter().enumerate() {
+        let id = &variant.ident;
+
+        match &variant.fields {
+            Fields::Unnamed(_) => {
+                variant_expr.push(quote! {
+                    #name::#id(val) => val.write_size(version),
+                });
+            },
+            _ => {},
+        }
+    }
+
+    if variant_expr.len() > 0 {
+        quote! {
+            len = match self {
+                #(#variant_expr)*
+            };
+        }
+    } else {
+        quote! {
+            len = std::mem::size_of::<#int_type>();
+        }
+    }
+
+}
\ No newline at end of file
diff --git a/kf-protocol/kf-protocol-derive/src/util.rs b/kf-protocol/kf-protocol-derive/src/util.rs
new file mode 100644
index 0000000000..bae23d8887
--- /dev/null
+++ b/kf-protocol/kf-protocol-derive/src/util.rs
@@ -0,0 +1,145 @@
+use syn::Ident;
+use syn::Attribute;
+use syn::Meta;
+use syn::NestedMeta;
+use syn::MetaNameValue;
+use syn::Lit;
+use syn::LitStr;
+use proc_macro2::Span;
+
+/// find the int type from the repr attribute; if not found, default to u8
+pub(crate) fn default_int_type(attrs: &Vec<Attribute>) -> Ident {
+    let mut rep_list = vec![];
+    for attr in attrs {
+        let meta = attr.parse_meta().expect("meta");
+        match meta {
+            Meta::List(meta_list) => {
+                for attr_meta in meta_list.nested.iter() {
+                    match attr_meta {
+                        NestedMeta::Meta(inner_meta) => {
+                            match inner_meta {
+                                Meta::Word(ident) => rep_list.push(ident.clone()),
+                                _ => {}
+                            }
+                        }
+                        _ => {}
+                    }
+                }
+            }
+            _ => {}
+        }
+    }
+
+    if rep_list.len() == 0 { Ident::new("u8", Span::call_site()) } else { rep_list.remove(0) }
+
+}
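+// Note on default_int_type above (illustrative): an enum annotated `#[repr(u16)]`
+// yields the ident `u16`; with no such attribute the discriminant is treated as u8.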
println!("version value: {}",version_val.value()); + Some(version_val.value()) + }, + _ => unimplemented!() + } + } else { + None + } + +} + + +/// find name value with str value +pub(crate) fn find_string_name_value<'a>(version_meta: &'a Meta,attr_name: &str) -> Option { + + if let Some(attr) = find_name_attribute(&version_meta,attr_name) { + + match &attr.lit { + Lit::Str(val) => { + Some(val.clone()) + }, + _ => unimplemented!() + } + } else { + None + } + +} + diff --git a/kf-protocol/kf-protocol-derive/src/version.rs b/kf-protocol/kf-protocol-derive/src/version.rs new file mode 100644 index 0000000000..0f058479ff --- /dev/null +++ b/kf-protocol/kf-protocol-derive/src/version.rs @@ -0,0 +1,82 @@ + + +use syn::Attribute; +use syn::Ident; +use quote::quote; +use proc_macro2::TokenStream; + +use super::util::find_attr; +use super::util::find_int_name_value; + + +pub(crate) struct Version { + min: i16, + max: Option +} + +impl Version { + + // find fluvio versions + pub(crate) fn find_version(attrs: &Vec) -> Option { + + if let Some(version) = find_attr(attrs,"fluvio_kf") { + + if let Some(min) = find_int_name_value(&version, "min_version") { + + let max = find_int_name_value(&version,"max_version"); + Some( + Self { + min: min as i16, + max: max.map(|v| v as i16) + } + ) + } else { + None + } + } else { + None + } + } + + pub(crate) fn validation_msg(&self) -> Option { + if let Some(max) = self.max { + if self.min > max { + Some("max version is less then min".to_owned()) + } else { + None + } + } else { + if self.min < 0 { + Some("min version must be positive".to_owned()) + } else { + None + } + } + } + + // generate expression + pub(crate) fn expr(&self,input: TokenStream,field_name: &Ident) -> TokenStream { + + let min = self.min; + + + if let Some(max) = self.max { + quote! { + if version >= #min && version <= #max { + #input + } else { + log::trace!("field: <{}> is skipped because version: {} is outside min: {}, max: {}",stringify!(#field_name),version,#min,#max); + } + } + } else { + quote! 
+    // generate the version gating expression
+    pub(crate) fn expr(&self, input: TokenStream, field_name: &Ident) -> TokenStream {
+
+        let min = self.min;
+
+        if let Some(max) = self.max {
+            quote! {
+                if version >= #min && version <= #max {
+                    #input
+                } else {
+                    log::trace!("field: <{}> is skipped because version: {} is outside min: {}, max: {}", stringify!(#field_name), version, #min, #max);
+                }
+            }
+        } else {
+            quote! {
+                if version >= #min {
+                    #input
+                } else {
+                    log::trace!("field: <{}> is skipped because version: {} is less than min: {}", stringify!(#field_name), version, #min);
+                }
+            }
+        }
+
+    }
+}
diff --git a/kf-protocol/kf-protocol-dump/Cargo.toml b/kf-protocol/kf-protocol-dump/Cargo.toml
new file mode 100644
index 0000000000..8173e0dc72
--- /dev/null
+++ b/kf-protocol/kf-protocol-dump/Cargo.toml
@@ -0,0 +1,16 @@
+[package]
+name = "kf-protocol-dump"
+edition = "2018"
+version = "0.1.0-alpha.1"
+authors = ["fluvio.io"]
+
+[dependencies]
+log = "0.4.6"
+structopt = { version = "0.2.14" }
+kf-protocol = { path = "../kf-protocol-core", package = "kf-protocol-core" }
+kf-protocol-api = { path = "../kf-protocol-api", package = "kf-protocol-api" }
+kf-protocol-message = { path = "../kf-protocol-message", package = "kf-protocol-message" }
+
+utils = { path = "../../utils" }
\ No newline at end of file
diff --git a/kf-protocol/kf-protocol-dump/src/main.rs b/kf-protocol/kf-protocol-dump/src/main.rs
new file mode 100644
index 0000000000..1f264744e3
--- /dev/null
+++ b/kf-protocol/kf-protocol-dump/src/main.rs
@@ -0,0 +1,108 @@
+mod public_api;
+
+use std::path::PathBuf;
+use std::io::Error as IoError;
+
+use structopt::StructOpt;
+
+use kf_protocol_api::KfRequestMessage;
+
+pub(crate) use self::public_api::PublicRequest;
+
+#[derive(Debug, StructOpt)]
+pub struct DumpOpt {
+    #[structopt(long = "resp", parse(from_os_str))]
+    resp_file: Option<PathBuf>,
+    #[structopt(parse(from_os_str))]
+    file_name: PathBuf,
+}
+
+macro_rules! decode {
+    ($req:expr, $file:expr) => {
+        println!(
+            "response {:#?}",
+            $req.decode_response_from_file($file, $req.header.api_version())?
+        )
+    };
+}
+
+pub(crate) fn dump_file(opt: DumpOpt) -> Result<(), IoError> {
+    println!("opening file: {:#?}", opt.file_name);
+    let api_request = PublicRequest::decode_from_file(opt.file_name)?;
+    println!("request: {:#?}", api_request);
+    if let Some(file) = opt.resp_file {
+        match api_request {
+            PublicRequest::KfApiVersionsRequest(req) => decode!(req, file),
+            PublicRequest::KfProduceRequest(req) => decode!(req, file),
+            PublicRequest::KfFetchRequest(req) => decode!(req, file),
+            PublicRequest::KfJoinGroupRequest(req) => decode!(req, file),
+            PublicRequest::KfUpdateMetadataRequest(req) => decode!(req, file),
+        }
+    }
+
+    Ok(())
+}
+
+fn main() -> Result<(), IoError> {
+    utils::init_logger();
+
+    let opt = DumpOpt::from_args();
+    dump_file(opt)
+}
+
+#[cfg(test)]
+mod test {
+
+    use log::debug;
+    use std::io;
+    use kf_protocol_api::AllKfApiKey;
+    use kf_protocol_api::ResponseMessage;
+    use kf_protocol_api::KfRequestMessage;
+    use kf_protocol_message::fetch::DefaultKfFetchResponse;
+
+    use crate::PublicRequest;
+
+    #[test]
+    fn test_fetch_request() -> Result<(), io::Error> {
+        let file = "test-data/fetch-request1.bin";
+        let msg = PublicRequest::decode_from_file(file)?;
+        match msg {
+            PublicRequest::KfFetchRequest(fetch_msg) => {
+                assert_eq!(fetch_msg.header.api_key(), AllKfApiKey::Fetch as u16);
+                assert_eq!(fetch_msg.request.min_bytes, 1);
+                debug!("request: {:#?}", fetch_msg.request);
+            }
+            _ => assert!(false, "not fetch"),
+        }
+        Ok(())
+    }
+
+    #[test]
+    fn test_fetch_response() -> Result<(), io::Error> {
+        let file = "test-data/fetch-response1.bin";
+        let fetch_response = ResponseMessage::<DefaultKfFetchResponse>::decode_from_file(file, 7)?;
+        let response = fetch_response.response;
+        debug!("response: {:#?}", response);
+        let wrapper = &(response.topics[0].partitions[0]);
+        let batches = &(wrapper.records.batches);
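+        // fetch-response1.bin holds a single topic/partition whose record set
+        // contains two batches; the first record of each batch carries a UTF-8
+        // payload, checked below.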
+        assert_eq!(batches.len(), 2);
+        let values = batches[0].records[0]
+            .value
+            .inner_value_ref()
+            .as_ref()
+            .unwrap();
+        let raw = String::from_utf8_lossy(values).to_string();
+        assert_eq!(raw, "quick brown fox jump over the lazy dog");
+        let values2 = batches[1].records[0]
+            .value
+            .inner_value_ref()
+            .as_ref()
+            .unwrap();
+        let raw2 = String::from_utf8_lossy(values2).to_string();
+        debug!("raw2: {}", raw2);
+        Ok(())
+    }
+}
diff --git a/kf-protocol/kf-protocol-dump/src/public_api.rs b/kf-protocol/kf-protocol-dump/src/public_api.rs
new file mode 100644
index 0000000000..697b621368
--- /dev/null
+++ b/kf-protocol/kf-protocol-dump/src/public_api.rs
@@ -0,0 +1,68 @@
+use std::io::ErrorKind;
+use std::io::Error as IoError;
+use std::convert::TryInto;
+
+use log::trace;
+
+use kf_protocol::bytes::Buf;
+use kf_protocol::Decoder;
+use kf_protocol_api::KfRequestMessage;
+use kf_protocol_api::RequestMessage;
+use kf_protocol_api::RequestHeader;
+
+use kf_protocol_message::api_versions::KfApiVersionsRequest;
+use kf_protocol_message::produce::DefaultKfProduceRequest;
+use kf_protocol_message::fetch::KfFetchRequest;
+use kf_protocol_message::fetch::DefaultKfFetchRequest;
+use kf_protocol_message::group::KfJoinGroupRequest;
+use kf_protocol_message::metadata::KfUpdateMetadataRequest;
+use kf_protocol_api::api_decode;
+use kf_protocol_api::AllKfApiKey;
+
+#[derive(Debug)]
+pub enum PublicRequest {
+    KfApiVersionsRequest(RequestMessage<KfApiVersionsRequest>),
+    KfProduceRequest(RequestMessage<DefaultKfProduceRequest>),
+    KfFetchRequest(RequestMessage<DefaultKfFetchRequest>),
+    KfJoinGroupRequest(RequestMessage<KfJoinGroupRequest>),
+    KfUpdateMetadataRequest(RequestMessage<KfUpdateMetadataRequest>),
+}
+
+impl Default for PublicRequest {
+    fn default() -> PublicRequest {
+        PublicRequest::KfApiVersionsRequest(RequestMessage::<KfApiVersionsRequest>::default())
+    }
+}
+
+impl KfRequestMessage for PublicRequest {
+    type ApiKey = AllKfApiKey;
+
+    fn decode_with_header<T>(src: &mut T, header: RequestHeader) -> Result<Self, IoError>
+    where
+        Self: Default + Sized,
+        Self::ApiKey: Sized,
+        T: Buf,
+    {
+        trace!("decoding with header: {:#?}", header);
+        match header.api_key().try_into()? {
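+            // Each arm decodes the body as the matching typed request and wraps it in the
+            // corresponding PublicRequest variant; the Produce arm spells out by hand the
+            // same steps the api_decode! helper performs for the other keys.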
+            AllKfApiKey::ApiVersion => {
+                api_decode!(PublicRequest, KfApiVersionsRequest, src, header)
+            }
+            AllKfApiKey::Produce => {
+                let request = DefaultKfProduceRequest::decode_from(src, header.api_version())?;
+                Ok(PublicRequest::KfProduceRequest(RequestMessage::new(
+                    header, request,
+                )))
+            }
+            AllKfApiKey::Fetch => api_decode!(PublicRequest, KfFetchRequest, src, header),
+            AllKfApiKey::JoinGroup => api_decode!(PublicRequest, KfJoinGroupRequest, src, header),
+            AllKfApiKey::UpdateMetadata => {
+                api_decode!(PublicRequest, KfUpdateMetadataRequest, src, header)
+            }
+            _ => Err(IoError::new(
+                ErrorKind::Other,
+                "trying to decode an unrecognized api",
+            )),
+        }
+    }
+}
diff --git a/kf-protocol/kf-protocol-dump/test-data/api-request-v1.bin b/kf-protocol/kf-protocol-dump/test-data/api-request-v1.bin new file mode 100644 index 0000000000..56d1dc5ddf Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/api-request-v1.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/api-response-v1.bin b/kf-protocol/kf-protocol-dump/test-data/api-response-v1.bin new file mode 100644 index 0000000000..8e94a0d2c3 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/api-response-v1.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/apirequest.bin b/kf-protocol/kf-protocol-dump/test-data/apirequest.bin new file mode 100644 index 0000000000..dfe2683fba Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/apirequest.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/apirequest.txt b/kf-protocol/kf-protocol-dump/test-data/apirequest.txt new file mode 100644 index 0000000000..0914d7771d --- /dev/null +++ b/kf-protocol/kf-protocol-dump/test-data/apirequest.txt @@ -0,0 +1,6 @@ +00000014 +00120001 +00000001 +000a636f +63737565 +65657231 \ No newline at end of file diff --git a/kf-protocol/kf-protocol-dump/test-data/commit-log-batch-1.log b/kf-protocol/kf-protocol-dump/test-data/commit-log-batch-1.log new file mode 100644 index 0000000000..b0ab2b5c2a Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/commit-log-batch-1.log differ diff --git a/kf-protocol/kf-protocol-dump/test-data/commit-log-batch-2.log b/kf-protocol/kf-protocol-dump/test-data/commit-log-batch-2.log new file mode 100644 index 0000000000..ae60e38c2d Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/commit-log-batch-2.log differ diff --git a/kf-protocol/kf-protocol-dump/test-data/commit-log-time-2.timeindex b/kf-protocol/kf-protocol-dump/test-data/commit-log-time-2.timeindex new file mode 100644 index 0000000000..66f900cc1d Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/commit-log-time-2.timeindex differ diff --git a/kf-protocol/kf-protocol-dump/test-data/create-topic-1-update-metadata.kafka.bin b/kf-protocol/kf-protocol-dump/test-data/create-topic-1-update-metadata.kafka.bin new file mode 100644 index 0000000000..e86772079f Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/create-topic-1-update-metadata.kafka.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/create-topic-2-leader_isr.bin b/kf-protocol/kf-protocol-dump/test-data/create-topic-2-leader_isr.bin new file mode 100644 index 0000000000..2f36243614 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/create-topic-2-leader_isr.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/create-topic-3-update-metadata.bin b/kf-protocol/kf-protocol-dump/test-data/create-topic-3-update-metadata.bin new file mode 100644 index
0000000000..8ea592293c Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/create-topic-3-update-metadata.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-follower-empty-1.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-follower-empty-1.bin new file mode 100644 index 0000000000..567c2627a8 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-follower-empty-1.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-out.txt b/kf-protocol/kf-protocol-dump/test-data/fetch-out.txt new file mode 100644 index 0000000000..e15183205e --- /dev/null +++ b/kf-protocol/kf-protocol-dump/test-data/fetch-out.txt @@ -0,0 +1,161 @@ +request: FetchRequest( + RequestMessage { + header: RequestHeader { + api_key: Fetch, + api_version: 7, + correlation_id: 9, + client_id: "consumer-1" + }, + request: FetchRequest { + replica_id: -1, + max_wait_time: 500, + min_bytes: 1, + max_bytes: 52428800, + isolation_level: 0, + session_id: 0, + session_epoch: 0, + topics: [ + TopicRequest { + topic: "test", + partitions: [ + PartitionRequest { + partition: 0, + fetch_request: 0, + log_start_offset: -1, + partition_max_bytes: 4503599627370496 + } + ] + } + ], + forgotten_topics: [] + }, + data: PhantomData + } +) +response ResponseMessage { + correlation_id: 9, + response: FetchResponse { + throttle_time_ms: 0, + error_code: 0, + session_id: 190659252, + responses: [ + FetchTopicResponse { + topic: "test", + partition_responses: [ + PartitionResponse { + header: PartitionHeader { + partition: 0, + error_code: 0, + high_watermark: 7, + last_stable_offset: -1, + log_start_offset: 0, + aborted_transactions: [] + }, + record: RecordSet { + length: 457, + batches: [ + DefaultBatch { + base_offset: 0, + length: 101, + header: BatchHeader { + partition_leader_epoch: 0, + magic: 2, + crc: -401493314, + attributes: 0, + last_offset_delta: 1, + first_timestamp: 1552430401926, + max_time_stamp: 1552430402393, + producer_id: -1, + producer_epoch: -1, + first_sequence: -1 + }, + records: [ + "quick brown fox jump over the lazy", + "dog" + ] + }, + DefaultBatch { + base_offset: 2, + length: 75, + header: BatchHeader { + partition_leader_epoch: 0, + magic: 2, + crc: -256726549, + attributes: 0, + last_offset_delta: 0, + first_timestamp: 1552430418228, + max_time_stamp: 1552430418228, + producer_id: -1, + producer_epoch: -1, + first_sequence: -1 + }, + records: [ + "in addition to text" + ] + }, + DefaultBatch { + base_offset: 3, + length: 65, + header: BatchHeader { + partition_leader_epoch: 0, + magic: 2, + crc: -1540681629, + attributes: 0, + last_offset_delta: 0, + first_timestamp: 1552430427082, + max_time_stamp: 1552430427082, + producer_id: -1, + producer_epoch: -1, + first_sequence: -1 + }, + records: [ + "lwkwwkeee" + ] + }, + DefaultBatch { + base_offset: 4, + length: 94, + header: BatchHeader { + partition_leader_epoch: 0, + magic: 2, + crc: -816240605, + attributes: 0, + last_offset_delta: 1, + first_timestamp: 1552430434148, + max_time_stamp: 1552430434945, + producer_id: -1, + producer_epoch: -1, + first_sequence: -1 + }, + records: [ + "alwke fekejfek fjiefje", + "ekfjekfe" + ] + }, + DefaultBatch { + base_offset: 6, + length: 62, + header: BatchHeader { + partition_leader_epoch: 0, + magic: 2, + crc: 954585130, + attributes: 0, + last_offset_delta: 0, + first_timestamp: 1552430435666, + max_time_stamp: 1552430435666, + producer_id: -1, + producer_epoch: -1, + first_sequence: -1 + }, + records: [ + "jfeeie" + ] + } + ] + } + } + ] + } + ] + } +} diff 
--git a/kf-protocol/kf-protocol-dump/test-data/fetch-request-9093-rep4-v7.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-request-9093-rep4-v7.bin new file mode 100644 index 0000000000..944c66ed37 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-request-9093-rep4-v7.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-request-9094-rep4-v7.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-request-9094-rep4-v7.bin new file mode 100644 index 0000000000..944c66ed37 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-request-9094-rep4-v7.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-request-follow-empty-2.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-request-follow-empty-2.bin new file mode 100644 index 0000000000..0c11e1b48d Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-request-follow-empty-2.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-request-follower-some1.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-request-follower-some1.bin new file mode 100644 index 0000000000..dddd8b5f30 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-request-follower-some1.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-request-part1-v7.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-request-part1-v7.bin new file mode 100644 index 0000000000..9592cfb498 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-request-part1-v7.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-request-part2-v7.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-request-part2-v7.bin new file mode 100644 index 0000000000..d15568754b Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-request-part2-v7.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-request-v7-offset0.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-request-v7-offset0.bin new file mode 100644 index 0000000000..903aba807d Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-request-v7-offset0.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-request-v7-offset1.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-request-v7-offset1.bin new file mode 100644 index 0000000000..964c0f74f0 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-request-v7-offset1.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-request-v7.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-request-v7.bin new file mode 100644 index 0000000000..903aba807d Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-request-v7.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-request1.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-request1.bin new file mode 100644 index 0000000000..43cf8f0855 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-request1.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-response-9093-rep4-v7.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-response-9093-rep4-v7.bin new file mode 100644 index 0000000000..ea37657a35 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-response-9093-rep4-v7.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-response-follow-empty-1.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-response-follow-empty-1.bin new file mode 100644 index 0000000000..8173a9065c Binary files 
/dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-response-follow-empty-1.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-response-follow-empty-2.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-response-follow-empty-2.bin new file mode 100644 index 0000000000..a8a29f1564 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-response-follow-empty-2.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-response-follower-some1.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-response-follower-some1.bin new file mode 100644 index 0000000000..d6f134249e Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-response-follower-some1.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-response-part1-v7.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-response-part1-v7.bin new file mode 100644 index 0000000000..d4d41a41e2 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-response-part1-v7.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-response-v7-offset0.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-response-v7-offset0.bin new file mode 100644 index 0000000000..0acaf9a684 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-response-v7-offset0.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-response-v7-offset1.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-response-v7-offset1.bin new file mode 100644 index 0000000000..5275c5178c Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-response-v7-offset1.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-response-v7.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-response-v7.bin new file mode 100644 index 0000000000..af3296de32 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-response-v7.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/fetch-response1.bin b/kf-protocol/kf-protocol-dump/test-data/fetch-response1.bin new file mode 100644 index 0000000000..de966affc1 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/fetch-response1.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/joingroup-request-v2.bin b/kf-protocol/kf-protocol-dump/test-data/joingroup-request-v2.bin new file mode 100644 index 0000000000..415bb2d794 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/joingroup-request-v2.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/joingroup-response-v2.bin b/kf-protocol/kf-protocol-dump/test-data/joingroup-response-v2.bin new file mode 100644 index 0000000000..941884b51f Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/joingroup-response-v2.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/listoffset-request-9094-rep4-v5.bin b/kf-protocol/kf-protocol-dump/test-data/listoffset-request-9094-rep4-v5.bin new file mode 100644 index 0000000000..6a67b174a5 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/listoffset-request-9094-rep4-v5.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/listoffset-request.bin b/kf-protocol/kf-protocol-dump/test-data/listoffset-request.bin new file mode 100644 index 0000000000..d0a7d7b372 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/listoffset-request.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/listoffset-response-9094-rep4-v5.bin 
b/kf-protocol/kf-protocol-dump/test-data/listoffset-response-9094-rep4-v5.bin new file mode 100644 index 0000000000..6ce2d386e2 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/listoffset-response-9094-rep4-v5.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/listoffset-response-v2.bin b/kf-protocol/kf-protocol-dump/test-data/listoffset-response-v2.bin new file mode 100644 index 0000000000..78f24aa6ec Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/listoffset-response-v2.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/metadata-request-v5.bin b/kf-protocol/kf-protocol-dump/test-data/metadata-request-v5.bin new file mode 100644 index 0000000000..69d196571a Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/metadata-request-v5.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/metadata-response-v5.bin b/kf-protocol/kf-protocol-dump/test-data/metadata-response-v5.bin new file mode 100644 index 0000000000..285d813636 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/metadata-response-v5.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/produc-req.bin b/kf-protocol/kf-protocol-dump/test-data/produc-req.bin new file mode 100644 index 0000000000..8809c91e39 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/produc-req.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/produce-offset-req.bin b/kf-protocol/kf-protocol-dump/test-data/produce-offset-req.bin new file mode 100644 index 0000000000..8809c91e39 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/produce-offset-req.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/produce-request.bin b/kf-protocol/kf-protocol-dump/test-data/produce-request.bin new file mode 100644 index 0000000000..a5ff69ab8d Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/produce-request.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/topic-list-request1.bin b/kf-protocol/kf-protocol-dump/test-data/topic-list-request1.bin new file mode 100644 index 0000000000..ed472826e8 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/topic-list-request1.bin differ diff --git a/kf-protocol/kf-protocol-dump/test-data/topic-list-response1.bin b/kf-protocol/kf-protocol-dump/test-data/topic-list-response1.bin new file mode 100644 index 0000000000..686e539f38 Binary files /dev/null and b/kf-protocol/kf-protocol-dump/test-data/topic-list-response1.bin differ diff --git a/kf-protocol/kf-protocol-message/.gitignore b/kf-protocol/kf-protocol-message/.gitignore new file mode 100644 index 0000000000..1c12cf9bd4 --- /dev/null +++ b/kf-protocol/kf-protocol-message/.gitignore @@ -0,0 +1 @@ +schema diff --git a/kf-protocol/kf-protocol-message/Cargo.toml b/kf-protocol/kf-protocol-message/Cargo.toml new file mode 100644 index 0000000000..860d87bd28 --- /dev/null +++ b/kf-protocol/kf-protocol-message/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "kf-protocol-message" +edition = "2018" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + + + +[dependencies] +log = "0.4.6" +crc32c = "0.4.0" +content_inspector = "0.2.4" +serde = { version ="1.0.82", features = ['derive'] } +paste = "0.1.5" +utils = { path= "../../utils"} +kf-protocol-derive = { path = "../kf-protocol-derive" } +kf-protocol = { path = "../kf-protocol-core", package = "kf-protocol-core" } +kf-protocol-api = { path = "../kf-protocol-api"} + +[build-dependencies] +kf-protocol-build = { path = 
"../kf-protocol-build"} + diff --git a/kf-protocol/kf-protocol-message/Makefile b/kf-protocol/kf-protocol-message/Makefile new file mode 100644 index 0000000000..148cca7e39 --- /dev/null +++ b/kf-protocol/kf-protocol-message/Makefile @@ -0,0 +1,2 @@ +generate: + ../../target/debug/kfspec2code generate -i ../../../kafka/clients/src/main/resources/common/message/ -d ../kf-protocol-message/src/kf_code_gen/ \ No newline at end of file diff --git a/kf-protocol/kf-protocol-message/README.md b/kf-protocol/kf-protocol-message/README.md new file mode 100644 index 0000000000..de1fcf0e53 --- /dev/null +++ b/kf-protocol/kf-protocol-message/README.md @@ -0,0 +1,9 @@ +# Build + +Create schema directory "schema" + +Copy protocol schema .json files to it + +then do build + +```cargo build``` \ No newline at end of file diff --git a/kf-protocol/kf-protocol-message/src/fetch_handler.rs b/kf-protocol/kf-protocol-message/src/fetch_handler.rs new file mode 100644 index 0000000000..0ee8f059c4 --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/fetch_handler.rs @@ -0,0 +1,45 @@ +use std::fmt::Debug; + +use kf_protocol::Decoder; +use kf_protocol::Encoder; + +use kf_protocol_api::DefaultRecords; + +use crate::fetch::{KfFetchResponse, KfFetchRequest}; +use crate::fetch::FetchableTopicResponse; + +pub type DefaultKfFetchRequest = KfFetchRequest; +pub type DefaultKfFetchResponse = KfFetchResponse; + +// ----------------------------------- +// Implementation +// ----------------------------------- + +impl KfFetchResponse +where + R: Encoder + Decoder + Debug, +{ + pub fn find_topic(&self, topic: &String) -> Option<&FetchableTopicResponse> + where + R: Debug, + { + for r_topic in &self.topics { + if r_topic.name == *topic { + return Some(r_topic); + } + } + None + } +} + +#[cfg(test)] +mod test { + + use super::DefaultKfFetchRequest; + + #[test] + fn test_request() { + let _ = DefaultKfFetchRequest::default(); + assert!(true); + } +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/add_offsets_to_txn.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/add_offsets_to_txn.rs new file mode 100644 index 0000000000..76e78362f5 --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/add_offsets_to_txn.rs @@ -0,0 +1,58 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. +use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfAddOffsetsToTxnRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfAddOffsetsToTxnRequest { + /// The transactional id corresponding to the transaction. + pub transactional_id: String, + + /// Current producer id in use by the transactional id. + pub producer_id: i64, + + /// Current epoch associated with the producer id. + pub producer_epoch: i16, + + /// The unique group identifier. + pub group_id: String, +} + +// ----------------------------------- +// KfAddOffsetsToTxnResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfAddOffsetsToTxnResponse { + /// Duration in milliseconds for which the request was throttled due to a quota violation, or + /// zero if the request did not violate any quota. 
+
+#[cfg(test)]
+mod test {
+
+    use super::DefaultKfFetchRequest;
+
+    #[test]
+    fn test_request() {
+        let _ = DefaultKfFetchRequest::default();
+    }
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/add_offsets_to_txn.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/add_offsets_to_txn.rs
new file mode 100644
index 0000000000..76e78362f5
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/add_offsets_to_txn.rs
@@ -0,0 +1,58 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfAddOffsetsToTxnRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfAddOffsetsToTxnRequest {
+    /// The transactional id corresponding to the transaction.
+    pub transactional_id: String,
+
+    /// Current producer id in use by the transactional id.
+    pub producer_id: i64,
+
+    /// Current epoch associated with the producer id.
+    pub producer_epoch: i16,
+
+    /// The unique group identifier.
+    pub group_id: String,
+}
+
+// -----------------------------------
+// KfAddOffsetsToTxnResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfAddOffsetsToTxnResponse {
+    /// Duration in milliseconds for which the request was throttled due to a quota violation, or
+    /// zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+
+    /// The response error code, or 0 if there was no error.
+    pub error_code: ErrorCode,
+}
+
+// -----------------------------------
+// Implementation - KfAddOffsetsToTxnRequest
+// -----------------------------------
+
+impl Request for KfAddOffsetsToTxnRequest {
+    const API_KEY: u16 = 25;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 1;
+    const DEFAULT_API_VERSION: i16 = 1;
+
+    type Response = KfAddOffsetsToTxnResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/add_partitions_to_txn.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/add_partitions_to_txn.rs
new file mode 100644
index 0000000000..3c57723a85
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/add_partitions_to_txn.rs
@@ -0,0 +1,85 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfAddPartitionsToTxnRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfAddPartitionsToTxnRequest {
+    /// The transactional id corresponding to the transaction.
+    pub transactional_id: String,
+
+    /// Current producer id in use by the transactional id.
+    pub producer_id: i64,
+
+    /// Current epoch associated with the producer id.
+    pub producer_epoch: i16,
+
+    /// The partitions to add to the transaction.
+    pub topics: Vec<AddPartitionsToTxnTopic>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct AddPartitionsToTxnTopic {
+    /// The name of the topic.
+    pub name: String,
+
+    /// The partition indexes to add to the transaction.
+    pub partitions: Vec<i32>,
+}
+
+// -----------------------------------
+// KfAddPartitionsToTxnResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfAddPartitionsToTxnResponse {
+    /// Duration in milliseconds for which the request was throttled due to a quota violation, or
+    /// zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+
+    /// The results for each topic.
+    pub results: Vec<AddPartitionsToTxnTopicResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct AddPartitionsToTxnTopicResult {
+    /// The topic name.
+    pub name: String,
+
+    /// The results for each partition.
+    pub results: Vec<AddPartitionsToTxnPartitionResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct AddPartitionsToTxnPartitionResult {
+    /// The partition indexes.
+    pub partition_index: i32,
+
+    /// The response error code.
+    pub error_code: ErrorCode,
+}
+
+// -----------------------------------
+// Implementation - KfAddPartitionsToTxnRequest
+// -----------------------------------
+
+impl Request for KfAddPartitionsToTxnRequest {
+    const API_KEY: u16 = 24;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 1;
+    const DEFAULT_API_VERSION: i16 = 1;
+
+    type Response = KfAddPartitionsToTxnResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/alter_configs.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/alter_configs.rs
new file mode 100644
index 0000000000..e020b54b22
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/alter_configs.rs
@@ -0,0 +1,88 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfAlterConfigsRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfAlterConfigsRequest {
+    /// The updates for each resource.
+    pub resources: Vec<AlterConfigsResource>,
+
+    /// True if we should validate the request, but not change the configurations.
+    pub validate_only: bool,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct AlterConfigsResource {
+    /// The resource type.
+    pub resource_type: i8,
+
+    /// The resource name.
+    pub resource_name: String,
+
+    /// The configurations.
+    pub configs: Vec<AlterableConfig>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct AlterableConfig {
+    /// The configuration key name.
+    pub name: String,
+
+    /// The value to set for the configuration key.
+    pub value: Option<String>,
+}
+
+// -----------------------------------
+// KfAlterConfigsResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfAlterConfigsResponse {
+    /// Duration in milliseconds for which the request was throttled due to a quota violation, or
+    /// zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+
+    /// The responses for each resource.
+    pub resources: Vec<AlterConfigsResourceResponse>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct AlterConfigsResourceResponse {
+    /// The resource error code.
+    pub error_code: ErrorCode,
+
+    /// The resource error message, or null if there was no error.
+    pub error_message: Option<String>,
+
+    /// The resource type.
+    pub resource_type: i8,
+
+    /// The resource name.
+    pub resource_name: String,
+}
+
+// -----------------------------------
+// Implementation - KfAlterConfigsRequest
+// -----------------------------------
+
+impl Request for KfAlterConfigsRequest {
+    const API_KEY: u16 = 33;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 1;
+    const DEFAULT_API_VERSION: i16 = 1;
+
+    type Response = KfAlterConfigsResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/alter_replica_log_dirs.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/alter_replica_log_dirs.rs
new file mode 100644
index 0000000000..f7f4b2ea9d
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/alter_replica_log_dirs.rs
@@ -0,0 +1,85 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfAlterReplicaLogDirsRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfAlterReplicaLogDirsRequest {
+    /// The alterations to make for each directory.
+    pub dirs: Vec<AlterReplicaLogDir>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct AlterReplicaLogDir {
+    /// The absolute directory path.
+    pub path: String,
+
+    /// The topics to add to the directory.
+    pub topics: Vec<AlterReplicaLogDirTopic>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct AlterReplicaLogDirTopic {
+    /// The topic name.
+    pub name: String,
+
+    /// The partition indexes.
+    pub partitions: Vec<i32>,
+}
+
+// -----------------------------------
+// KfAlterReplicaLogDirsResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfAlterReplicaLogDirsResponse {
+    /// Duration in milliseconds for which the request was throttled due to a quota violation, or
+    /// zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+
+    /// The results for each topic.
+    pub results: Vec<AlterReplicaLogDirTopicResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct AlterReplicaLogDirTopicResult {
+    /// The name of the topic.
+    pub topic_name: String,
+
+    /// The results for each partition.
+    pub partitions: Vec<AlterReplicaLogDirPartitionResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct AlterReplicaLogDirPartitionResult {
+    /// The partition index.
+    pub partition_index: i32,
+
+    /// The error code, or 0 if there was no error.
+    pub error_code: ErrorCode,
+}
+
+// -----------------------------------
+// Implementation - KfAlterReplicaLogDirsRequest
+// -----------------------------------
+
+impl Request for KfAlterReplicaLogDirsRequest {
+    const API_KEY: u16 = 34;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 1;
+    const DEFAULT_API_VERSION: i16 = 1;
+
+    type Response = KfAlterReplicaLogDirsResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/api_versions.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/api_versions.rs
new file mode 100644
index 0000000000..4d24ea9b45
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/api_versions.rs
@@ -0,0 +1,62 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfApiVersionsRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfApiVersionsRequest {}
+
+// -----------------------------------
+// KfApiVersionsResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfApiVersionsResponse {
+    /// The top-level error code.
+    pub error_code: ErrorCode,
+
+    /// The APIs supported by the broker.
+    pub api_keys: Vec<ApiVersionsResponseKey>,
+
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    #[fluvio_kf(min_version = 1, ignorable)]
+    pub throttle_time_ms: i32,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct ApiVersionsResponseKey {
+    /// The API index.
+    pub index: i16,
+
+    /// The minimum supported version, inclusive.
+    pub min_version: i16,
+
+    /// The maximum supported version, inclusive.
+    pub max_version: i16,
+}
+
+// -----------------------------------
+// Implementation - KfApiVersionsRequest
+// -----------------------------------
+
+impl Request for KfApiVersionsRequest {
+    const API_KEY: u16 = 18;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 2;
+    const DEFAULT_API_VERSION: i16 = 2;
+
+    type Response = KfApiVersionsResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/controlled_shutdown.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/controlled_shutdown.rs
new file mode 100644
index 0000000000..089dc29081
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/controlled_shutdown.rs
@@ -0,0 +1,61 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfControlledShutdownRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfControlledShutdownRequest {
+    /// The id of the broker for which controlled shutdown has been requested.
+    pub broker_id: i32,
+
+    /// The broker epoch.
+    #[fluvio_kf(min_version = 2, ignorable)]
+    pub broker_epoch: i64,
+}
+
+// -----------------------------------
+// KfControlledShutdownResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfControlledShutdownResponse {
+    /// The top-level error code.
+    pub error_code: ErrorCode,
+
+    /// The partitions that the broker still leads.
+    pub remaining_partitions: Vec<RemainingPartition>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct RemainingPartition {
+    /// The name of the topic.
+    pub topic_name: String,
+
+    /// The index of the partition.
+    pub partition_index: i32,
+}
+
+// -----------------------------------
+// Implementation - KfControlledShutdownRequest
+// -----------------------------------
+
+impl Request for KfControlledShutdownRequest {
+    const API_KEY: u16 = 7;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 2;
+    const DEFAULT_API_VERSION: i16 = 2;
+
+    type Response = KfControlledShutdownResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/create_acls.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/create_acls.rs
new file mode 100644
index 0000000000..075dea9ca1
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/create_acls.rs
@@ -0,0 +1,83 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfCreateAclsRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfCreateAclsRequest {
+    /// The ACLs that we want to create.
+    pub creations: Vec<CreatableAcl>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct CreatableAcl {
+    /// The type of the resource.
+    pub resource_type: i8,
+
+    /// The resource name for the ACL.
+    pub resource_name: String,
+
+    /// The pattern type for the ACL.
+    #[fluvio_kf(min_version = 1)]
+    pub resource_pattern_type: i8,
+
+    /// The principal for the ACL.
+    pub principal: String,
+
+    /// The host for the ACL.
+    pub host: String,
+
+    /// The operation type for the ACL (read, write, etc.).
+    pub operation: i8,
+
+    /// The permission type for the ACL (allow, deny, etc.).
+    pub permission_type: i8,
+}
+
+// -----------------------------------
+// KfCreateAclsResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfCreateAclsResponse {
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+
+    /// The results for each ACL creation.
+    pub results: Vec<CreatableAclResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct CreatableAclResult {
+    /// The result error, or zero if there was no error.
+    pub error_code: ErrorCode,
+
+    /// The result message, or null if there was no error.
+    pub error_message: Option<String>,
+}
+
+// -----------------------------------
+// Implementation - KfCreateAclsRequest
+// -----------------------------------
+
+impl Request for KfCreateAclsRequest {
+    const API_KEY: u16 = 30;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 1;
+    const DEFAULT_API_VERSION: i16 = 1;
+
+    type Response = KfCreateAclsResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/create_delegation_token.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/create_delegation_token.rs
new file mode 100644
index 0000000000..f3a871c496
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/create_delegation_token.rs
@@ -0,0 +1,82 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfCreateDelegationTokenRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfCreateDelegationTokenRequest {
+    /// A list of those who are allowed to renew this token before it expires.
+    pub renewers: Vec<CreatableRenewers>,
+
+    /// The maximum lifetime of the token in milliseconds, or -1 to use the server side default.
+    pub max_lifetime_ms: i64,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct CreatableRenewers {
+    /// The type of the Kafka principal.
+    pub principal_type: String,
+
+    /// The name of the Kafka principal.
+    pub principal_name: String,
+}
+
+// -----------------------------------
+// KfCreateDelegationTokenResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfCreateDelegationTokenResponse {
+    /// The top-level error, or zero if there was no error.
+    pub error_code: ErrorCode,
+
+    /// The principal type of the token owner.
+    pub principal_type: String,
+
+    /// The name of the token owner.
+    pub principal_name: String,
+
+    /// When this token was generated.
+    pub issue_timestamp_ms: i64,
+
+    /// When this token expires.
+    pub expiry_timestamp_ms: i64,
+
+    /// The maximum lifetime of this token.
+    pub max_timestamp_ms: i64,
+
+    /// The token UUID.
+    pub token_name: String,
+
+    /// HMAC of the delegation token.
+    pub hmac: Vec<u8>,
+
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+}
+
+// -----------------------------------
+// Implementation - KfCreateDelegationTokenRequest
+// -----------------------------------
+
+impl Request for KfCreateDelegationTokenRequest {
+    const API_KEY: u16 = 38;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 1;
+    const DEFAULT_API_VERSION: i16 = 1;
+
+    type Response = KfCreateDelegationTokenResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/create_partitions.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/create_partitions.rs
new file mode 100644
index 0000000000..2153fae2f4
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/create_partitions.rs
@@ -0,0 +1,85 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfCreatePartitionsRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfCreatePartitionsRequest {
+    /// Each topic that we want to create new partitions inside.
+    pub topics: Vec<CreatePartitionsTopic>,
+
+    /// The time in ms to wait for the partitions to be created.
+    pub timeout_ms: i32,
+
+    /// If true, then validate the request, but don't actually increase the number of partitions.
+    pub validate_only: bool,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct CreatePartitionsTopic {
+    /// The topic name.
+    pub name: String,
+
+    /// The new partition count.
+    pub count: i32,
+
+    /// The new partition assignments.
+    pub assignments: Option<Vec<CreatePartitionsAssignment>>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct CreatePartitionsAssignment {
+    /// The assigned broker IDs.
+    pub broker_ids: Vec<i32>,
+}
+
+// -----------------------------------
+// KfCreatePartitionsResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfCreatePartitionsResponse {
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+
+    /// The partition creation results for each topic.
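+    ///
+    /// A hedged sketch of scanning these results for failures (illustrative only,
+    /// not part of the generated spec; assumes a decoded `response`):
+    ///
+    /// ```ignore
+    /// for result in &response.results {
+    ///     // a non-zero error code means the partition change for this topic failed
+    ///     println!("{}: {:?} {:?}", result.name, result.error_code, result.error_message);
+    /// }
+    /// ```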
+    pub results: Vec<CreatePartitionsTopicResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct CreatePartitionsTopicResult {
+    /// The topic name.
+    pub name: String,
+
+    /// The result error, or zero if there was no error.
+    pub error_code: ErrorCode,
+
+    /// The result message, or null if there was no error.
+    pub error_message: Option<String>,
+}
+
+// -----------------------------------
+// Implementation - KfCreatePartitionsRequest
+// -----------------------------------
+
+impl Request for KfCreatePartitionsRequest {
+    const API_KEY: u16 = 37;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 1;
+    const DEFAULT_API_VERSION: i16 = 1;
+
+    type Response = KfCreatePartitionsResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/create_topics.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/create_topics.rs
new file mode 100644
index 0000000000..de307fb9ec
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/create_topics.rs
@@ -0,0 +1,108 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfCreateTopicsRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfCreateTopicsRequest {
+    /// The topics to create.
+    pub topics: Vec<CreatableTopic>,
+
+    /// How long to wait in milliseconds before timing out the request.
+    pub timeout_ms: i32,
+
+    /// If true, check that the topics can be created as specified, but don't create anything.
+    #[fluvio_kf(min_version = 1)]
+    pub validate_only: bool,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct CreatableTopic {
+    /// The topic name.
+    pub name: String,
+
+    /// The number of partitions to create in the topic, or -1 if we are specifying a manual
+    /// partition assignment.
+    pub num_partitions: i32,
+
+    /// The number of replicas to create for each partition in the topic, or -1 if we are specifying
+    /// a manual partition assignment.
+    pub replication_factor: i16,
+
+    /// The manual partition assignment, or the empty array if we are using automatic assignment.
+    pub assignments: Vec<CreatableReplicaAssignment>,
+
+    /// The custom topic configurations to set.
+    pub configs: Vec<CreateableTopicConfig>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct CreatableReplicaAssignment {
+    /// The partition index.
+    pub partition_index: i32,
+
+    /// The brokers to place the partition on.
+    pub broker_ids: Vec<i32>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct CreateableTopicConfig {
+    /// The configuration name.
+    pub name: String,
+
+    /// The configuration value.
+    pub value: Option<String>,
+}
+
+// -----------------------------------
+// KfCreateTopicsResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfCreateTopicsResponse {
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    #[fluvio_kf(min_version = 2, ignorable)]
+    pub throttle_time_ms: i32,
+
+    /// Results for each topic we tried to create.
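+    ///
+    /// For context, a request producing these results might be built as follows
+    /// (illustrative sketch only; assumes the `KfDefault` derive supplies `Default`):
+    ///
+    /// ```ignore
+    /// let request = KfCreateTopicsRequest {
+    ///     topics: vec![CreatableTopic {
+    ///         name: "my-topic".to_string(),
+    ///         num_partitions: 3,
+    ///         replication_factor: 2,
+    ///         ..Default::default() // automatic assignment, no custom configs
+    ///     }],
+    ///     timeout_ms: 5000,
+    ///     validate_only: false,
+    /// };
+    /// ```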
+    pub topics: Vec<CreatableTopicResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct CreatableTopicResult {
+    /// The topic name.
+    pub name: String,
+
+    /// The error code, or 0 if there was no error.
+    pub error_code: ErrorCode,
+
+    /// The error message, or null if there was no error.
+    #[fluvio_kf(min_version = 1, ignorable)]
+    pub error_message: Option<String>,
+}
+
+// -----------------------------------
+// Implementation - KfCreateTopicsRequest
+// -----------------------------------
+
+impl Request for KfCreateTopicsRequest {
+    const API_KEY: u16 = 19;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 3;
+    const DEFAULT_API_VERSION: i16 = 3;
+
+    type Response = KfCreateTopicsResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/delete_acls.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/delete_acls.rs
new file mode 100644
index 0000000000..7053e3aa52
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/delete_acls.rs
@@ -0,0 +1,117 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfDeleteAclsRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDeleteAclsRequest {
+    /// The filters to use when deleting ACLs.
+    pub filters: Vec<DeleteAclsFilter>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DeleteAclsFilter {
+    /// The resource type.
+    pub resource_type_filter: i8,
+
+    /// The resource name.
+    pub resource_name_filter: Option<String>,
+
+    /// The pattern type.
+    #[fluvio_kf(min_version = 1)]
+    pub pattern_type_filter: i8,
+
+    /// The principal filter, or null to accept all principals.
+    pub principal_filter: Option<String>,
+
+    /// The host filter, or null to accept all hosts.
+    pub host_filter: Option<String>,
+
+    /// The ACL operation.
+    pub operation: i8,
+
+    /// The permission type.
+    pub permission_type: i8,
+}
+
+// -----------------------------------
+// KfDeleteAclsResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDeleteAclsResponse {
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+
+    /// The results for each filter.
+    pub filter_results: Vec<DeleteAclsFilterResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DeleteAclsFilterResult {
+    /// The error code, or 0 if the filter succeeded.
+    pub error_code: ErrorCode,
+
+    /// The error message, or null if the filter succeeded.
+    pub error_message: Option<String>,
+
+    /// The ACLs which matched this filter.
+    pub matching_acls: Vec<DeleteAclsMatchingAcl>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DeleteAclsMatchingAcl {
+    /// The deletion error code, or 0 if the deletion succeeded.
+    pub error_code: ErrorCode,
+
+    /// The deletion error message, or null if the deletion succeeded.
+    pub error_message: Option<String>,
+
+    /// The ACL resource type.
+    pub resource_type: i8,
+
+    /// The ACL resource name.
+    pub resource_name: String,
+
+    /// The ACL resource pattern type.
+    #[fluvio_kf(min_version = 1)]
+    pub pattern_type: i8,
+
+    /// The ACL principal.
+    pub principal: String,
+
+    /// The ACL host.
+    pub host: String,
+
+    /// The ACL operation.
+    pub operation: i8,
+
+    /// The ACL permission type.
+    pub permission_type: i8,
+}
+
+// -----------------------------------
+// Implementation - KfDeleteAclsRequest
+// -----------------------------------
+
+impl Request for KfDeleteAclsRequest {
+    const API_KEY: u16 = 31;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 1;
+    const DEFAULT_API_VERSION: i16 = 1;
+
+    type Response = KfDeleteAclsResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/delete_groups.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/delete_groups.rs
new file mode 100644
index 0000000000..09cb060301
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/delete_groups.rs
@@ -0,0 +1,58 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfDeleteGroupsRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDeleteGroupsRequest {
+    /// The group names to delete.
+    pub groups_names: Vec<String>,
+}
+
+// -----------------------------------
+// KfDeleteGroupsResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDeleteGroupsResponse {
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+
+    /// The deletion results
+    pub results: Vec<DeletableGroupResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DeletableGroupResult {
+    /// The group id
+    pub group_id: String,
+
+    /// The deletion error, or 0 if the deletion succeeded.
+    pub error_code: ErrorCode,
+}
+
+// -----------------------------------
+// Implementation - KfDeleteGroupsRequest
+// -----------------------------------
+
+impl Request for KfDeleteGroupsRequest {
+    const API_KEY: u16 = 42;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 1;
+    const DEFAULT_API_VERSION: i16 = 1;
+
+    type Response = KfDeleteGroupsResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/delete_records.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/delete_records.rs
new file mode 100644
index 0000000000..a5498bcf2a
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/delete_records.rs
@@ -0,0 +1,91 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfDeleteRecordsRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDeleteRecordsRequest {
+    /// Each topic that we want to delete records from.
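+    ///
+    /// An illustrative construction (not part of the generated spec):
+    ///
+    /// ```ignore
+    /// let request = KfDeleteRecordsRequest {
+    ///     topics: vec![DeleteRecordsTopic {
+    ///         name: "my-topic".to_string(),
+    ///         // truncate partition 0 up to (but not including) offset 100
+    ///         partitions: vec![DeleteRecordsPartition { partition_index: 0, offset: 100 }],
+    ///     }],
+    ///     timeout_ms: 5000,
+    /// };
+    /// ```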
+    pub topics: Vec<DeleteRecordsTopic>,
+
+    /// How long to wait for the deletion to complete, in milliseconds.
+    pub timeout_ms: i32,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DeleteRecordsTopic {
+    /// The topic name.
+    pub name: String,
+
+    /// Each partition that we want to delete records from.
+    pub partitions: Vec<DeleteRecordsPartition>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DeleteRecordsPartition {
+    /// The partition index.
+    pub partition_index: i32,
+
+    /// The deletion offset.
+    pub offset: i64,
+}
+
+// -----------------------------------
+// KfDeleteRecordsResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDeleteRecordsResponse {
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+
+    /// Each topic that we wanted to delete records from.
+    pub topics: Vec<DeleteRecordsTopicResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DeleteRecordsTopicResult {
+    /// The topic name.
+    pub name: String,
+
+    /// Each partition that we wanted to delete records from.
+    pub partitions: Vec<DeleteRecordsPartitionResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DeleteRecordsPartitionResult {
+    /// The partition index.
+    pub partition_index: i32,
+
+    /// The partition low water mark.
+    pub low_watermark: i64,
+
+    /// The deletion error code, or 0 if the deletion succeeded.
+    pub error_code: ErrorCode,
+}
+
+// -----------------------------------
+// Implementation - KfDeleteRecordsRequest
+// -----------------------------------
+
+impl Request for KfDeleteRecordsRequest {
+    const API_KEY: u16 = 21;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 1;
+    const DEFAULT_API_VERSION: i16 = 1;
+
+    type Response = KfDeleteRecordsResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/delete_topics.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/delete_topics.rs
new file mode 100644
index 0000000000..eda87578ee
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/delete_topics.rs
@@ -0,0 +1,62 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfDeleteTopicsRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDeleteTopicsRequest {
+    /// The names of the topics to delete
+    pub topic_names: Vec<String>,
+
+    /// The length of time in milliseconds to wait for the deletions to complete.
+    pub timeout_ms: i32,
+}
+
+// -----------------------------------
+// KfDeleteTopicsResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDeleteTopicsResponse {
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    #[fluvio_kf(min_version = 1)]
+    pub throttle_time_ms: i32,
+
+    /// The results for each topic.
+    pub responses: Vec<DeletableTopicResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DeletableTopicResult {
+    /// The topic name
+    pub name: String,
+
+    /// The deletion error, or 0 if the deletion succeeded.
+    pub error_code: ErrorCode,
+}
+
+// -----------------------------------
+// Implementation - KfDeleteTopicsRequest
+// -----------------------------------
+
+impl Request for KfDeleteTopicsRequest {
+    const API_KEY: u16 = 20;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 3;
+    const DEFAULT_API_VERSION: i16 = 3;
+
+    type Response = KfDeleteTopicsResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/describe_acls.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/describe_acls.rs
new file mode 100644
index 0000000000..e9e179a4b1
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/describe_acls.rs
@@ -0,0 +1,105 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfDescribeAclsRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDescribeAclsRequest {
+    /// The resource type.
+    pub resource_type: i8,
+
+    /// The resource name, or null to match any resource name.
+    pub resource_name_filter: Option<String>,
+
+    /// The resource pattern to match.
+    #[fluvio_kf(min_version = 1)]
+    pub resource_pattern_type: i8,
+
+    /// The principal to match, or null to match any principal.
+    pub principal_filter: Option<String>,
+
+    /// The host to match, or null to match any host.
+    pub host_filter: Option<String>,
+
+    /// The operation to match.
+    pub operation: i8,
+
+    /// The permission type to match.
+    pub permission_type: i8,
+}
+
+// -----------------------------------
+// KfDescribeAclsResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDescribeAclsResponse {
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+
+    /// The error code, or 0 if there was no error.
+    pub error_code: ErrorCode,
+
+    /// The error message, or null if there was no error.
+    pub error_message: Option<String>,
+
+    /// Each Resource that is referenced in an ACL.
+    pub resources: Vec<DescribeAclsResource>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DescribeAclsResource {
+    /// The resource type.
+    pub typ: i8,
+
+    /// The resource name.
+    pub name: String,
+
+    /// The resource pattern type.
+    #[fluvio_kf(min_version = 1)]
+    pub pattern_type: i8,
+
+    /// The ACLs.
+    pub acls: Vec<AclDescription>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct AclDescription {
+    /// The ACL principal.
+    pub principal: String,
+
+    /// The ACL host.
+    pub host: String,
+
+    /// The ACL operation.
+    pub operation: i8,
+
+    /// The ACL permission type.
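+    ///
+    /// The raw value follows Kafka's `AclPermissionType` numbering (an editor's
+    /// note, not part of the generated spec), e.g.:
+    ///
+    /// ```ignore
+    /// let label = match acl.permission_type {
+    ///     2 => "DENY",
+    ///     3 => "ALLOW",
+    ///     _ => "OTHER",
+    /// };
+    /// ```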
+    pub permission_type: i8,
+}
+
+// -----------------------------------
+// Implementation - KfDescribeAclsRequest
+// -----------------------------------
+
+impl Request for KfDescribeAclsRequest {
+    const API_KEY: u16 = 29;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 1;
+    const DEFAULT_API_VERSION: i16 = 1;
+
+    type Response = KfDescribeAclsResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/describe_configs.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/describe_configs.rs
new file mode 100644
index 0000000000..76f3fc46fc
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/describe_configs.rs
@@ -0,0 +1,121 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfDescribeConfigsRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDescribeConfigsRequest {
+    /// The resources whose configurations we want to describe.
+    pub resources: Vec<DescribeConfigsResource>,
+
+    /// True if we should include all synonyms.
+    #[fluvio_kf(min_version = 1)]
+    pub include_synoyms: bool,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DescribeConfigsResource {
+    /// The resource type.
+    pub resource_type: i8,
+
+    /// The resource name.
+    pub resource_name: String,
+
+    /// The configuration keys to list, or null to list all configuration keys.
+    pub configuration_keys: Option<Vec<String>>,
+}
+
+// -----------------------------------
+// KfDescribeConfigsResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDescribeConfigsResponse {
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+
+    /// The results for each resource.
+    pub results: Vec<DescribeConfigsResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DescribeConfigsResult {
+    /// The error code, or 0 if we were able to successfully describe the configurations.
+    pub error_code: ErrorCode,
+
+    /// The error message, or null if we were able to successfully describe the configurations.
+    pub error_message: Option<String>,
+
+    /// The resource type.
+    pub resource_type: i8,
+
+    /// The resource name.
+    pub resource_name: String,
+
+    /// Each listed configuration.
+    pub configs: Vec<DescribeConfigsResourceResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DescribeConfigsResourceResult {
+    /// The configuration name.
+    pub name: String,
+
+    /// The configuration value.
+    pub value: Option<String>,
+
+    /// True if the configuration is read-only.
+    pub read_only: bool,
+
+    /// The configuration source.
+    #[fluvio_kf(min_version = 1, ignorable)]
+    pub config_source: i8,
+
+    /// True if this configuration is sensitive.
+    pub is_sensitive: bool,
+
+    /// The synonyms for this configuration key.
+    #[fluvio_kf(min_version = 1, ignorable)]
+    pub synonyms: Vec<DescribeConfigsSynonym>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DescribeConfigsSynonym {
+    /// The synonym name.
+    #[fluvio_kf(min_version = 1)]
+    pub name: String,
+
+    /// The synonym value.
+    #[fluvio_kf(min_version = 1)]
+    pub value: Option<String>,
+
+    /// The synonym source.
+    #[fluvio_kf(min_version = 1)]
+    pub source: i8,
+}
+
+// -----------------------------------
+// Implementation - KfDescribeConfigsRequest
+// -----------------------------------
+
+impl Request for KfDescribeConfigsRequest {
+    const API_KEY: u16 = 32;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 2;
+    const DEFAULT_API_VERSION: i16 = 2;
+
+    type Response = KfDescribeConfigsResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/describe_delegation_token.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/describe_delegation_token.rs
new file mode 100644
index 0000000000..d1a4b9e966
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/describe_delegation_token.rs
@@ -0,0 +1,97 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfDescribeDelegationTokenRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDescribeDelegationTokenRequest {
+    /// Each owner that we want to describe delegation tokens for, or null to describe all tokens.
+    pub owners: Option<Vec<DescribeDelegationTokenOwner>>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DescribeDelegationTokenOwner {
+    /// The owner principal type.
+    pub principal_type: String,
+
+    /// The owner principal name.
+    pub principal_name: String,
+}
+
+// -----------------------------------
+// KfDescribeDelegationTokenResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDescribeDelegationTokenResponse {
+    /// The error code, or 0 if there was no error.
+    pub error_code: ErrorCode,
+
+    /// The tokens.
+    pub tokens: Vec<DescribedDelegationToken>,
+
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DescribedDelegationToken {
+    /// The token principal type.
+    pub principal_type: String,
+
+    /// The token principal name.
+    pub principal_name: String,
+
+    /// The token issue timestamp in milliseconds.
+    pub issue_timestamp: i64,
+
+    /// The token expiry timestamp in milliseconds.
+    pub expiry_timestamp: i64,
+
+    /// The token maximum timestamp length in milliseconds.
+    pub max_timestamp: i64,
+
+    /// The token ID.
+    pub token_name: String,
+
+    /// The token HMAC.
+    pub hmac: Vec<u8>,
+
+    /// Those who are able to renew this token before it expires.
+    pub renewers: Vec<DescribedDelegationTokenRenewer>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DescribedDelegationTokenRenewer {
+    /// The renewer principal type
+    pub principal_type: String,
+
+    /// The renewer principal name
+    pub principal_name: String,
+}
+
+// -----------------------------------
+// Implementation - KfDescribeDelegationTokenRequest
+// -----------------------------------
+
+impl Request for KfDescribeDelegationTokenRequest {
+    const API_KEY: u16 = 41;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 1;
+    const DEFAULT_API_VERSION: i16 = 1;
+
+    type Response = KfDescribeDelegationTokenResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/describe_groups.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/describe_groups.rs
new file mode 100644
index 0000000000..8e54c8c83c
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/describe_groups.rs
@@ -0,0 +1,89 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfDescribeGroupsRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDescribeGroupsRequest {
+    /// The names of the groups to describe
+    pub groups: Vec<String>,
+}
+
+// -----------------------------------
+// KfDescribeGroupsResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDescribeGroupsResponse {
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    #[fluvio_kf(min_version = 1, ignorable)]
+    pub throttle_time_ms: i32,
+
+    /// Each described group.
+    pub groups: Vec<DescribedGroup>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DescribedGroup {
+    /// The describe error, or 0 if there was no error.
+    pub error_code: ErrorCode,
+
+    /// The group ID string.
+    pub group_id: String,
+
+    /// The group state string, or the empty string.
+    pub group_state: String,
+
+    /// The group protocol type, or the empty string.
+    pub protocol_type: String,
+
+    /// The group protocol data, or the empty string.
+    pub protocol_data: String,
+
+    /// The group members.
+    pub members: Vec<DescribedGroupMember>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DescribedGroupMember {
+    /// The member ID assigned by the group coordinator.
+    pub member_id: String,
+
+    /// The client ID used in the member's latest join group request.
+    pub client_id: String,
+
+    /// The client host.
+    pub client_host: String,
+
+    /// The metadata corresponding to the current group protocol in use.
+    pub member_metadata: Vec<u8>,
+
+    /// The current assignment provided by the group leader.
+    pub member_assignment: Vec<u8>,
+}
+
+// -----------------------------------
+// Implementation - KfDescribeGroupsRequest
+// -----------------------------------
+
+impl Request for KfDescribeGroupsRequest {
+    const API_KEY: u16 = 15;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 2;
+    const DEFAULT_API_VERSION: i16 = 2;
+
+    type Response = KfDescribeGroupsResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/describe_log_dirs.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/describe_log_dirs.rs
new file mode 100644
index 0000000000..d9e20d5784
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/describe_log_dirs.rs
@@ -0,0 +1,95 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfDescribeLogDirsRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDescribeLogDirsRequest {
+    /// Each topic that we want to describe log directories for, or null for all topics.
+    pub topics: Option<Vec<DescribableLogDirTopic>>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DescribableLogDirTopic {
+    /// The topic name
+    pub topic: String,
+
+    /// The partition indexes.
+    pub partition_index: Vec<i32>,
+}
+
+// -----------------------------------
+// KfDescribeLogDirsResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfDescribeLogDirsResponse {
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+
+    /// The log directories.
+    pub results: Vec<DescribeLogDirsResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DescribeLogDirsResult {
+    /// The error code, or 0 if there was no error.
+    pub error_code: ErrorCode,
+
+    /// The absolute log directory path.
+    pub log_dir: String,
+
+    /// Each topic.
+    pub topics: Vec<DescribeLogDirsTopic>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DescribeLogDirsTopic {
+    /// The topic name.
+    pub name: String,
+
+    pub partitions: Vec<DescribeLogDirsPartition>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct DescribeLogDirsPartition {
+    /// The partition index.
+    pub partition_index: i32,
+
+    /// The size of the log segments in this partition in bytes.
+    pub partition_size: i64,
+
+    /// The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition)
+    /// or current replica's LEO (if it is the future log for the partition)
+    pub offset_lag: i64,
+
+    /// True if this log is created by AlterReplicaLogDirsRequest and will replace the current log
+    /// of the replica in the future.
+    pub is_future_key: bool,
+}
+
+// -----------------------------------
+// Implementation - KfDescribeLogDirsRequest
+// -----------------------------------
+
+impl Request for KfDescribeLogDirsRequest {
+    const API_KEY: u16 = 35;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 1;
+    const DEFAULT_API_VERSION: i16 = 1;
+
+    type Response = KfDescribeLogDirsResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/elect_preferred_leaders.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/elect_preferred_leaders.rs
new file mode 100644
index 0000000000..19aaf84116
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/elect_preferred_leaders.rs
@@ -0,0 +1,83 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfElectPreferredLeadersRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfElectPreferredLeadersRequest {
+    /// The topic partitions to elect the preferred leader of.
+    pub topic_partitions: Option<Vec<TopicPartitions>>,
+
+    /// The time in ms to wait for the election to complete.
+    #[fluvio_kf()]
+    pub timeout_ms: i32,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct TopicPartitions {
+    /// The name of a topic.
+    pub topic: String,
+
+    /// The partitions of this topic whose preferred leader should be elected
+    pub partition_id: Vec<i32>,
+}
+
+// -----------------------------------
+// KfElectPreferredLeadersResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfElectPreferredLeadersResponse {
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+
+    /// The election results for each topic.
+    pub replica_election_results: Vec<ReplicaElectionResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct ReplicaElectionResult {
+    /// The topic name
+    pub topic: String,
+
+    /// The results for each partition
+    pub partition_result: Vec<PartitionResult>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct PartitionResult {
+    /// The partition id
+    pub partition_id: i32,
+
+    /// The result error, or zero if there was no error.
+    pub error_code: ErrorCode,
+
+    /// The result message, or null if there was no error.
+    pub error_message: Option<String>,
+}
+
+// -----------------------------------
+// Implementation - KfElectPreferredLeadersRequest
+// -----------------------------------
+
+impl Request for KfElectPreferredLeadersRequest {
+    const API_KEY: u16 = 43;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 0;
+    const DEFAULT_API_VERSION: i16 = 0;
+
+    type Response = KfElectPreferredLeadersResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/end_txn.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/end_txn.rs
new file mode 100644
index 0000000000..073f5cd10d
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/end_txn.rs
@@ -0,0 +1,58 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfEndTxnRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfEndTxnRequest {
+    /// The ID of the transaction to end.
+    pub transactional_id: String,
+
+    /// The producer ID.
+    pub producer_id: i64,
+
+    /// The current epoch associated with the producer.
+    pub producer_epoch: i16,
+
+    /// True if the transaction was committed, false if it was aborted.
+    pub committed: bool,
+}
+
+// -----------------------------------
+// KfEndTxnResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfEndTxnResponse {
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+
+    /// The error code, or 0 if there was no error.
+    pub error_code: ErrorCode,
+}
+
+// -----------------------------------
+// Implementation - KfEndTxnRequest
+// -----------------------------------
+
+impl Request for KfEndTxnRequest {
+    const API_KEY: u16 = 26;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 1;
+    const DEFAULT_API_VERSION: i16 = 1;
+
+    type Response = KfEndTxnResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/expire_delegation_token.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/expire_delegation_token.rs
new file mode 100644
index 0000000000..cfe17669ae
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/expire_delegation_token.rs
@@ -0,0 +1,55 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfExpireDelegationTokenRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfExpireDelegationTokenRequest {
+    /// The HMAC of the delegation token to be expired.
+    pub hmac: Vec<u8>,
+
+    /// The expiry time period in milliseconds.
+    pub expiry_time_period_ms: i64,
+}
+
+// -----------------------------------
+// KfExpireDelegationTokenResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfExpireDelegationTokenResponse {
+    /// The error code, or 0 if there was no error.
+    pub error_code: ErrorCode,
+
+    /// The timestamp in milliseconds at which this token expires.
+    pub expiry_timestamp_ms: i64,
+
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    pub throttle_time_ms: i32,
+}
+
+// -----------------------------------
+// Implementation - KfExpireDelegationTokenRequest
+// -----------------------------------
+
+impl Request for KfExpireDelegationTokenRequest {
+    const API_KEY: u16 = 40;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 1;
+    const DEFAULT_API_VERSION: i16 = 1;
+
+    type Response = KfExpireDelegationTokenResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/fetch.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/fetch.rs
new file mode 100644
index 0000000000..f171c34255
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/fetch.rs
@@ -0,0 +1,209 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use std::fmt::Debug;
+use std::marker::PhantomData;
+
+use kf_protocol::Decoder;
+use kf_protocol::Encoder;
+
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Isolation;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfFetchRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfFetchRequest<R>
+where
+    R: Encoder + Decoder + Default + Debug,
+{
+    /// The broker ID of the follower, or -1 if this request is from a consumer.
+    pub replica_id: i32,
+
+    /// The maximum time in milliseconds to wait for the response.
+    pub max_wait: i32,
+
+    /// The minimum bytes to accumulate in the response.
+    pub min_bytes: i32,
+
+    /// The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored.
+    #[fluvio_kf(min_version = 3, ignorable)]
+    pub max_bytes: i32,
+
+    /// This setting controls the visibility of transactional records. Using READ_UNCOMMITTED
+    /// (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1),
+    /// non-transactional and COMMITTED transactional records are visible. To be more concrete,
+    /// READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable
+    /// offset), and enables the inclusion of the list of aborted transactions in the result, which
+    /// allows consumers to discard ABORTED transactional records
+    #[fluvio_kf(min_version = 4)]
+    pub isolation_level: Isolation,
+
+    /// The fetch session ID.
+    #[fluvio_kf(min_version = 7)]
+    pub session_id: i32,
+
+    /// The fetch session epoch.
+    #[fluvio_kf(min_version = 7)]
+    pub epoch: i32,
+
+    /// The topics to fetch.
+    pub topics: Vec<FetchableTopic>,
+
+    /// In an incremental fetch request, the partitions to remove.
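+    ///
+    /// An illustrative sketch (not part of the generated spec): within a fetch
+    /// session identified by `session_id`/`epoch`, partitions the client no longer
+    /// wants can be dropped incrementally, e.g.:
+    ///
+    /// ```ignore
+    /// request.forgotten = vec![ForgottenTopic {
+    ///     name: "my-topic".to_string(),
+    ///     forgotten_partition_indexes: vec![0, 1],
+    /// }];
+    /// ```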
+    #[fluvio_kf(min_version = 7)]
+    pub forgotten: Vec<ForgottenTopic>,
+
+    pub data: PhantomData<R>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct FetchableTopic {
+    /// The name of the topic to fetch.
+    pub name: String,
+
+    /// The partitions to fetch.
+    pub fetch_partitions: Vec<FetchPartition>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct ForgottenTopic {
+    /// The topic name.
+    #[fluvio_kf(min_version = 7)]
+    pub name: String,
+
+    /// The partition indexes to forget.
+    #[fluvio_kf(min_version = 7)]
+    pub forgotten_partition_indexes: Vec<i32>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct FetchPartition {
+    /// The partition index.
+    pub partition_index: i32,
+
+    /// The current leader epoch of the partition.
+    #[fluvio_kf(min_version = 9, ignorable)]
+    pub current_leader_epoch: i32,
+
+    /// The message offset.
+    pub fetch_offset: i64,
+
+    /// The earliest available offset of the follower replica. The field is only used when the
+    /// request is sent by the follower.
+    #[fluvio_kf(min_version = 5)]
+    pub log_start_offset: i64,
+
+    /// The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may
+    /// not be honored.
+    pub max_bytes: i32,
+}
+
+// -----------------------------------
+// KfFetchResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfFetchResponse<R>
+where
+    R: Encoder + Decoder + Default + Debug,
+{
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    #[fluvio_kf(min_version = 1, ignorable)]
+    pub throttle_time_ms: i32,
+
+    /// The top level response error code.
+    #[fluvio_kf(min_version = 7)]
+    pub error_code: ErrorCode,
+
+    /// The fetch session ID, or 0 if this is not part of a fetch session.
+    #[fluvio_kf(min_version = 7)]
+    pub session_id: i32,
+
+    /// The response topics.
+    pub topics: Vec<FetchableTopicResponse<R>>,
+    pub data: PhantomData<R>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct FetchableTopicResponse<R>
+where
+    R: Encoder + Decoder + Default + Debug,
+{
+    /// The topic name.
+    pub name: String,
+
+    /// The topic partitions.
+    pub partitions: Vec<FetchablePartitionResponse<R>>,
+    pub data: PhantomData<R>,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct FetchablePartitionResponse<R>
+where
+    R: Encoder + Decoder + Default + Debug,
+{
+    /// The partition index.
+    pub partition_index: i32,
+
+    /// The error code, or 0 if there was no fetch error.
+    pub error_code: ErrorCode,
+
+    /// The current high water mark.
+    pub high_watermark: i64,
+
+    /// The last stable offset (or LSO) of the partition. This is the last offset such that the
+    /// state of all transactional records prior to this offset have been decided (ABORTED or
+    /// COMMITTED)
+    #[fluvio_kf(min_version = 4, ignorable)]
+    pub last_stable_offset: i64,
+
+    /// The current log start offset.
+    #[fluvio_kf(min_version = 5, ignorable)]
+    pub log_start_offset: i64,
+
+    /// The aborted transactions.
+    #[fluvio_kf(min_version = 4)]
+    pub aborted: Option<Vec<AbortedTransaction>>,
+
+    /// The record data.
+    pub records: R,
+}
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct AbortedTransaction {
+    /// The producer id associated with the aborted transaction.
+    #[fluvio_kf(min_version = 4)]
+    pub producer_id: i64,
+
+    /// The first offset in the aborted transaction.
+    #[fluvio_kf(min_version = 4)]
+    pub first_offset: i64,
+}
+
+// -----------------------------------
+// Implementation - KfFetchRequest
+// -----------------------------------
+
+impl<R> Request for KfFetchRequest<R>
+where
+    R: Debug + Decoder + Encoder,
+{
+    const API_KEY: u16 = 1;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 10;
+    const DEFAULT_API_VERSION: i16 = 10;
+
+    type Response = KfFetchResponse<R>;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/find_coordinator.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/find_coordinator.rs
new file mode 100644
index 0000000000..ae3a0bbca9
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/find_coordinator.rs
@@ -0,0 +1,67 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfFindCoordinatorRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfFindCoordinatorRequest {
+    /// The coordinator key.
+    pub key: String,
+
+    /// The coordinator key type. (Group, transaction, etc.)
+    #[fluvio_kf(min_version = 1)]
+    pub key_type: i8,
+}
+
+// -----------------------------------
+// KfFindCoordinatorResponse
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfFindCoordinatorResponse {
+    /// The duration in milliseconds for which the request was throttled due to a quota violation,
+    /// or zero if the request did not violate any quota.
+    #[fluvio_kf(min_version = 1, ignorable)]
+    pub throttle_time_ms: i32,
+
+    /// The error code, or 0 if there was no error.
+    pub error_code: ErrorCode,
+
+    /// The error message, or null if there was no error.
+    #[fluvio_kf(min_version = 1, ignorable)]
+    pub error_message: Option<String>,
+
+    /// The node id.
+    pub node_id: i32,
+
+    /// The host name.
+    pub host: String,
+
+    /// The port.
+    pub port: i32,
+}
+
+// -----------------------------------
+// Implementation - KfFindCoordinatorRequest
+// -----------------------------------
+
+impl Request for KfFindCoordinatorRequest {
+    const API_KEY: u16 = 10;
+
+    const MIN_API_VERSION: i16 = 0;
+    const MAX_API_VERSION: i16 = 2;
+    const DEFAULT_API_VERSION: i16 = 2;
+
+    type Response = KfFindCoordinatorResponse;
+}
diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/heartbeat.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/heartbeat.rs
new file mode 100644
index 0000000000..ba0f96d66f
--- /dev/null
+++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/heartbeat.rs
@@ -0,0 +1,56 @@
+/// WARNING: CODE GENERATED FILE
+/// * This file is generated by kfspec2code.
+/// * Any changes applied to this file will be lost when a new spec is generated.
+use serde::{Deserialize, Serialize};
+
+use kf_protocol_api::ErrorCode;
+use kf_protocol_api::Request;
+
+use kf_protocol_derive::Decode;
+use kf_protocol_derive::Encode;
+use kf_protocol_derive::KfDefault;
+
+// -----------------------------------
+// KfHeartbeatRequest
+// -----------------------------------
+
+#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)]
+pub struct KfHeartbeatRequest {
+    /// The group id.
+ pub group_id: String, + + /// The generation of the group. + pub generationid: i32, + + /// The member ID. + pub member_id: String, +} + +// ----------------------------------- +// KfHeartbeatResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfHeartbeatResponse { + /// The duration in milliseconds for which the request was throttled due to a quota violation, + /// or zero if the request did not violate any quota. + #[fluvio_kf(min_version = 1, ignorable)] + pub throttle_time_ms: i32, + + /// The error code, or 0 if there was no error. + pub error_code: ErrorCode, +} + +// ----------------------------------- +// Implementation - KfHeartbeatRequest +// ----------------------------------- + +impl Request for KfHeartbeatRequest { + const API_KEY: u16 = 12; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 2; + const DEFAULT_API_VERSION: i16 = 2; + + type Response = KfHeartbeatResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/init_producer_id.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/init_producer_id.rs new file mode 100644 index 0000000000..dec15520e2 --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/init_producer_id.rs @@ -0,0 +1,59 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. +use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfInitProducerIdRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfInitProducerIdRequest { + /// The transactional id, or null if the producer is not transactional. + pub transactional_id: Option, + + /// The time in ms to wait for before aborting idle transactions sent by this producer. + pub transaction_timeout_ms: i32, +} + +// ----------------------------------- +// KfInitProducerIdResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfInitProducerIdResponse { + /// The duration in milliseconds for which the request was throttled due to a quota violation, + /// or zero if the request did not violate any quota. + #[fluvio_kf(ignorable)] + pub throttle_time_ms: i32, + + /// The error code, or 0 if there was no error. + pub error_code: ErrorCode, + + /// The current producer id. + pub producer_id: i64, + + /// The current epoch associated with the producer id. + pub producer_epoch: i16, +} + +// ----------------------------------- +// Implementation - KfInitProducerIdRequest +// ----------------------------------- + +impl Request for KfInitProducerIdRequest { + const API_KEY: u16 = 22; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 1; + const DEFAULT_API_VERSION: i16 = 1; + + type Response = KfInitProducerIdResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/join_group.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/join_group.rs new file mode 100644 index 0000000000..079821cdb8 --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/join_group.rs @@ -0,0 +1,101 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. 
+/// * Any changes applied to this file will be lost when a new spec is generated. +use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::ProtocolMetadata; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfJoinGroupRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfJoinGroupRequest { + /// The group identifier. + pub group_id: String, + + /// The coordinator considers the consumer dead if it receives no heartbeat after this timeout + /// in milliseconds. + pub session_timeout_ms: i32, + + /// The maximum time in milliseconds that the coordinator will wait for each member to rejoin + /// when rebalancing the group. + #[fluvio_kf(min_version = 1, ignorable)] + pub rebalance_timeout_ms: i32, + + /// The member id assigned by the group coordinator. + pub member_id: String, + + /// The unique name for the class of protocols implemented by the group we want to join. + pub protocol_type: String, + + /// The list of protocols that the member supports. + pub protocols: Vec<JoinGroupRequestProtocol>, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct JoinGroupRequestProtocol { + /// The protocol name. + pub name: String, + + /// The protocol metadata. + pub metadata: ProtocolMetadata, +} + +// ----------------------------------- +// KfJoinGroupResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfJoinGroupResponse { + /// The duration in milliseconds for which the request was throttled due to a quota violation, + /// or zero if the request did not violate any quota. + #[fluvio_kf(min_version = 2, ignorable)] + pub throttle_time_ms: i32, + + /// The error code, or 0 if there was no error. + pub error_code: ErrorCode, + + /// The generation ID of the group. + pub generation_id: i32, + + /// The group protocol selected by the coordinator. + pub protocol_name: String, + + /// The leader of the group. + pub leader: String, + + /// The member ID assigned by the group coordinator. + pub member_id: String, + + pub members: Vec<JoinGroupResponseMember>, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct JoinGroupResponseMember { + /// The group member ID. + pub member_id: String, + + /// The group member metadata. + pub metadata: ProtocolMetadata, +} + +// ----------------------------------- +// Implementation - KfJoinGroupRequest +// ----------------------------------- + +impl Request for KfJoinGroupRequest { + const API_KEY: u16 = 11; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 4; + const DEFAULT_API_VERSION: i16 = 4; + + type Response = KfJoinGroupResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/leader_and_isr.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/leader_and_isr.rs new file mode 100644 index 0000000000..a3dd8b1bb1 --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/leader_and_isr.rs @@ -0,0 +1,167 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. 
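// ---------------------------------------------------------------------
// Editor's sketch (hypothetical helper, not generated by kfspec2code and
// not part of this patch): every `impl Request` above exposes an API key
// plus MIN/MAX/DEFAULT version constants. A client that has fetched the
// broker's ApiVersions can clamp to a mutually supported version using
// only those constants; `broker_max` stands in for whatever the peer
// advertised and is an assumption of this sketch.
// ---------------------------------------------------------------------
use kf_protocol_api::Request;

fn pick_version<R: Request>(broker_max: i16) -> Option<i16> {
    // Prefer the highest version both sides understand.
    let candidate = std::cmp::min(R::MAX_API_VERSION, broker_max);
    if candidate >= R::MIN_API_VERSION {
        Some(candidate)
    } else {
        None
    }
}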
+use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfLeaderAndIsrRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfLeaderAndIsrRequest { + /// The current controller ID. + pub controller_id: i32, + + /// The current controller epoch. + pub controller_epoch: i32, + + /// The current broker epoch. + #[fluvio_kf(min_version = 2, ignorable)] + pub broker_epoch: i64, + + /// Each topic. + #[fluvio_kf(min_version = 2)] + pub topic_states: Vec, + + /// The state of each partition + #[fluvio_kf(max_version = 1)] + pub partition_states_v0: Vec, + + /// The current live leaders. + pub live_leaders: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct LeaderAndIsrRequestTopicState { + /// The topic name. + #[fluvio_kf(min_version = 2)] + pub name: String, + + /// The state of each partition + pub partition_states: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct LeaderAndIsrRequestPartitionStateV0 { + /// The topic name. + #[fluvio_kf(max_version = 1)] + pub topic_name: String, + + /// The partition index. + #[fluvio_kf(max_version = 1)] + pub partition_index: i32, + + /// The controller epoch. + #[fluvio_kf(max_version = 1)] + pub controller_epoch: i32, + + /// The broker ID of the leader. + #[fluvio_kf(max_version = 1)] + pub leader_key: i32, + + /// The leader epoch. + #[fluvio_kf(max_version = 1)] + pub leader_epoch: i32, + + /// The in-sync replica IDs. + #[fluvio_kf(max_version = 1)] + pub isr_replicas: Vec, + + /// The ZooKeeper version. + #[fluvio_kf(max_version = 1)] + pub zk_version: i32, + + /// The replica IDs. + #[fluvio_kf(max_version = 1)] + pub replicas: Vec, + + /// Whether the replica should have existed on the broker or not. + #[fluvio_kf(min_version = 1, max_version = 1, ignorable)] + pub is_new: bool, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct LeaderAndIsrLiveLeader { + /// The leader's broker ID. + pub broker_id: i32, + + /// The leader's hostname. + pub host_name: String, + + /// The leader's port. + pub port: i32, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct LeaderAndIsrRequestPartitionState { + /// The partition index. + pub partition_index: i32, + + /// The controller epoch. + pub controller_epoch: i32, + + /// The broker ID of the leader. + pub leader_key: i32, + + /// The leader epoch. + pub leader_epoch: i32, + + /// The in-sync replica IDs. + pub isr_replicas: Vec, + + /// The ZooKeeper version. + pub zk_version: i32, + + /// The replica IDs. + pub replicas: Vec, + + /// Whether the replica should have existed on the broker or not. + #[fluvio_kf(min_version = 1, ignorable)] + pub is_new: bool, +} + +// ----------------------------------- +// KfLeaderAndIsrResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfLeaderAndIsrResponse { + /// The error code, or 0 if there was no error. + pub error_code: ErrorCode, + + /// Each partition. + pub partitions: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct LeaderAndIsrResponsePartition { + /// The topic name. + pub topic_name: String, + + /// The partition index. 
+ pub partition_index: i32, + + /// The partition error code, or 0 if there was no error. + pub error_code: ErrorCode, +} + +// ----------------------------------- +// Implementation - KfLeaderAndIsrRequest +// ----------------------------------- + +impl Request for KfLeaderAndIsrRequest { + const API_KEY: u16 = 4; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 2; + const DEFAULT_API_VERSION: i16 = 2; + + type Response = KfLeaderAndIsrResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/leave_group.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/leave_group.rs new file mode 100644 index 0000000000..70a3b3760d --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/leave_group.rs @@ -0,0 +1,53 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. +use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfLeaveGroupRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfLeaveGroupRequest { + /// The ID of the group to leave. + pub group_id: String, + + /// The member ID to remove from the group. + pub member_id: String, +} + +// ----------------------------------- +// KfLeaveGroupResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfLeaveGroupResponse { + /// The duration in milliseconds for which the request was throttled due to a quota violation, + /// or zero if the request did not violate any quota. + #[fluvio_kf(min_version = 1, ignorable)] + pub throttle_time_ms: i32, + + /// The error code, or 0 if there was no error. + pub error_code: ErrorCode, +} + +// ----------------------------------- +// Implementation - KfLeaveGroupRequest +// ----------------------------------- + +impl Request for KfLeaveGroupRequest { + const API_KEY: u16 = 13; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 2; + const DEFAULT_API_VERSION: i16 = 2; + + type Response = KfLeaveGroupResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/list_groups.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/list_groups.rs new file mode 100644 index 0000000000..2128dd5522 --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/list_groups.rs @@ -0,0 +1,59 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. 
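// Editor's illustration (hypothetical, not part of the generated file):
// constructing a leave-group request by hand. This assumes the `KfDefault`
// derive yields a usable `default()` constructor and that the module path
// matches the crate layout shown later in this patch; both are assumptions.
use super::leave_group::KfLeaveGroupRequest;

fn make_leave_request(group: &str, member: &str) -> KfLeaveGroupRequest {
    let mut request = KfLeaveGroupRequest::default();
    request.group_id = group.to_owned();
    request.member_id = member.to_owned();
    request
}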
+use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfListGroupsRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfListGroupsRequest {} + +// ----------------------------------- +// KfListGroupsResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfListGroupsResponse { + /// The duration in milliseconds for which the request was throttled due to a quota violation, + /// or zero if the request did not violate any quota. + #[fluvio_kf(min_version = 1, ignorable)] + pub throttle_time_ms: i32, + + /// The error code, or 0 if there was no error. + pub error_code: ErrorCode, + + /// Each group in the response. + pub groups: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct ListedGroup { + /// The group ID. + pub group_id: String, + + /// The group protocol type. + pub protocol_type: String, +} + +// ----------------------------------- +// Implementation - KfListGroupsRequest +// ----------------------------------- + +impl Request for KfListGroupsRequest { + const API_KEY: u16 = 16; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 2; + const DEFAULT_API_VERSION: i16 = 2; + + type Response = KfListGroupsResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/list_offset.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/list_offset.rs new file mode 100644 index 0000000000..96652bd38d --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/list_offset.rs @@ -0,0 +1,114 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. +use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Isolation; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfListOffsetRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfListOffsetRequest { + /// The broker ID of the requestor, or -1 if this request is being made by a normal consumer. + pub replica_id: i32, + + /// This setting controls the visibility of transactional records. Using READ_UNCOMMITTED + /// (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), + /// non-transactional and COMMITTED transactional records are visible. To be more concrete, + /// READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable + /// offset), and enables the inclusion of the list of aborted transactions in the result, which + /// allows consumers to discard ABORTED transactional records + #[fluvio_kf(min_version = 2)] + pub isolation_level: Isolation, + + /// Each topic in the request. + pub topics: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct ListOffsetTopic { + /// The topic name. + pub name: String, + + /// Each partition in the request. 
+ pub partitions: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct ListOffsetPartition { + /// The partition index. + pub partition_index: i32, + + /// The current leader epoch. + #[fluvio_kf(min_version = 4)] + pub current_leader_epoch: i32, + + /// The current timestamp. + pub timestamp: i64, +} + +// ----------------------------------- +// KfListOffsetResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfListOffsetResponse { + /// The duration in milliseconds for which the request was throttled due to a quota violation, + /// or zero if the request did not violate any quota. + #[fluvio_kf(min_version = 2, ignorable)] + pub throttle_time_ms: i32, + + /// Each topic in the response. + pub topics: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct ListOffsetTopicResponse { + /// The topic name + pub name: String, + + /// Each partition in the response. + pub partitions: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct ListOffsetPartitionResponse { + /// The partition index. + pub partition_index: i32, + + /// The partition error code, or 0 if there was no error. + pub error_code: ErrorCode, + + /// The timestamp associated with the returned offset. + #[fluvio_kf(min_version = 1)] + pub timestamp: i64, + + /// The returned offset. + #[fluvio_kf(min_version = 1)] + pub offset: i64, + + #[fluvio_kf(min_version = 4)] + pub leader_epoch: i32, +} + +// ----------------------------------- +// Implementation - KfListOffsetRequest +// ----------------------------------- + +impl Request for KfListOffsetRequest { + const API_KEY: u16 = 2; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 5; + const DEFAULT_API_VERSION: i16 = 5; + + type Response = KfListOffsetResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/metadata.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/metadata.rs new file mode 100644 index 0000000000..bc4f1aee3e --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/metadata.rs @@ -0,0 +1,130 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. +use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfMetadataRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfMetadataRequest { + /// The topics to fetch metadata for. + pub topics: Option>, + + /// If this is true, the broker may auto-create topics that we requested which do not already + /// exist, if it is configured to do so. + #[fluvio_kf(min_version = 4)] + pub allow_auto_topic_creation: bool, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct MetadataRequestTopic { + /// The topic name. 
+ pub name: String, +} + +// ----------------------------------- +// KfMetadataResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfMetadataResponse { + /// The duration in milliseconds for which the request was throttled due to a quota violation, + /// or zero if the request did not violate any quota. + #[fluvio_kf(min_version = 3)] + pub throttle_time_ms: i32, + + /// Each broker in the response. + pub brokers: Vec, + + /// The cluster ID that responding broker belongs to. + #[fluvio_kf(min_version = 2, ignorable)] + pub cluster_id: Option, + + /// The ID of the controller broker. + #[fluvio_kf(min_version = 1, ignorable)] + pub controller_id: i32, + + /// Each topic in the response. + pub topics: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct MetadataResponseBroker { + /// The broker ID. + pub node_id: i32, + + /// The broker hostname. + pub host: String, + + /// The broker port. + pub port: i32, + + /// The rack of the broker, or null if it has not been assigned to a rack. + #[fluvio_kf(min_version = 1, ignorable)] + pub rack: Option, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct MetadataResponseTopic { + /// The topic error, or 0 if there was no error. + pub error_code: ErrorCode, + + /// The topic name. + pub name: String, + + /// True if the topic is internal. + #[fluvio_kf(min_version = 1, ignorable)] + pub is_internal: bool, + + /// Each partition in the topic. + pub partitions: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct MetadataResponsePartition { + /// The partition error, or 0 if there was no error. + pub error_code: ErrorCode, + + /// The partition index. + pub partition_index: i32, + + /// The ID of the leader broker. + pub leader_id: i32, + + /// The leader epoch of this partition. + #[fluvio_kf(min_version = 7, ignorable)] + pub leader_epoch: i32, + + /// The set of all nodes that host this partition. + pub replica_nodes: Vec, + + /// The set of nodes that are in sync with the leader for this partition. + pub isr_nodes: Vec, + + /// The set of offline replicas of this partition. 
+ #[fluvio_kf(min_version = 5, ignorable)] + pub offline_replicas: Vec, +} + +// ----------------------------------- +// Implementation - KfMetadataRequest +// ----------------------------------- + +impl Request for KfMetadataRequest { + const API_KEY: u16 = 3; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 7; + const DEFAULT_API_VERSION: i16 = 7; + + type Response = KfMetadataResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/mod.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/mod.rs new file mode 100644 index 0000000000..fe70f69376 --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/mod.rs @@ -0,0 +1,53 @@ +pub mod api_versions; + +pub mod fetch; +pub mod produce; + +pub mod find_coordinator; +pub mod sync_group; +pub mod join_group; +pub mod list_groups; +pub mod heartbeat; +pub mod describe_groups; +pub mod leave_group; +pub mod delete_groups; + +pub mod create_topics; +pub mod delete_topics; + +pub mod metadata; +pub mod update_metadata; + +pub mod list_offset; +pub mod offset_fetch; + +pub mod leader_and_isr; + +/* +pub mod add_offsets_to_txn; +pub mod add_partitions_to_txn; +pub mod alter_configs; +pub mod alter_replica_log_dirs; +pub mod controlled_shutdown; +pub mod create_acls; +pub mod create_delegation_token; +pub mod create_partitions; +pub mod delete_acls; +pub mod delete_records; +pub mod describe_acls; +pub mod describe_configs; +pub mod describe_delegation_token; +pub mod describe_log_dirs; +pub mod elect_preferred_leaders; +pub mod end_txn; +pub mod expire_delegation_token; +pub mod init_producer_id; +pub mod offset_commit; +pub mod offset_for_leader_epoch; +pub mod renew_delegation_token; +pub mod sasl_authenticate; +pub mod sasl_handshake; +pub mod stop_replica; +pub mod txn_offset_commit; +pub mod write_txn_markers; +*/ diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/offset_commit.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/offset_commit.rs new file mode 100644 index 0000000000..7c556cec9e --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/offset_commit.rs @@ -0,0 +1,112 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. +use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfOffsetCommitRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfOffsetCommitRequest { + /// The unique group identifier. + pub group_id: String, + + /// The generation of the group. + #[fluvio_kf(min_version = 1, ignorable)] + pub generation_id: i32, + + /// The member ID assigned by the group coordinator. + #[fluvio_kf(min_version = 1, ignorable)] + pub member_id: String, + + /// The time period in ms to retain the offset. + #[fluvio_kf(min_version = 2, max_version = 4, ignorable)] + pub retention_time_ms: i64, + + /// The topics to commit offsets for. + pub topics: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct OffsetCommitRequestTopic { + /// The topic name. + pub name: String, + + /// Each partition to commit offsets for. 
+ pub partitions: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct OffsetCommitRequestPartition { + /// The partition index. + pub partition_index: i32, + + /// The message offset to be committed. + pub committed_offset: i64, + + /// The leader epoch of this partition. + #[fluvio_kf(min_version = 6, ignorable)] + pub committed_leader_epoch: i32, + + /// The timestamp of the commit. + #[fluvio_kf(min_version = 1, max_version = 1)] + pub commit_timestamp: i64, + + /// Any associated metadata the client wants to keep. + pub committed_metadata: Option, +} + +// ----------------------------------- +// KfOffsetCommitResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfOffsetCommitResponse { + /// The duration in milliseconds for which the request was throttled due to a quota violation, + /// or zero if the request did not violate any quota. + #[fluvio_kf(min_version = 3, ignorable)] + pub throttle_time_ms: i32, + + /// The responses for each topic. + pub topics: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct OffsetCommitResponseTopic { + /// The topic name. + pub name: String, + + /// The responses for each partition in the topic. + pub partitions: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct OffsetCommitResponsePartition { + /// The partition index. + pub partition_index: i32, + + /// The error code, or 0 if there was no error. + pub error_code: ErrorCode, +} + +// ----------------------------------- +// Implementation - KfOffsetCommitRequest +// ----------------------------------- + +impl Request for KfOffsetCommitRequest { + const API_KEY: u16 = 8; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 6; + const DEFAULT_API_VERSION: i16 = 6; + + type Response = KfOffsetCommitResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/offset_fetch.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/offset_fetch.rs new file mode 100644 index 0000000000..9d7b17b97c --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/offset_fetch.rs @@ -0,0 +1,93 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. +use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfOffsetFetchRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfOffsetFetchRequest { + /// The group to fetch offsets for. + pub group_id: String, + + /// Each topic we would like to fetch offsets for, or null to fetch offsets for all topics. + pub topics: Option>, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct OffsetFetchRequestTopic { + pub name: String, + + /// The partition indexes we would like to fetch offsets for. 
+ pub partition_indexes: Vec, +} + +// ----------------------------------- +// KfOffsetFetchResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfOffsetFetchResponse { + /// The duration in milliseconds for which the request was throttled due to a quota violation, + /// or zero if the request did not violate any quota. + #[fluvio_kf(min_version = 3, ignorable)] + pub throttle_time_ms: i32, + + /// The responses per topic. + pub topics: Vec, + + /// The top-level error code, or 0 if there was no error. + #[fluvio_kf(min_version = 2)] + pub error_code: ErrorCode, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct OffsetFetchResponseTopic { + /// The topic name. + pub name: String, + + /// The responses per partition + pub partitions: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct OffsetFetchResponsePartition { + /// The partition index. + pub partition_index: i32, + + /// The committed message offset. + pub committed_offset: i64, + + /// The leader epoch. + #[fluvio_kf(min_version = 5)] + pub committed_leader_epoch: i32, + + /// The partition metadata. + pub metadata: Option, + + /// The error code, or 0 if there was no error. + pub error_code: ErrorCode, +} + +// ----------------------------------- +// Implementation - KfOffsetFetchRequest +// ----------------------------------- + +impl Request for KfOffsetFetchRequest { + const API_KEY: u16 = 9; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 5; + const DEFAULT_API_VERSION: i16 = 5; + + type Response = KfOffsetFetchResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/offset_for_leader_epoch.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/offset_for_leader_epoch.rs new file mode 100644 index 0000000000..4344ed269a --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/offset_for_leader_epoch.rs @@ -0,0 +1,100 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. +use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfOffsetForLeaderEpochRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfOffsetForLeaderEpochRequest { + /// Each topic to get offsets for. + pub topics: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct OffsetForLeaderTopic { + /// The topic name. + pub name: String, + + /// Each partition to get offsets for. + pub partitions: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct OffsetForLeaderPartition { + /// The partition index. + pub partition_index: i32, + + /// An epoch used to fence consumers/replicas with old metadata. If the epoch provided by the + /// client is larger than the current epoch known to the broker, then the UNKNOWN_LEADER_EPOCH + /// error code will be returned. If the provided epoch is smaller, then the FENCED_LEADER_EPOCH + /// error code will be returned. + #[fluvio_kf(min_version = 2, ignorable)] + pub current_leader_epoch: i32, + + /// The epoch to look up an offset for. 
+ pub leader_epoch: i32, +} + +// ----------------------------------- +// KfOffsetForLeaderEpochResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfOffsetForLeaderEpochResponse { + /// The duration in milliseconds for which the request was throttled due to a quota violation, + /// or zero if the request did not violate any quota. + #[fluvio_kf(min_version = 2, ignorable)] + pub throttle_time_ms: i32, + + /// Each topic we fetched offsets for. + pub topics: Vec<OffsetForLeaderTopicResult>, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct OffsetForLeaderTopicResult { + /// The topic name. + pub name: String, + + /// Each partition in the topic we fetched offsets for. + pub partitions: Vec<OffsetForLeaderPartitionResult>, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct OffsetForLeaderPartitionResult { + /// The error code, or 0 if there was no error. + pub error_code: ErrorCode, + + /// The partition index. + pub partition_index: i32, + + /// The leader epoch of the partition. + #[fluvio_kf(min_version = 1, ignorable)] + pub leader_epoch: i32, + + /// The end offset of the epoch. + pub end_offset: i64, +} + +// ----------------------------------- +// Implementation - KfOffsetForLeaderEpochRequest +// ----------------------------------- + +impl Request for KfOffsetForLeaderEpochRequest { + const API_KEY: u16 = 23; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 2; + const DEFAULT_API_VERSION: i16 = 2; + + type Response = KfOffsetForLeaderEpochResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/produce.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/produce.rs new file mode 100644 index 0000000000..0ce69c35d4 --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/produce.rs @@ -0,0 +1,132 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. +use std::fmt::Debug; +use std::marker::PhantomData; + +use kf_protocol::Decoder; +use kf_protocol::Encoder; + +use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfProduceRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfProduceRequest<R> +where + R: Encoder + Decoder + Default + Debug, +{ + /// The transactional ID, or null if the producer is not transactional. + #[fluvio_kf(min_version = 3)] + pub transactional_id: Option<String>, + + /// The number of acknowledgments the producer requires the leader to have received before + /// considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the + /// leader and -1 for the full ISR. + pub acks: i16, + + /// The timeout to await a response in milliseconds. + pub timeout_ms: i32, + + /// Each topic to produce to. + pub topics: Vec<TopicProduceData<R>>, + pub data: PhantomData<R>, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct TopicProduceData<R> +where + R: Encoder + Decoder + Default + Debug, +{ + /// The topic name. + pub name: String, + + /// Each partition to produce to. 
+ pub partitions: Vec<PartitionProduceData<R>>, + pub data: PhantomData<R>, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct PartitionProduceData<R> +where + R: Encoder + Decoder + Default + Debug, +{ + /// The partition index. + pub partition_index: i32, + + /// The record data to be produced. + pub records: R, +} + +// ----------------------------------- +// KfProduceResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfProduceResponse { + /// Each produce response + pub responses: Vec<TopicProduceResponse>, + + /// The duration in milliseconds for which the request was throttled due to a quota violation, + /// or zero if the request did not violate any quota. + #[fluvio_kf(min_version = 1, ignorable)] + pub throttle_time_ms: i32, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct TopicProduceResponse { + /// The topic name + pub name: String, + + /// Each partition that we produced to within the topic. + pub partitions: Vec<PartitionProduceResponse>, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct PartitionProduceResponse { + /// The partition index. + pub partition_index: i32, + + /// The error code, or 0 if there was no error. + pub error_code: ErrorCode, + + /// The base offset. + pub base_offset: i64, + + /// The timestamp returned by broker after appending the messages. If CreateTime is used for the + /// topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will + /// be the broker local time when the messages are appended. + #[fluvio_kf(min_version = 2, ignorable)] + pub log_append_time_ms: i64, + + /// The log start offset. + #[fluvio_kf(min_version = 5, ignorable)] + pub log_start_offset: i64, +} + +// ----------------------------------- +// Implementation - KfProduceRequest +// ----------------------------------- + +impl<R> Request for KfProduceRequest<R> +where + R: Debug + Decoder + Encoder + Default, +{ + const API_KEY: u16 = 0; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 7; + const DEFAULT_API_VERSION: i16 = 7; + + type Response = KfProduceResponse; + +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/renew_delegation_token.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/renew_delegation_token.rs new file mode 100644 index 0000000000..2cf5d8c013 --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/renew_delegation_token.rs @@ -0,0 +1,55 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. +use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfRenewDelegationTokenRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfRenewDelegationTokenRequest { + /// The HMAC of the delegation token to be renewed. + pub hmac: Vec<u8>, + + /// The renewal time period in milliseconds. + pub renew_period_ms: i64, +} + +// ----------------------------------- +// KfRenewDelegationTokenResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfRenewDelegationTokenResponse { + /// The error code, or 0 if there was no error. 
+ pub error_code: ErrorCode, + + /// The timestamp in milliseconds at which this token expires. + pub expiry_timestamp_ms: i64, + + /// The duration in milliseconds for which the request was throttled due to a quota violation, + /// or zero if the request did not violate any quota. + pub throttle_time_ms: i32, +} + +// ----------------------------------- +// Implementation - KfRenewDelegationTokenRequest +// ----------------------------------- + +impl Request for KfRenewDelegationTokenRequest { + const API_KEY: u16 = 39; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 1; + const DEFAULT_API_VERSION: i16 = 1; + + type Response = KfRenewDelegationTokenResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/sasl_authenticate.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/sasl_authenticate.rs new file mode 100644 index 0000000000..afae905deb --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/sasl_authenticate.rs @@ -0,0 +1,55 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. +use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfSaslAuthenticateRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfSaslAuthenticateRequest { + /// The SASL authentication bytes from the client, as defined by the SASL mechanism. + pub auth_bytes: Vec, +} + +// ----------------------------------- +// KfSaslAuthenticateResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfSaslAuthenticateResponse { + /// The error code, or 0 if there was no error. + pub error_code: ErrorCode, + + /// The error message, or null if there was no error. + pub error_message: Option, + + /// The SASL authentication bytes from the server, as defined by the SASL mechanism. + pub auth_bytes: Vec, + + /// The SASL authentication bytes from the server, as defined by the SASL mechanism. + #[fluvio_kf(min_version = 1)] + pub session_lifetime_ms: i64, +} + +// ----------------------------------- +// Implementation - KfSaslAuthenticateRequest +// ----------------------------------- + +impl Request for KfSaslAuthenticateRequest { + const API_KEY: u16 = 36; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 1; + const DEFAULT_API_VERSION: i16 = 1; + + type Response = KfSaslAuthenticateResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/sasl_handshake.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/sasl_handshake.rs new file mode 100644 index 0000000000..82f20e70ab --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/sasl_handshake.rs @@ -0,0 +1,48 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. 
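// Editor's note, expressed as code (a hypothetical helper, not from the
// derive crate): the `#[fluvio_kf(min_version = N, max_version = M)]`
// attributes seen throughout these generated files gate a field on the
// negotiated API version; a field outside its range is simply not encoded
// or decoded on the wire. The helper below restates that rule.
fn field_is_active(min_version: i16, max_version: Option<i16>, negotiated: i16) -> bool {
    // Active when the negotiated version is >= min_version and, if a
    // max_version is declared, <= max_version.
    negotiated >= min_version && max_version.map_or(true, |max| negotiated <= max)
}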
+use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfSaslHandshakeRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfSaslHandshakeRequest { + /// The SASL mechanism chosen by the client. + pub mechanism: String, +} + +// ----------------------------------- +// KfSaslHandshakeResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfSaslHandshakeResponse { + /// The error code, or 0 if there was no error. + pub error_code: ErrorCode, + + /// The mechanisms enabled in the server. + pub mechanisms: Vec, +} + +// ----------------------------------- +// Implementation - KfSaslHandshakeRequest +// ----------------------------------- + +impl Request for KfSaslHandshakeRequest { + const API_KEY: u16 = 17; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 1; + const DEFAULT_API_VERSION: i16 = 1; + + type Response = KfSaslHandshakeResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/stop_replica.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/stop_replica.rs new file mode 100644 index 0000000000..54f579efb4 --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/stop_replica.rs @@ -0,0 +1,85 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. +use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfStopReplicaRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfStopReplicaRequest { + /// The controller id. + pub controller_id: i32, + + /// The controller epoch. + pub controller_epoch: i32, + + /// The broker epoch. + #[fluvio_kf(min_version = 1, ignorable)] + pub broker_epoch: i64, + + /// Whether these partitions should be deleted. + pub delete_partitions: bool, + + /// The topics to stop. + #[fluvio_kf(min_version = 1)] + pub topics: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct StopReplicaRequestTopic { + /// The topic name. + #[fluvio_kf(min_version = 1)] + pub name: String, + + /// The partition indexes. + #[fluvio_kf(min_version = 1)] + pub partition_indexes: Vec, +} + +// ----------------------------------- +// KfStopReplicaResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfStopReplicaResponse { + /// The top-level error code, or 0 if there was no top-level error. + pub error_code: ErrorCode, + + /// The responses for each partition. + pub partitions: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct StopReplicaResponsePartition { + /// The topic name. + pub topic_name: String, + + /// The partition index. + pub partition_index: i32, + + /// The partition error code, or 0 if there was no partition error. 
+ pub error_code: ErrorCode, +} + +// ----------------------------------- +// Implementation - KfStopReplicaRequest +// ----------------------------------- + +impl Request for KfStopReplicaRequest { + const API_KEY: u16 = 5; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 1; + const DEFAULT_API_VERSION: i16 = 1; + + type Response = KfStopReplicaResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/sync_group.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/sync_group.rs new file mode 100644 index 0000000000..5173b56f9e --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/sync_group.rs @@ -0,0 +1,72 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. +use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::GroupAssignment; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfSyncGroupRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfSyncGroupRequest { + /// The unique group identifier. + pub group_id: String, + + /// The generation of the group. + pub generation_id: i32, + + /// The member ID assigned by the group. + pub member_id: String, + + /// Each assignment. + pub assignments: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct SyncGroupRequestAssignment { + /// The ID of the member to assign. + pub member_id: String, + + /// The member assignment. + pub assignment: GroupAssignment, +} + +// ----------------------------------- +// KfSyncGroupResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfSyncGroupResponse { + /// The duration in milliseconds for which the request was throttled due to a quota violation, + /// or zero if the request did not violate any quota. + #[fluvio_kf(min_version = 1, ignorable)] + pub throttle_time_ms: i32, + + /// The error code, or 0 if there was no error. + pub error_code: ErrorCode, + + /// The member assignment. + pub assignment: GroupAssignment, +} + +// ----------------------------------- +// Implementation - KfSyncGroupRequest +// ----------------------------------- + +impl Request for KfSyncGroupRequest { + const API_KEY: u16 = 14; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 2; + const DEFAULT_API_VERSION: i16 = 2; + + type Response = KfSyncGroupResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/txn_offset_commit.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/txn_offset_commit.rs new file mode 100644 index 0000000000..fd13523e6c --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/txn_offset_commit.rs @@ -0,0 +1,104 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. 
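// Editor's sketch (hypothetical, not generated): a group leader pairing
// each member with an assignment for a KfSyncGroupRequest. Field names are
// exactly those of the structs above; `GroupAssignment: Default` and the
// `super::sync_group` path are assumptions of this illustration.
use kf_protocol_api::GroupAssignment;
use super::sync_group::{KfSyncGroupRequest, SyncGroupRequestAssignment};

fn assign_members(group: &str, generation: i32, leader: &str, members: &[String]) -> KfSyncGroupRequest {
    KfSyncGroupRequest {
        group_id: group.to_owned(),
        generation_id: generation,
        member_id: leader.to_owned(),
        // One assignment entry per member; real assignments would carry
        // partition ownership rather than a default value.
        assignments: members
            .iter()
            .map(|member| SyncGroupRequestAssignment {
                member_id: member.clone(),
                assignment: GroupAssignment::default(),
            })
            .collect(),
    }
}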
+use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfTxnOffsetCommitRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfTxnOffsetCommitRequest { + /// The ID of the transaction. + pub transactional_id: String, + + /// The ID of the group. + pub group_id: String, + + /// The current producer ID in use by the transactional ID. + pub producer_id: i64, + + /// The current epoch associated with the producer ID. + pub producer_epoch: i16, + + /// Each topic that we want to commit offsets for. + pub topics: Vec<TxnOffsetCommitRequestTopic>, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct TxnOffsetCommitRequestTopic { + /// The topic name. + pub name: String, + + /// The partitions inside the topic that we want to commit offsets for. + pub partitions: Vec<TxnOffsetCommitRequestPartition>, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct TxnOffsetCommitRequestPartition { + /// The index of the partition within the topic. + pub partition_index: i32, + + /// The message offset to be committed. + pub committed_offset: i64, + + /// The leader epoch of the last consumed record. + #[fluvio_kf(min_version = 2, ignorable)] + pub committed_leader_epoch: i32, + + /// Any associated metadata the client wants to keep. + pub committed_metadata: Option<String>, +} + +// ----------------------------------- +// KfTxnOffsetCommitResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfTxnOffsetCommitResponse { + /// The duration in milliseconds for which the request was throttled due to a quota violation, + /// or zero if the request did not violate any quota. + pub throttle_time_ms: i32, + + /// The responses for each topic. + pub topics: Vec<TxnOffsetCommitResponseTopic>, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct TxnOffsetCommitResponseTopic { + /// The topic name. + pub name: String, + + /// The responses for each partition in the topic. + pub partitions: Vec<TxnOffsetCommitResponsePartition>, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct TxnOffsetCommitResponsePartition { + /// The partition index. + pub partition_index: i32, + + /// The error code, or 0 if there was no error. + pub error_code: ErrorCode, +} + +// ----------------------------------- +// Implementation - KfTxnOffsetCommitRequest +// ----------------------------------- + +impl Request for KfTxnOffsetCommitRequest { + const API_KEY: u16 = 28; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 2; + const DEFAULT_API_VERSION: i16 = 2; + + type Response = KfTxnOffsetCommitResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/update_metadata.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/update_metadata.rs new file mode 100644 index 0000000000..b029bc888c --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/update_metadata.rs @@ -0,0 +1,178 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. 
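// Editor's sketch (hypothetical): walking a KfTxnOffsetCommitResponse to
// collect the partitions that failed. It relies only on the fields defined
// above; that `ErrorCode` implements `PartialEq` and exposes a zero-valued
// `None` success variant are assumptions of this sketch, not guarantees
// read from the patch.
use kf_protocol_api::ErrorCode;
use super::txn_offset_commit::KfTxnOffsetCommitResponse;

fn failed_partitions(response: &KfTxnOffsetCommitResponse) -> Vec<(String, i32)> {
    response
        .topics
        .iter()
        .flat_map(|topic| {
            topic
                .partitions
                .iter()
                .filter(|partition| partition.error_code != ErrorCode::None)
                .map(move |partition| (topic.name.clone(), partition.partition_index))
        })
        .collect()
}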
+use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfUpdateMetadataRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfUpdateMetadataRequest { + /// The controller id. + pub controller_id: i32, + + /// The controller epoch. + pub controller_epoch: i32, + + /// The broker epoch. + #[fluvio_kf(min_version = 5, ignorable)] + pub broker_epoch: i64, + + /// Each topic that we would like to update. + #[fluvio_kf(min_version = 5)] + pub topic_states: Vec, + + /// Each partition that we would like to update. + #[fluvio_kf(max_version = 4)] + pub partition_states_v0: Vec, + + pub brokers: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct UpdateMetadataRequestTopicState { + /// The topic name. + pub topic_name: String, + + /// The partition that we would like to update. + #[fluvio_kf(min_version = 5)] + pub partition_states: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct UpdateMetadataRequestPartitionStateV0 { + /// The topic name. + #[fluvio_kf(max_version = 4)] + pub topic_name: String, + + /// The partition index. + #[fluvio_kf(max_version = 4)] + pub partition_index: i32, + + /// The controller epoch. + #[fluvio_kf(max_version = 4)] + pub controller_epoch: i32, + + /// The ID of the broker which is the current partition leader. + #[fluvio_kf(max_version = 4)] + pub leader: i32, + + /// The leader epoch of this partition. + #[fluvio_kf(max_version = 4)] + pub leader_epoch: i32, + + /// The brokers which are in the ISR for this partition. + #[fluvio_kf(max_version = 4)] + pub isr: Vec, + + /// The Zookeeper version. + #[fluvio_kf(max_version = 4)] + pub zk_version: i32, + + /// All the replicas of this partition. + #[fluvio_kf(max_version = 4)] + pub replicas: Vec, + + /// The replicas of this partition which are offline. + #[fluvio_kf(min_version = 4, max_version = 4)] + pub offline_replicas: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct UpdateMetadataRequestBroker { + pub id: i32, + + /// The broker endpoints. + #[fluvio_kf(min_version = 1)] + pub endpoints: Vec, + + /// The rack which this broker belongs to. + #[fluvio_kf(min_version = 2, ignorable)] + pub rack: Option, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct UpdateMetadataRequestEndpoint { + /// The port of this endpoint + #[fluvio_kf(min_version = 1)] + pub port: i32, + + /// The hostname of this endpoint + #[fluvio_kf(min_version = 1)] + pub host: String, + + /// The listener name. + #[fluvio_kf(min_version = 3)] + pub listener: String, + + /// The security protocol type. + #[fluvio_kf(min_version = 1)] + pub security_protocol: i16, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct UpdateMetadataPartitionState { + /// The partition index. + #[fluvio_kf(min_version = 5)] + pub partition_index: i32, + + /// The controller epoch. + #[fluvio_kf(min_version = 5)] + pub controller_epoch: i32, + + /// The ID of the broker which is the current partition leader. + #[fluvio_kf(min_version = 5)] + pub leader: i32, + + /// The leader epoch of this partition. 
+ #[fluvio_kf(min_version = 5)] + pub leader_epoch: i32, + + /// The brokers which are in the ISR for this partition. + #[fluvio_kf(min_version = 5)] + pub isr: Vec, + + /// The Zookeeper version. + #[fluvio_kf(min_version = 5)] + pub zk_version: i32, + + /// All the replicas of this partition. + #[fluvio_kf(min_version = 5)] + pub replicas: Vec, + + /// The replicas of this partition which are offline. + #[fluvio_kf(min_version = 5)] + pub offline_replicas: Vec, +} + +// ----------------------------------- +// KfUpdateMetadataResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfUpdateMetadataResponse { + /// The error code, or 0 if there was no error. + pub error_code: ErrorCode, +} + +// ----------------------------------- +// Implementation - KfUpdateMetadataRequest +// ----------------------------------- + +impl Request for KfUpdateMetadataRequest { + const API_KEY: u16 = 6; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 5; + const DEFAULT_API_VERSION: i16 = 5; + + type Response = KfUpdateMetadataResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/kf_code_gen/write_txn_markers.rs b/kf-protocol/kf-protocol-message/src/kf_code_gen/write_txn_markers.rs new file mode 100644 index 0000000000..428d7925f2 --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/kf_code_gen/write_txn_markers.rs @@ -0,0 +1,99 @@ +/// WARNING: CODE GENERATED FILE +/// * This file is generated by kfspec2code. +/// * Any changes applied to this file will be lost when a new spec is generated. +use serde::{Deserialize, Serialize}; + +use kf_protocol_api::ErrorCode; +use kf_protocol_api::Request; + +use kf_protocol_derive::Decode; +use kf_protocol_derive::Encode; +use kf_protocol_derive::KfDefault; + +// ----------------------------------- +// KfWriteTxnMarkersRequest +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfWriteTxnMarkersRequest { + /// The transaction markers to be written. + pub markers: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct WritableTxnMarker { + /// The current producer ID. + pub producer_id: i64, + + /// The current epoch associated with the producer ID. + pub producer_epoch: i16, + + /// The result of the transaction to write to the partitions (false = ABORT, true = COMMIT). + pub transaction_result: bool, + + /// Each topic that we want to write transaction marker(s) for. + pub topics: Vec, + + /// Epoch associated with the transaction state partition hosted by this transaction coordinator + pub coordinator_epoch: i32, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct WritableTxnMarkerTopic { + /// The topic name. + pub name: String, + + /// The indexes of the partitions to write transaction markers for. + pub partition_indexes: Vec, +} + +// ----------------------------------- +// KfWriteTxnMarkersResponse +// ----------------------------------- + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct KfWriteTxnMarkersResponse { + /// The results for writing makers. + pub markers: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct WritableTxnMarkerResult { + /// The current producer ID in use by the transactional ID. + pub producer_id: i64, + + /// The results by topic. 
+ pub topics: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct WritableTxnMarkerTopicResult { + /// The topic name. + pub name: String, + + /// The results by partition. + pub partitions: Vec, +} + +#[derive(Encode, Decode, Serialize, Deserialize, KfDefault, Debug)] +pub struct WritableTxnMarkerPartitionResult { + /// The partition index. + pub partition_index: i32, + + /// The error code, or 0 if there was no error. + pub error_code: ErrorCode, +} + +// ----------------------------------- +// Implementation - KfWriteTxnMarkersRequest +// ----------------------------------- + +impl Request for KfWriteTxnMarkersRequest { + const API_KEY: u16 = 27; + + const MIN_API_VERSION: i16 = 0; + const MAX_API_VERSION: i16 = 0; + const DEFAULT_API_VERSION: i16 = 0; + + type Response = KfWriteTxnMarkersResponse; +} diff --git a/kf-protocol/kf-protocol-message/src/lib.rs b/kf-protocol/kf-protocol-message/src/lib.rs new file mode 100644 index 0000000000..f48cdc9bcd --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/lib.rs @@ -0,0 +1,53 @@ +mod kf_code_gen; + +pub mod fetch_handler; +pub mod produce_handler; + +pub type KfApiVersions = Vec; + +pub mod api_versions { + pub use crate::kf_code_gen::api_versions::*; +} + +pub mod topic { + pub use crate::kf_code_gen::create_topics::*; + pub use crate::kf_code_gen::delete_topics::*; +} + +pub mod metadata { + pub use crate::kf_code_gen::metadata::*; + pub use crate::kf_code_gen::update_metadata::*; +} + +pub mod produce { + pub use crate::kf_code_gen::produce::*; + pub use crate::produce_handler::DefaultKfProduceRequest; + pub use crate::produce_handler::DefaultKfTopicRequest; + pub use crate::produce_handler::DefaultKfPartitionRequest; +} + +pub mod fetch { + pub use crate::kf_code_gen::fetch::*; + pub use crate::fetch_handler::DefaultKfFetchResponse; + pub use crate::fetch_handler::DefaultKfFetchRequest; +} + +pub mod group { + pub use crate::kf_code_gen::find_coordinator::*; + pub use crate::kf_code_gen::join_group::*; + pub use crate::kf_code_gen::sync_group::*; + pub use crate::kf_code_gen::leave_group::*; + pub use crate::kf_code_gen::delete_groups::*; + pub use crate::kf_code_gen::list_groups::*; + pub use crate::kf_code_gen::describe_groups::*; + pub use crate::kf_code_gen::heartbeat::*; +} + +pub mod offset { + pub use crate::kf_code_gen::list_offset::*; + pub use crate::kf_code_gen::offset_fetch::*; +} + +pub mod isr { + pub use crate::kf_code_gen::leader_and_isr::*; +} diff --git a/kf-protocol/kf-protocol-message/src/produce_handler.rs b/kf-protocol/kf-protocol-message/src/produce_handler.rs new file mode 100644 index 0000000000..fcbaae4849 --- /dev/null +++ b/kf-protocol/kf-protocol-message/src/produce_handler.rs @@ -0,0 +1,47 @@ +use std::fmt::Debug; + +use kf_protocol::Decoder; +use kf_protocol::Encoder; + +use kf_protocol_api::DefaultRecords; + +use crate::produce::{KfProduceResponse, KfProduceRequest}; +use crate::produce::TopicProduceData; +use crate::produce::{PartitionProduceData, PartitionProduceResponse}; + +pub type DefaultKfProduceRequest = KfProduceRequest; +pub type DefaultKfTopicRequest = TopicProduceData; +pub type DefaultKfPartitionRequest = PartitionProduceData; + +// ----------------------------------- +// Implementation - KfProduceRequest +// ----------------------------------- + +impl KfProduceRequest where R: Encoder + Decoder + Debug { + + /// Find partition in request + pub fn find_partition_request(&self, topic: &str, partition: i32) -> Option<&PartitionProduceData> { + if let 
Some(request) = self.topics.iter().find(|request| request.name == topic) { + request.partitions.iter().find( |part_request| part_request.partition_index == partition) + } else { + None + } + } +} + +// ----------------------------------- +// Implementation - KfProduceResponse +// ----------------------------------- + +impl KfProduceResponse { + + /// Find partition in Response + pub fn find_partition_response(&self, topic: &str, partition: i32) -> Option<&PartitionProduceResponse> { + + if let Some(response) = self.responses.iter().find(|response| response.name == topic) { + response.partitions.iter().find( |part_response| part_response.partition_index == partition) + } else { + None + } + } +} diff --git a/kf-protocol/kf-protocol-serde/Cargo.toml b/kf-protocol/kf-protocol-serde/Cargo.toml new file mode 100644 index 0000000000..1087cb6cc9 --- /dev/null +++ b/kf-protocol/kf-protocol-serde/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "kf-protocol" +edition = "2018" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + +[[bin]] +name = "kafka-dump" +path = "src/bin/kafka-dump.rs" +required-features = ["cli"] + +[features] +#cli = ["clap"] +#serde_parser = ["serde"] + +[dependencies] +log = "0.4.6" +kf-protocol-core = { path = "kf-protocol-core" } + +#clap = { version = "2.32.0", optional = true } diff --git a/kf-protocol/kf-protocol-serde/src/de.rs b/kf-protocol/kf-protocol-serde/src/de.rs new file mode 100644 index 0000000000..ae1a771578 --- /dev/null +++ b/kf-protocol/kf-protocol-serde/src/de.rs @@ -0,0 +1,521 @@ +use std::io::Read; +use serde::Deserialize; +use serde::Deserializer; +use serde::de::Visitor; +use bytes::Buf; +use log::trace; + +use super::Error; +use super::ErrorKind; + +pub struct KafkaDeserializer { + buf: B +} + +impl KafkaDeserializer where B:Buf { + + pub fn from_buf(buf: B) -> Self { + KafkaDeserializer { + buf + } + } + + +} + +#[allow(dead_code)] +pub fn from_buf<'a,T,B>(src: B) -> Result + where T: Deserialize<'a>, + B: Buf +{ + + let mut k_der = KafkaDeserializer::from_buf(src); + T::deserialize(&mut k_der) + +} + +impl <'de,'a,B>Deserializer<'de> for &'a mut KafkaDeserializer + where B:Buf +{ + type Error = Error; + + fn deserialize_any(self, _visitor: V) -> Result + where V: Visitor<'de>, + { + trace!("deserialize_any"); + Err(ErrorKind::DeserializeAnyNotSupported.into()) + } + + fn deserialize_bool(self, visitor: V) -> Result + where V: serde::de::Visitor<'de> + { + trace!("deserialize_bool"); + if self.buf.remaining() < 1 { + return Err(Box::new(ErrorKind::NotEnoughBytes)) + } + + let value = self.buf.get_u8(); + + match value { + 0 => visitor.visit_bool(false), + 1 => visitor.visit_bool(true), + _ => Err(ErrorKind::InvalidBoolEncoding(value).into()) + + } + + } + + fn deserialize_i8(self, visitor: V) -> Result + where V: Visitor<'de>, + { + trace!("deserialize_i8"); + if self.buf.remaining() < 1 { + return Err(ErrorKind::NotEnoughBytes.into()) + } + let value = self.buf.get_i8(); + visitor.visit_i8(value) + } + + fn deserialize_u8(self, visitor: V) -> Result + where V: serde::de::Visitor<'de>, + { + trace!("deserialize_u8"); + if self.buf.remaining() < 1 { + return Err(ErrorKind::NotEnoughBytes.into()) + } + let value = self.buf.get_u8(); + + visitor.visit_u8(value) + } + + + fn deserialize_u16(self, visitor: V) -> Result + where V: Visitor<'de>, + { + trace!("deserialize_u16"); + if self.buf.remaining() < 2 { + return Err(ErrorKind::NotEnoughBytes.into()) + } + + visitor.visit_u16(self.buf.get_u16_be()) + + } + + + fn deserialize_i16(self, visitor: V) -> 
Result + where V: Visitor<'de>, + { + trace!("deserialize_i16"); + + if self.buf.remaining() < 2 { + return Err(ErrorKind::NotEnoughBytes.into()) + } + + visitor.visit_i16(self.buf.get_i16_be()) + } + + + + fn deserialize_u32(self, visitor: V) -> Result + where V: Visitor<'de>, + { + + trace!("deserialize_u32"); + + if self.buf.remaining() < 4 { + return Err(ErrorKind::NotEnoughBytes.into()) + } + + visitor.visit_u32(self.buf.get_u32_be()) + } + + fn deserialize_i32(self, visitor: V) -> Result + where V: Visitor<'de>, + { + trace!("deserialize_i32"); + + if self.buf.remaining() < 4 { + return Err(ErrorKind::NotEnoughBytes.into()) + } + + visitor.visit_i32(self.buf.get_i32_be()) + } + + fn deserialize_u64(self, visitor: V) -> Result + where V: Visitor<'de>, + { + trace!("deserialize_u64"); + + if self.buf.remaining() < 8 { + return Err(ErrorKind::NotEnoughBytes.into()) + } + + visitor.visit_u64(self.buf.get_u64_be()) + } + + fn deserialize_i64(self, visitor: V) -> Result + where V: Visitor<'de>, + { + trace!("deserialize_i64"); + + if self.buf.remaining() < 8 { + return Err(ErrorKind::NotEnoughBytes.into()) + } + + visitor.visit_i64(self.buf.get_i64_be()) + } + + fn deserialize_f32(self, _visitor: V) -> Result + where V: Visitor<'de>, + { + Err(ErrorKind::NotSupportedFormat("f32".to_owned()).into()) + } + + fn deserialize_f64(self, _visitor: V) -> Result + where V: Visitor<'de>, + { + Err(ErrorKind::NotSupportedFormat("f64".to_owned()).into()) + } + + fn deserialize_unit(self, _visitor: V) -> Result + where V: serde::de::Visitor<'de>, + { + Err(ErrorKind::NotSupportedFormat("Unit".to_owned()).into()) + } + + + fn deserialize_char(self, _visitor: V) -> Result + where V: serde::de::Visitor<'de>, + { + Err(ErrorKind::NotSupportedFormat("char".to_owned()).into()) + } + + fn deserialize_str(self, _visitor: V) -> Result + where V: Visitor<'de>, + { + Err(ErrorKind::NotSupportedFormat("str slice".to_owned()).into()) + } + + fn deserialize_string(self, visitor: V) -> Result + where V: serde::de::Visitor<'de> + { + if self.buf.remaining() < 2 { + return Err(ErrorKind::NotEnoughBytes.into()) + } + + let len = self.buf.get_u16_be() as usize; + if len <= 0 { + return visitor.visit_string("".into()); + } + + let mut out_string = "".to_owned(); + let read_size= self.buf.by_ref().take(len).reader().read_to_string(&mut out_string)?; + + if read_size != len { + return Err(ErrorKind::Custom(format!("not enough string, desired: {} but read: {}",len, read_size)).into()); + } + + visitor.visit_string(out_string) + } + + fn deserialize_bytes(self, _visitor: V) -> Result + where V: serde::de::Visitor<'de>, + { + Err(ErrorKind::NotSupportedFormat("bytes".to_owned()).into()) + } + + fn deserialize_byte_buf(self, _visitor: V) -> Result + where V: serde::de::Visitor<'de>, + { + Err(ErrorKind::NotSupportedFormat("bytes buf".to_owned()).into()) + } + + fn deserialize_option(self, visitor: V) -> Result + where V: Visitor<'de>, + { + trace!("deserializing option: remaining: {}",self.buf.remaining()); + if self.buf.remaining() < 2 { + return Err(ErrorKind::NotEnoughBytes.into()) + } + + let mut buf = self.buf.by_ref().take(2); + let len = buf.get_i16_be() as usize; + + trace!("check for length: remaining: {}",self.buf.remaining()); + if len == 0 { + visitor.visit_none() + } else { + visitor.visit_some(&mut *self) + } + } + + fn deserialize_unit_struct( self, _name: &'static str,_visitor: V) -> Result + where V: Visitor<'de>, + { + Err(ErrorKind::NotSupportedFormat("unit struct".to_owned()).into()) + } + + fn 
deserialize_newtype_struct(self, _name: &'static str, _visitor: V) -> Result + where V: Visitor<'de>, + { + Err(ErrorKind::NotSupportedFormat("new type".to_owned()).into()) + } + + fn deserialize_seq(self, visitor: V) -> Result + where V: Visitor<'de>, + { + trace!("deserialize_sequence"); + + if self.buf.remaining() < 4 { + return Err(ErrorKind::NotEnoughBytes.into()) + } + + let len = self.buf.get_i32_be(); + self.deserialize_tuple(len as usize, visitor) + } + + fn deserialize_tuple(self, len: usize, visitor: V) -> Result + where V: Visitor<'de>, + { + struct Access<'a, B> { + deserializer: &'a mut KafkaDeserializer, + len: usize, + } + + impl<'de,'a,B:Buf> serde::de::SeqAccess<'de> for Access<'a,B> + { + type Error = Error; + + fn next_element_seed(&mut self, seed: T) -> Result,Error> + where + T: serde::de::DeserializeSeed<'de>, + { + if self.len > 0 { + self.len -= 1; + let value = (serde::de::DeserializeSeed::deserialize( + seed, + &mut *self.deserializer, + ))?; + Ok(Some(value)) + } else { + Ok(None) + } + } + + fn size_hint(&self) -> Option { + Some(self.len) + } + } + + visitor.visit_seq(Access { + deserializer: self, + len: len, + }) + + } + + fn deserialize_tuple_struct( + self, + _name: &'static str, + _len: usize, + _visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + Err(ErrorKind::NotSupportedFormat("tuple struct".to_owned()).into()) + } + + fn deserialize_map(self, _visitor: V) -> Result + where + V: Visitor<'de>, + { + trace!("deserialize map"); + Err(ErrorKind::NotSupportedFormat("map".to_owned()).into() + ) + } + + fn deserialize_struct( + self, + _name: &'static str, + fields: &'static [&'static str], + visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + self.deserialize_tuple(fields.len(), visitor) + } + + + fn deserialize_enum( + self, + _name: &'static str, + _variants: &'static [&'static str], + _visitor: V, + ) -> Result + where + V: Visitor<'de>, + { + Err(ErrorKind::NotSupportedFormat("enum".to_owned()).into()) + } + + fn deserialize_identifier(self, _visitor: V) -> Result + where + V: Visitor<'de>, + { + Err(ErrorKind::NotSupportedFormat("identifier".to_owned()).into()) + } + + fn deserialize_ignored_any(self, _visitor: V) -> Result + where + V: Visitor<'de>, + { + Err(ErrorKind::NotSupportedFormat("ignored".to_owned()).into()) + } + +} + + +#[cfg(test)] +mod test { + + use std::io::Cursor; + use serde::Deserialize; + use pretty_env_logger; + + use super::from_buf; + use super::Error; + + + fn init_logger() { + let _ = pretty_env_logger::try_init(); + } + + + #[derive(Deserialize,Debug,Default)] + struct Dummy1 { + value: u8, + off: bool + } + + + + #[test] + fn test_de_u8() -> Result<(),Error> { + let data = [0x05,0x01]; + + let buf = &mut Cursor::new(&data); + let dummy: Dummy1 = from_buf(buf)?; + assert_eq!(dummy.value,5); + assert_eq!(dummy.off,true); + Ok(()) + } + + #[derive(Deserialize,Debug,Default)] + struct Dummy2 { + value: i32 + } + + #[test] + fn test_serde_decode_i32() -> Result<(),Error> { + init_logger(); + + let data = [0x00, 0x00, 0x00, 0x10]; + + let buf = &mut Cursor::new(&data); + let dummy: Dummy2 = from_buf(buf)?; + assert_eq!(dummy.value,16); + Ok(()) + } + + #[derive(Deserialize,Debug,Default)] + struct Dummy3 { + value: u32 + } + + #[test] + fn test_serde_decode_u32() -> Result<(),Error> { + init_logger(); + + let data = [0x00, 0x00, 0x00, 0x10]; + + let buf = &mut Cursor::new(&data); + let dummy: Dummy3 = from_buf(buf)?; + assert_eq!(dummy.value,16); + Ok(()) + } + + #[derive(Deserialize,Debug,Default)] + struct 
DummyString {
+        value: String
+    }
+
+    #[test]
+    fn test_serde_decode_string() -> Result<(),Error> {
+
+        let data = [
+            0x00, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x2d, 0x31,
+        ];
+
+        let buf = &mut Cursor::new(&data);
+        let dummy: DummyString = from_buf(buf)?;
+        assert_eq!(dummy.value,"consumer-1");
+        Ok(())
+    }
+
+    #[derive(Deserialize,Debug,Default)]
+    struct DummySequence {
+        value: Vec<u16>
+    }
+
+    #[test]
+    fn test_serde_decode_seq_u16() -> Result<(),Error> {
+
+        let data = [
+            0x00, 0x00, 0x00, 0x3, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03
+        ];
+
+        let buf = &mut Cursor::new(&data);
+        let dummy: DummySequence = from_buf(buf)?;
+        assert_eq!(dummy.value.len(),3);
+        assert_eq!(dummy.value[0],1);
+        assert_eq!(dummy.value[1],2);
+        assert_eq!(dummy.value[2],3);
+        Ok(())
+    }
+
+    #[derive(Deserialize,Debug,Default)]
+    struct DummyOptionString {
+        value: Option<String>
+    }
+
+    #[test]
+    fn test_serde_decode_option_string() -> Result<(),Error> {
+        init_logger();
+
+        let data_none = [
+            0x00, 0x00
+        ];
+
+        let buf = &mut Cursor::new(&data_none);
+        let dummy: DummyOptionString = from_buf(buf)?;
+        assert!(dummy.value.is_none());
+
+        let data_some = [
+            0x00, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x2d, 0x31,
+        ];
+
+        let buf = &mut Cursor::new(&data_some);
+        let dummy: DummyOptionString = from_buf(buf)?;
+        assert!(dummy.value.is_some());
+        let _str_value = dummy.value.unwrap();
+        // this would fail: we can't do look-ahead with the Buf trait
+        // assert_eq!(_str_value,"consumer-1");
+
+        Ok(())
+    }
+
+}
+
diff --git a/kf-protocol/kf-protocol-serde/src/error.rs b/kf-protocol/kf-protocol-serde/src/error.rs
new file mode 100644
index 0000000000..ffd744cc9a
--- /dev/null
+++ b/kf-protocol/kf-protocol-serde/src/error.rs
@@ -0,0 +1,81 @@
+use std::fmt::Display;
+use std::fmt::Formatter;
+use std::fmt;
+use std::error::Error as StdError;
+use serde::de::Error as DeError;
+use std::io::Error as IoError;
+
+pub type Error = Box<ErrorKind>;
+
+#[derive(Debug)]
+pub enum ErrorKind {
+    Io(IoError),
+    DeserializeAnyNotSupported,
+    InvalidBoolEncoding(u8),
+    NotEnoughBytes,
+    NotSupportedFormat(String),
+    Custom(String)
+}
+
+
+impl StdError for ErrorKind {
+    fn description(&self) -> &str {
+        match *self {
+            ErrorKind::Io(ref err) => StdError::description(err),
+            ErrorKind::NotEnoughBytes => "not enough bytes",
+            ErrorKind::DeserializeAnyNotSupported => {
+                "Kafka doesn't support serde::Deserializer::deserialize_any"
+            },
+            ErrorKind::InvalidBoolEncoding(_) => "invalid u8 while decoding bool",
+            ErrorKind::Custom(ref msg) => msg,
+            ErrorKind::NotSupportedFormat(ref msg) => msg,
+        }
+    }
+
+    fn cause(&self) -> Option<&std::error::Error> {
+        match *self {
+            ErrorKind::Io(ref err) => Some(err),
+            ErrorKind::DeserializeAnyNotSupported => None,
+            ErrorKind::InvalidBoolEncoding(_) => None,
+            ErrorKind::NotEnoughBytes => None,
+            ErrorKind::Custom(_) => None,
+            ErrorKind::NotSupportedFormat(_) => None,
+        }
+    }
+}
+
+
+
+impl Display for ErrorKind {
+    fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
+        match *self {
+            ErrorKind::Io(ref ioerr) => write!(fmt, "io error: {}", ioerr),
+            ErrorKind::DeserializeAnyNotSupported => write!(
+                fmt,
+                "Kafka does not support the serde::Deserializer::deserialize_any method"
+            ),
+            ErrorKind::InvalidBoolEncoding(b) => {
+                write!(fmt, "{}, expected 0 or 1, found {}", self.description(), b)
+            },
+            ErrorKind::NotEnoughBytes => {
+                write!(fmt, "not enough bytes")
+            }
+            ErrorKind::NotSupportedFormat(_) => {
+                write!(fmt, "{}, not supported", self.description())
+            }
+            ErrorKind::Custom(ref s) => s.fmt(fmt),
+        }
+    }
+}
+
+impl From<IoError> for Error {
+    fn from(err: IoError) -> Error {
+        ErrorKind::Io(err).into()
+    }
+}
+
+impl DeError for Error {
+    fn custom<T: Display>(desc: T) -> Error {
+        ErrorKind::Custom(desc.to_string()).into()
+    }
+}
diff --git a/kf-protocol/kf-protocol-serde/src/lib.rs b/kf-protocol/kf-protocol-serde/src/lib.rs
new file mode 100644
index 0000000000..67565b6382
--- /dev/null
+++ b/kf-protocol/kf-protocol-serde/src/lib.rs
@@ -0,0 +1,9 @@
+// experimental serde adapter
+// it works with most of the Kafka data types
+// but does not work with optional strings and varints
+
+mod de;
+mod error;
+
+pub use self::error::Error;
+pub use self::error::ErrorKind;
\ No newline at end of file
diff --git a/kf-protocol/kf-protocol-transport/Cargo.toml b/kf-protocol/kf-protocol-transport/Cargo.toml
new file mode 100644
index 0000000000..2a9a3d2b98
--- /dev/null
+++ b/kf-protocol/kf-protocol-transport/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "kf-protocol-transport"
+edition = "2018"
+version = "0.1.0-alpha.1"
+authors = ["fluvio.io"]
+
+
+[dependencies]
+log = "0.4.6"
+bytes = "0.4.12"
+tokio-codec = "0.1.1"
+kf-protocol = { path = "../kf-protocol-core", package = "kf-protocol-core" }
+
+
+[dev-dependencies]
+future-helper = { path = "../../future-helper", features = ["fixture"] }
+futures-preview = { version = "0.3.0-alpha.17" }
+future-aio = { path = "../../future-aio"}
+utils = { path= "../../utils"}
\ No newline at end of file
diff --git a/kf-protocol/kf-protocol-transport/src/codec.rs b/kf-protocol/kf-protocol-transport/src/codec.rs
new file mode 100644
index 0000000000..5bb3368add
--- /dev/null
+++ b/kf-protocol/kf-protocol-transport/src/codec.rs
@@ -0,0 +1,193 @@
+use std::io::Cursor;
+use std::io::Error as IoError;
+
+use bytes::BufMut;
+use bytes::Bytes;
+use bytes::BytesMut;
+use log::trace;
+use tokio_codec::Decoder;
+use tokio_codec::Encoder;
+
+use kf_protocol::Decoder as KDecoder;
+
+#[derive(Debug, Default)]
+pub struct KfCodec(());
+
+impl Decoder for KfCodec {
+    type Item = BytesMut;
+    type Error = IoError;
+
+    fn decode(&mut self, bytes: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
+        let len = bytes.len();
+        trace!("Codec decoder: received bytes from buf: {}", len);
+        if len >= 4 {
+            let mut src = Cursor::new(&*bytes);
+            let mut packet_len: i32 = 0;
+            packet_len.decode(&mut src,0)?;
+            trace!("Codec decoder content len: {}", packet_len);
+            if (packet_len + 4) as usize <= len {
+                trace!(
+                    "Codec decoder: fully decoded packet len+4: {} ",
+                    packet_len + 4
+                );
+                bytes.advance(4);
+                Ok(Some(bytes.split_to(packet_len as usize)))
+            } else {
+                trace!(
+                    "Codec decoder buffer len: {} is less than packet+4: {}",
+                    len,
+                    packet_len + 4
+                );
+                Ok(None)
+            }
+        } else {
+            trace!("Codec decoder not enough to decode len: {}", len);
+            Ok(None)
+        }
+    }
+}
+
+impl Encoder for KfCodec {
+    type Item = Bytes;
+    type Error = IoError;
+
+    fn encode(&mut self, data: Bytes, buf: &mut BytesMut) -> Result<(), IoError> {
+        trace!("Codec encoder: writing {} bytes", data.len());
+        buf.reserve(data.len());
+        buf.put(data);
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod test {
+
+    use std::io::Cursor;
+    use std::io::Error;
+    use std::net::SocketAddr;
+    use std::time;
+
+    use bytes::BufMut;
+    use bytes::Bytes;
+    use bytes::BytesMut;
+    use futures::future::join;
+    use futures::sink::SinkExt;
+    use futures::stream::StreamExt;
+
+    use future_aio::net::AsyncTcpListener;
+    use future_aio::net::AsyncTcpStream;
+    use future_aio::net::TcpStreamSplit;
+    use future_helper::sleep;
+    use future_helper::test_async;
+    use
kf_protocol::Decoder as KDecoder; + use kf_protocol::Encoder as KEncoder; + use log::debug; + + use super::KfCodec; + + fn to_bytes(bytes: Vec) -> Bytes { + let mut buf = BytesMut::with_capacity(bytes.len()); + buf.put_slice(&bytes); + buf.freeze() + } + + #[test_async] + async fn test_async_tcp() -> Result<(), Error> { + debug!("start running test"); + + let addr = "127.0.0.1:11122".parse::().expect("parse"); + + let server_ft = async { + debug!("server: binding"); + let listener = AsyncTcpListener::bind(&addr)?; + debug!("server: successfully binding. waiting for incoming"); + let mut incoming = listener.incoming(); + while let Some(stream) = incoming.next().await { + debug!("server: got connection from client"); + let tcp_stream = stream?; + let split: TcpStreamSplit = tcp_stream.split(); + let mut sink = split.sink(); + let data: Vec = vec![0x1, 0x02, 0x03, 0x04, 0x5]; + debug!("data len: {}", data.len()); + let mut buf = vec![]; + debug!("server buf len: {}", buf.len()); + data.encode(&mut buf,0)?; + debug!("server buf len: {}", buf.len()); + let out = to_bytes(buf); + debug!("server: client final buf len: {}", out.len()); + assert_eq!(out.len(), 9); // 4(array len)+ 5 bytes + + // need to explicity send out len + let len = out.len() as i32; + let mut len_buf = vec![]; + len.encode(&mut len_buf,0)?; + sink.send(to_bytes(len_buf)).await?; + + sink.send(out).await?; + /* + debug!("server: sending 2nd value to client"); + let data2 = vec![0x20,0x11]; + await!(sink.send(to_bytes(data2)))?; + // sleep for 100 ms to give client time + debug!("wait for 50 ms to give receiver change to process"); + */ + future_helper::sleep(time::Duration::from_millis(50)).await; + debug!("finishing. terminating server"); + return Ok(()) as Result<(), Error>; + } + + Ok(()) as Result<(), Error> + }; + + let client_ft = async { + debug!("client: sleep to give server chance to come up"); + sleep(time::Duration::from_millis(100)).await; + debug!("client: trying to connect"); + let tcp_stream = AsyncTcpStream::connect(&addr).await?; + debug!("client: got connection. 
waiting"); + let split: TcpStreamSplit = tcp_stream.split(); + let mut stream = split.stream(); + if let Some(value) = stream.next().await { + debug!("client :received first value from server"); + let mut bytes = value?; + let values = bytes.take(); + debug!("client :received bytes of len: {}", values.len()); + assert_eq!(values.len(), 9, "total bytes is 9"); + + let mut cursor = Cursor::new(values); + let mut decoded_values = vec![]; + decoded_values + .decode(&mut cursor,0) + .expect("vector decoding failed"); + assert_eq!(decoded_values.len(), 5); + assert_eq!(decoded_values[0], 1); + assert_eq!(decoded_values[1], 2); + debug!("all test pass"); + } else { + assert!(false, "no first value received"); + } + + debug!("waiting for 2nd value"); + /* + if let Some(value) = await!(stream.next()) { + debug!("client: received 2nd value from server"); + let mut bytes = value?; + let values = bytes.take(); + assert_eq!(values.len(),2); + + } else { + assert!(false,"no second value received"); + } + */ + + debug!("finished client"); + + Ok(()) as Result<(), Error> + }; + + let _rt = join(client_ft,server_ft).await; + + Ok(()) + } + +} diff --git a/kf-protocol/kf-protocol-transport/src/lib.rs b/kf-protocol/kf-protocol-transport/src/lib.rs new file mode 100644 index 0000000000..23591342db --- /dev/null +++ b/kf-protocol/kf-protocol-transport/src/lib.rs @@ -0,0 +1,3 @@ +mod codec; + +pub use self::codec::KfCodec; \ No newline at end of file diff --git a/kf-protocol/rust-toolchain b/kf-protocol/rust-toolchain new file mode 120000 index 0000000000..9327ba4034 --- /dev/null +++ b/kf-protocol/rust-toolchain @@ -0,0 +1 @@ +../rust-toolchain \ No newline at end of file diff --git a/kf-protocol/send-b-client.sh b/kf-protocol/send-b-client.sh new file mode 100755 index 0000000000..51ad83cb1c --- /dev/null +++ b/kf-protocol/send-b-client.sh @@ -0,0 +1,5 @@ +# /bin/bash +# send out simple message +msg="$1" +port=${2:-9092} +xxd -r -p $msg - | nc -w 1 localhost $port | xxd -p -c 4 diff --git a/kf-protocol/src/lib.rs b/kf-protocol/src/lib.rs new file mode 100644 index 0000000000..4f7235a7d3 --- /dev/null +++ b/kf-protocol/src/lib.rs @@ -0,0 +1,28 @@ + + +pub mod derive { + pub use kf_protocol_derive::*; +} + +pub use kf_protocol_core::Decoder; +pub use kf_protocol_core::Encoder; +pub use kf_protocol_core::DecoderVarInt; +pub use kf_protocol_core::EncoderVarInt; +pub use kf_protocol_core::Version; + +pub mod bytes { + pub use kf_protocol_core::bytes::Buf; + pub use kf_protocol_core::bytes::BufMut; +} + +pub mod api { + pub use kf_protocol_api::*; +} + +pub mod transport { + pub use kf_protocol_transport::KfCodec; +} + +pub mod message { + pub use kf_protocol_message::*; +} \ No newline at end of file diff --git a/kf-protocol/test-client.sh b/kf-protocol/test-client.sh new file mode 100755 index 0000000000..f79cdbfa6a --- /dev/null +++ b/kf-protocol/test-client.sh @@ -0,0 +1,4 @@ +# /bin/bash +# send out simple message +msg="$1" +echo "$msg" | nc -w 1 localhost 9092 \ No newline at end of file diff --git a/kf-protocol/tests/api.rs b/kf-protocol/tests/api.rs new file mode 100644 index 0000000000..e861a45b12 --- /dev/null +++ b/kf-protocol/tests/api.rs @@ -0,0 +1,138 @@ +use std::io::Cursor; + +use kf_protocol::derive::Decode; +use kf_protocol::derive::Encode; +use kf_protocol::derive::KfDefault; +use kf_protocol::derive::RequestApi; +use kf_protocol::Decoder; +use kf_protocol::Encoder; +use kf_protocol::api::Request; + +#[derive(Encode,Decode,KfDefault,RequestApi,Debug)] +#[fluvio_kf(api_min_version = 5, 
api_max_version = 6, api_key = 10, response = "TestResponse")] +pub struct TestRequest { + pub value: i8, + + #[fluvio_kf(min_version = 1,max_version = 1)] + pub value2: i8, + + #[fluvio_kf(min_version = 1, default = "-1")] + pub value3: i8 +} + +#[derive(Encode,Decode,KfDefault,Debug)] +pub struct TestResponse { + pub value: i8, + + #[fluvio_kf(min_version = 1,max_version = 1)] + pub value2: i8, + + #[fluvio_kf(min_version = 1)] + pub value3: i8 +} + + + +#[derive(Encode, Decode, KfDefault, Debug)] +pub struct KfMetadataResponse { + + + #[fluvio_kf(min_version = 2)] + pub cluster_id: Option, + +} + + + + +struct RandomStruct { + +} + +impl RandomStruct { + + pub fn _type(&self) -> bool { + true + } +} + + +#[test] +fn test_metadata() { + let d = KfMetadataResponse::default(); + assert_eq!(d.cluster_id,None); +} + +#[test] +fn test_type() { + let r = RandomStruct{}; + assert_eq!(r._type(),true); +} + +#[test] +fn test_derive_api_version() { + + let mut record = TestRequest::default(); + record.value2 = 10; + record.value3 = 5; + + // version 0 should only encode value + let mut dest = vec![]; + record.encode(&mut dest,0).expect("encode"); + assert_eq!(dest.len(),1); + + // version 1 should encode value1,value2,value3 + let mut dest = vec![]; + record.encode(&mut dest,1).expect("encode"); + assert_eq!(dest.len(),3); + + // version 3 should only encode value, value3 + let mut dest = vec![]; + record.encode(&mut dest,2).expect("encode"); + assert_eq!(dest.len(),2); + assert_eq!(dest[1],5); +} + +#[test] +fn test_api_request() { + assert_eq!(TestRequest::API_KEY,10); + assert_eq!(TestRequest::MIN_API_VERSION,5); + assert_eq!(TestRequest::MAX_API_VERSION,6); +} + +#[test] +fn test_api_getter() { + let mut record = TestRequest::default(); + + record.value2 = 10; + assert_eq!(record.value2,10); + + +} + + +#[test] +fn test_decode_version() { + + // version 0 record + let data = [0x08]; + let record = TestRequest::decode_from(&mut Cursor::new(&data),0).expect("decode"); + assert_eq!(record.value,8); + assert_eq!(record.value2,0); // default + + let data = [0x08]; + assert!(TestRequest::decode_from(&mut Cursor::new(&data),1).is_err(),"version 1 needs 3 bytes"); + + let data = [0x08,0x01,0x05]; + let record = TestRequest::decode_from(&mut Cursor::new(&data),1).expect("decode"); + assert_eq!(record.value,8); + assert_eq!(record.value2,1); + assert_eq!(record.value3,5); + + let data = [0x08,0x01,0x05]; + let record = TestRequest::decode_from(&mut Cursor::new(&data),3).expect("decode"); + assert_eq!(record.value,8); + assert_eq!(record.value2,0); + assert_eq!(record.value3,1); // default, didn't consume + +} diff --git a/kf-protocol/tests/btreemap.rs b/kf-protocol/tests/btreemap.rs new file mode 100644 index 0000000000..027144f8b9 --- /dev/null +++ b/kf-protocol/tests/btreemap.rs @@ -0,0 +1,36 @@ + +use std::io::Error; +use std::io::Cursor; +use std::collections::BTreeMap; +use kf_protocol::Encoder; +use kf_protocol::Decoder; +use kf_protocol::derive::Encode; +use kf_protocol::derive::Decode; + +#[derive(Encode,Default,Decode,Debug)] +pub struct MapHolder { + values: BTreeMap> +} + + + +#[test] +fn test_encode_treemap() -> Result<(),Error> { + + let mut v1 = MapHolder::default(); + v1.values.insert(1,vec![0,2]); // 4 (key) + 4 (vec len) + 8 = 16 + v1.values.insert(5,vec![1]); // 4 (key) + 4 (vec len) + 4 = 12 + let mut src = vec![]; + v1.encode(&mut src,0)?; + assert_eq!(src.len(),30); + let v2 = MapHolder::decode_from(&mut Cursor::new(src),0)?; + assert_eq!(v2.values.len(),2); + let r1 = 
v2.values.get(&1).unwrap(); + assert_eq!(r1.len(),2); + assert_eq!(r1[0],0); + assert_eq!(r1[1],2); + let r2 = v2.values.get(&5).unwrap(); + assert_eq!(r2[0],1); + Ok(()) +} + diff --git a/kf-protocol/tests/decode.rs b/kf-protocol/tests/decode.rs new file mode 100644 index 0000000000..3b2d309880 --- /dev/null +++ b/kf-protocol/tests/decode.rs @@ -0,0 +1,52 @@ +use std::io::Cursor; + +use kf_protocol::derive::Decode; +use kf_protocol::DecoderVarInt; +use kf_protocol::Decoder; + + +#[derive(Decode, Default, Debug)] +pub struct SimpleRecord { + #[varint] + len: i64, + attributes: i8, +} + +#[derive(Decode, Default, Debug)] +pub struct RecordSet { + records: Vec, +} + +#[test] +fn test_decode_record() { + let data = [ + 0x14, // record length of 7 + 0x04, // attributes + ]; + + let mut buf = Cursor::new(data); + + let result = SimpleRecord::decode_from(&mut buf,0); + assert!(result.is_ok()); + let record = result.unwrap(); + assert_eq!(record.len, 10); + assert_eq!(record.attributes, 4); +} + +#[test] +fn test_decode_recordset() { + let data = [ + 0x00, 0x00, 0x00, 0x01, // record count + 0x14, // record length of 7 + 0x04, // attributes + ]; + + let result = RecordSet::decode_from(&mut Cursor::new(&data),0); + assert!(result.is_ok()); + let recordset = result.unwrap(); + let records = &recordset.records; + assert_eq!(records.len(), 1); + let record = &records[0]; + assert_eq!(record.len, 10); + assert_eq!(record.attributes, 4); +} diff --git a/kf-protocol/tests/default.rs b/kf-protocol/tests/default.rs new file mode 100644 index 0000000000..fb72c54635 --- /dev/null +++ b/kf-protocol/tests/default.rs @@ -0,0 +1,25 @@ +use kf_protocol::derive::KfDefault; + +#[derive(KfDefault,Debug)] +struct TestRecord { + value: i8, + value2: i8, + #[fluvio_kf(default = "4")] + value3: i8, + #[fluvio_kf(default = "-1" )] + value4: i16 +} + + + +#[test] +fn test_default() { + + let record = TestRecord::default(); + assert_eq!(record.value3,4); + assert_eq!(record.value4,-1); + + + +} + diff --git a/kf-protocol/tests/encode.rs b/kf-protocol/tests/encode.rs new file mode 100644 index 0000000000..a26f60fda3 --- /dev/null +++ b/kf-protocol/tests/encode.rs @@ -0,0 +1,44 @@ +use kf_protocol::derive::Encode; +use kf_protocol::EncoderVarInt; +use kf_protocol::Encoder; + + +#[derive(Encode,Default,Debug)] +pub struct SimpleRecord { + #[varint] + len: i64, + attributes: i8, +} + + +#[derive(Encode,Default,Debug)] +pub struct RecordSet { + records: Vec +} + +impl RecordSet { + + fn add_record(&mut self, record: SimpleRecord) { + (&mut self.records).push(record); + } +} + + + +#[test] +fn test_encode_recordset() { + + let mut recordset = RecordSet::default(); + let mut record = SimpleRecord::default(); + record.attributes = 10; + record.len = 4; + recordset.add_record(record); + + let mut src = vec![]; + let result = recordset.encode(&mut src,0); + assert!(result.is_ok()); + assert_eq!(src.len(),6); + assert_eq!(src[5],0x0a); + assert_eq!(recordset.write_size(0),6); + +} \ No newline at end of file diff --git a/kf-protocol/tests/enum.rs b/kf-protocol/tests/enum.rs new file mode 100644 index 0000000000..c263fbb61f --- /dev/null +++ b/kf-protocol/tests/enum.rs @@ -0,0 +1,298 @@ +use std::io::Error; +use std::io::Error as IoError; +use std::io::Cursor; +use std::io::ErrorKind; +use std::convert::TryInto; + +use kf_protocol::bytes::Buf; +use kf_protocol::bytes::BufMut; +use kf_protocol::derive::Encode; +use kf_protocol::derive::Decode; +use kf_protocol::Encoder; +use kf_protocol::Decoder; +use kf_protocol::Version; + + +// 
manual encode +pub enum Mix { + A = 2, + C = 3 +} + +impl Encoder for Mix { + + fn write_size(&self,_version: Version) -> usize { + match self { + Mix::A => 2, + Mix::C => 2, + } + } + + fn encode(&self, src: &mut T,version: Version) -> Result<(), IoError> + where + T: BufMut, + { + match self { + Mix::A => { + let val = 2 as u8; + val.encode(src,version)?; + }, + Mix::C => { + let val = 3 as u8; + val.encode(src,version)?; + } + } + Ok(()) + } +} + + +impl Default for Mix { + fn default() -> Mix { + Mix::A + } +} + + +impl Decoder for Mix { + + fn decode(&mut self, src: &mut T,version: Version) -> Result<(), Error> + where + T: Buf { + + let mut value: u8 = 0; + value.decode(src,version)?; + match value { + 2 => { + *self = Mix::A; + } + 3 => { + *self = Mix::C; + } + _ => return Err(Error::new( + ErrorKind::UnexpectedEof, + format!("invalid value for Mix: {}",value) + )) + } + + Ok(()) + } + +} + + + + +#[derive(Encode,Debug)] +pub enum VariantEnum { + A(u16), + C(String) +} + + +#[test] +fn test_var_encode() { + + let v1 = VariantEnum::C("hello".to_string()); + let mut src = vec![]; + let result = v1.encode(&mut src,0); + assert!(result.is_ok()); + assert_eq!(src.len(),7); + assert_eq!(v1.write_size(0),7); + +} + + + +/* +impl Encoder for VariantEnum { + + fn write_size(&self) -> usize { + match self { + VariantEnum::A(val) => val.write_size(), + VariantEnum::C(val) => val.write_size(), + } + } + + fn encode(&self, src: &mut T) -> Result<(), IoError> + where + T: BufMut, + { + match self { + VariantEnum::A(val) => val.encode(src), + VariantEnum::C(val) => val.encode(src) + } + + } +} +*/ + +/* +impl Decoder for Mix { + + fn decode(&mut self, src: &mut T) -> Result<(), Error> + where + T: Buf { + + let mut value: u8 = 0; + value.decode(src)?; + match value { + 2 => { + *self = Mix::A; + } + 3 => { + *self = Mix::C; + } + _ => return Err(Error::new( + ErrorKind::UnexpectedEof, + format!("invalid value for Mix: {}",value) + )) + } + + Ok(()) + } + +} +*/ + + + +#[derive(Encode,PartialEq,Decode,Debug)] +#[repr(u8)] +pub enum EnumNoExprTest { + A, + B +} + + +impl Default for EnumNoExprTest { + fn default() -> EnumNoExprTest { + EnumNoExprTest::A + } +} + + + + +#[test] +fn test_enum_encode() { + + let v1 = EnumNoExprTest::B; + let mut src = vec![]; + let result = v1.encode(&mut src,0); + assert!(result.is_ok()); + assert_eq!(src.len(),1); + assert_eq!(src[0],0x01); + +} + + +#[test] +fn test_enum_decode() { + + let data = [ + 0x01 + ]; + + let mut buf = Cursor::new(data); + + let result = EnumNoExprTest::decode_from(&mut buf,0); + assert!(result.is_ok()); + let val = result.unwrap(); + assert_eq!(val,EnumNoExprTest::B); + + let data = [ + 0x00 + ]; + + let mut buf = Cursor::new(data); + + let result = EnumNoExprTest::decode_from(&mut buf,0); + assert!(result.is_ok()); + let val = result.unwrap(); + assert_eq!(val,EnumNoExprTest::A); + +} + + + +#[derive(Encode,Decode,PartialEq,Debug)] +#[repr(u8)] +pub enum EnumExprTest { + D = 5, + E = 10 +} + + +impl Default for EnumExprTest { + fn default() -> EnumExprTest { + EnumExprTest::D + } +} + + +#[test] +fn test_enum_expr_encode() { + + let v1 = EnumExprTest::D; + let mut src = vec![]; + let result = v1.encode(&mut src,0); + assert!(result.is_ok()); + assert_eq!(src.len(),1); + assert_eq!(src[0],0x05); + +} + + + +#[test] +fn test_enum_expr_decode() { + + let data = [ + 0x05 + ]; + + let mut buf = Cursor::new(data); + + let result = EnumExprTest::decode_from(&mut buf,0); + assert!(result.is_ok()); + let val = result.unwrap(); + 
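+    // Editor's note: with `#[repr(u8)]` and explicit discriminants, the
+    // derived codec writes each variant as its discriminant byte, so the
+    // 0x05 decoded above maps back to `EnumExprTest::D` and 0x0a would map
+    // to `EnumExprTest::E`. A byte outside the known discriminants is a
+    // decode error, as the hand-written `Mix` impl earlier in this file shows.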
assert_eq!(val,EnumExprTest::D); +} + + + + +#[derive(Encode,Decode,PartialEq,Debug)] +#[repr(u16)] +pub enum WideEnum { + D = 5, + E = 10 +} + + +impl Default for WideEnum { + fn default() -> WideEnum { + WideEnum::D + } +} + + +#[test] +fn test_wide_encode() { + + let v1 = WideEnum::D; + let mut src = vec![]; + let result = v1.encode(&mut src,0); + assert!(result.is_ok()); + assert_eq!(src.len(),2); + assert_eq!(v1.write_size(0),2); +} + +#[test] +fn test_try_decode() { + let val: u16 = 10; + let e: WideEnum = val.try_into().expect("convert"); + assert_eq!(e,WideEnum::E); +} + diff --git a/kf-protocol/tests/generic.rs b/kf-protocol/tests/generic.rs new file mode 100644 index 0000000000..57649b43d6 --- /dev/null +++ b/kf-protocol/tests/generic.rs @@ -0,0 +1,38 @@ +use std::fmt::Debug; +use std::io::Cursor; + +use kf_protocol::Encoder; +use kf_protocol::Decoder; +use kf_protocol::derive::Encode; +use kf_protocol::derive::Decode; + + +#[derive(Encode,Decode,Default,Debug)] +pub struct GenericRecord where R: Encoder + Decoder + Debug { + + len: i64, + value: R +} + + + +#[test] +fn test_generic() { + + let record = GenericRecord { len: 20, value: 25 as i64}; + + let mut src = vec![]; + let result = record.encode(&mut src,0); + assert!(result.is_ok()); + + assert_eq!(src.len(),16); + + let result2 = GenericRecord::::decode_from(&mut Cursor::new(&src),0); + assert!(result2.is_ok()); + let decoded_record = result2.expect("is ok"); + assert_eq!(decoded_record.len,20); + assert_eq!(decoded_record.value,25); + + + +} \ No newline at end of file diff --git a/kf-protocol/tests/option.rs b/kf-protocol/tests/option.rs new file mode 100644 index 0000000000..b242b44440 --- /dev/null +++ b/kf-protocol/tests/option.rs @@ -0,0 +1,49 @@ +use std::io::Cursor; +use kf_protocol::derive::Encode; +use kf_protocol::derive::Decode; +use kf_protocol::Encoder; +use kf_protocol::Decoder; + + +#[derive(Encode,Default,Decode,Debug)] +pub struct Parent { + child: Option +} + +#[derive(Encode,Decode, Default, Debug)] +pub struct Child { + flag: bool +} + + +#[test] +fn test_encode() { + + let mut v1 = Parent::default(); + let mut child = Child::default(); + child.flag = true; + v1.child = Some(child); + let mut src = vec![]; + let result = v1.encode(&mut src,0); + assert!(result.is_ok()); + assert_eq!(src.len(),2); + assert_eq!(src[0],0x01); + assert_eq!(src[1],0x01); + +} + + +#[test] +fn test_decode() { + + let data = [ + 0x01, 0x01 + ]; + + let mut buf = Cursor::new(data); + + let result = Parent::decode_from(&mut buf,0); + assert!(result.is_ok()); + let val = result.unwrap(); + assert!(val.child.is_some()); +} diff --git a/kf-protocol/tests/version.rs b/kf-protocol/tests/version.rs new file mode 100644 index 0000000000..6e3381b132 --- /dev/null +++ b/kf-protocol/tests/version.rs @@ -0,0 +1,74 @@ +use std::io::Cursor; + +use kf_protocol::derive::Decode; +use kf_protocol::derive::Encode; +use kf_protocol::Decoder; +use kf_protocol::Encoder; + + +#[derive(Encode,Decode,Default,Debug)] +struct TestRecord { + value: i8, + #[fluvio_kf(min_version = 1,max_version = 1)] + value2: i8, + #[fluvio_kf(min_version = 1)] + value3: i8 +} + + +#[test] +fn test_encode_version() { + + utils::init_logger(); + let mut record = TestRecord::default(); + record.value2 = 10; + record.value3 = 5; + + // version 0 should only encode value + let mut dest = vec![]; + record.encode(&mut dest,0).expect("encode"); + assert_eq!(dest.len(),1); + assert_eq!(record.write_size(0),1); + + + // version 1 should encode value1,value2,value3 + let mut dest 
= vec![]; + record.encode(&mut dest,1).expect("encode"); + assert_eq!(dest.len(),3); + assert_eq!(record.write_size(1),3); + + // version 3 should only encode value, value3 + let mut dest = vec![]; + record.encode(&mut dest,2).expect("encode"); + assert_eq!(dest.len(),2); + assert_eq!(dest[1],5); + assert_eq!(record.write_size(2),2); + +} + + +#[test] +fn test_decode_version() { + + // version 0 record + let data = [0x08]; + let record = TestRecord::decode_from(&mut Cursor::new(&data),0).expect("decode"); + assert_eq!(record.value,8); + assert_eq!(record.value2,0); // default + + let data = [0x08]; + assert!(TestRecord::decode_from(&mut Cursor::new(&data),1).is_err(),"version 1 needs 3 bytes"); + + let data = [0x08,0x01,0x05]; + let record = TestRecord::decode_from(&mut Cursor::new(&data),1).expect("decode"); + assert_eq!(record.value,8); + assert_eq!(record.value2,1); + assert_eq!(record.value3,5); + + let data = [0x08,0x01,0x05]; + let record = TestRecord::decode_from(&mut Cursor::new(&data),3).expect("decode"); + assert_eq!(record.value,8); + assert_eq!(record.value2,0); + assert_eq!(record.value3,1); // default, didn't consume + +} diff --git a/kf-service/Cargo.toml b/kf-service/Cargo.toml new file mode 100644 index 0000000000..ba48f35b4e --- /dev/null +++ b/kf-service/Cargo.toml @@ -0,0 +1,19 @@ +[package] +edition = "2018" +name = "kf-service" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + +[dependencies] +log = "0.4.6" +futures-preview = { version = "0.3.0-alpha.17", features = ["nightly","async-await"] } +pin-utils = "0.1.0-alpha.4" +kf-protocol = { path = "../kf-protocol"} +future-helper = { path = "../future-helper" } +future-aio = { path = "../future-aio"} +kf-socket = { path = "../kf-socket"} +types = { path = "../types"} + +[dev-dependencies] +future-helper = { path = "../future-helper", features = ["fixture"] } +utils = { path= "../utils"} \ No newline at end of file diff --git a/kf-service/rust-toolchain b/kf-service/rust-toolchain new file mode 120000 index 0000000000..9327ba4034 --- /dev/null +++ b/kf-service/rust-toolchain @@ -0,0 +1 @@ +../rust-toolchain \ No newline at end of file diff --git a/kf-service/src/kf_server.rs b/kf-service/src/kf_server.rs new file mode 100644 index 0000000000..0491eb2f7b --- /dev/null +++ b/kf-service/src/kf_server.rs @@ -0,0 +1,267 @@ +use std::fmt::Debug; +use std::io::Error as IoError; +use std::marker::PhantomData; +use std::net::SocketAddr; + +use std::sync::Arc; +use std::process; + +use futures::Future; +use futures::StreamExt; +use futures::future::FutureExt; +use futures::select; +use futures::channel::mpsc::Receiver; +use futures::channel::mpsc::Sender; +use futures::channel::mpsc::channel; + + +use log::error; +use log::info; +use log::trace; +use log::warn; + +use future_aio::net::AsyncTcpListener; +use future_aio::net::AsyncTcpStream; +use future_helper::spawn; +use kf_protocol::api::KfRequestMessage; +use kf_protocol::Decoder as KfDecoder; +use kf_socket::KfSocket; +use kf_socket::KfSocketError; +use types::print_cli_err; + +/// Trait for responding to kf service +/// Request -> Response is type specific +/// Each response is responsible for sending back to socket +pub trait KfService { + type Request; + type Context; + + type ResponseFuture: Send + Future>; + + fn respond(self: Arc, context: Self::Context,socket: KfSocket) -> Self::ResponseFuture ; + +} + +/// Transform Service into Futures 01 +pub struct KfApiServer { + req: PhantomData, + api: PhantomData, + context: C, + service: Arc, + addr: SocketAddr +} + +impl 
KfApiServer +where + C: Clone, +{ + pub fn new(addr: SocketAddr, context: C,service: S) -> Self { + + KfApiServer { + req: PhantomData, + api: PhantomData, + service: Arc::new(service), + context, + addr + } + } + +} + + + + + +impl KfApiServer +where + R: KfRequestMessage + Send + Debug + 'static, + C: Clone + Sync + Send + 'static, + A: Send + KfDecoder + Debug + 'static, + S: KfService + Send + 'static + Sync, +{ + + pub fn run(self) -> Sender { + + let (sender ,receiver) = channel::(1); + + spawn(self.run_shutdown(receiver)); + + sender + } + + + pub async fn run_shutdown(self,shutdown_signal: Receiver) { + + match AsyncTcpListener::bind(&self.addr) { + Ok(listener) => { + + info!("starting event loop for: {}", &self.addr); + self.event_loop(listener,shutdown_signal).await; + + }, + Err(err) => { + print_cli_err!(err); + process::exit(0x0100); + } + } + } + + + + async fn event_loop(self,listener: AsyncTcpListener,mut shutdown_signal: Receiver) { + + let addr = self.addr; + + let mut incoming = listener.incoming(); + + trace!("opened connection from: {}", addr); + + let mut done = false; + + while !done { + + trace!("waiting for client connection..."); + + select! { + incoming = incoming.next().fuse() => { + self.server_incoming(incoming) + }, + + shutdown = shutdown_signal.next() => { + trace!("shutdown signal received"); + if let Some(flag) = shutdown { + warn!("shutdown received"); + done = true; + } else { + trace!("no shutdown value, ignoring"); + } + + } + + } + } + + info!("server terminating"); + } + + + fn server_incoming(&self, incoming: Option>) { + + if let Some(incoming_stream) = incoming { + match incoming_stream { + Ok(stream) => { + + let context = self.context.clone(); + let service = self.service.clone(); + + let ft = async move { + trace!("incoming connection {}",stream); + + let socket: KfSocket = stream.into(); + + if let Err(err) = service.respond(context.clone(),socket).await { + error!("error handling stream: {}", err); + } + }; + + spawn(ft); + } + Err(err) => { + error!("error with stream: {}", err); + } + } + } else { + trace!("no stream value, ignoring"); + } + + } + +} + +#[cfg(test)] +mod test { + + use std::net::SocketAddr; + use std::sync::Arc; + use std::time::Duration; + + use futures::future::join; + use futures::channel::mpsc::Sender; + use futures::channel::mpsc::channel; + use futures::sink::SinkExt; + + use log::debug; + use log::trace; + + use future_helper::sleep; + use future_helper::test_async; + + use kf_protocol::api::RequestMessage; + use kf_socket::KfSocket; + use kf_socket::KfSocketError; + + use crate::test_request::EchoRequest; + use crate::test_request::SharedTestContext; + use crate::test_request::TestApiRequest; + use crate::test_request::TestContext; + use crate::test_request::TestKafkaApiEnum; + use crate::test_request::TestService; + + use super::KfApiServer; + + fn create_server( + addr: SocketAddr, + ) -> KfApiServer { + let ctx = Arc::new(TestContext::new()); + let server: KfApiServer = + KfApiServer::new(addr, ctx,TestService::new()); + + server + } + + + async fn create_client(addr: SocketAddr) -> Result { + + debug!("client wait for 1 second for 2nd server to come up"); + sleep(Duration::from_millis(100)).await; + KfSocket::connect(&addr).await + } + + async fn test_client(addr: SocketAddr,mut shutdown: Sender) -> Result<(), KfSocketError> { + let mut socket = create_client(addr).await?; + + let request = EchoRequest::new("hello".to_owned()); + let msg = RequestMessage::new_request(request); + let reply = 
socket.send(&msg).await?; + trace!("received reply from server: {:#?}", reply); + assert_eq!(reply.response.msg, "hello"); + + // send 2nd message on same socket + let request2 = EchoRequest::new("hello2".to_owned()); + let msg2 = RequestMessage::new_request(request2); + let reply2 = socket.send(&msg2).await?; + trace!("received 2nd reply from server: {:#?}", reply2); + assert_eq!(reply2.response.msg, "hello2"); + + shutdown.send(true).await.expect("shutdown should succeed"); // shutdown server + Ok(()) + } + + #[test_async] + async fn test_server() -> Result<(), KfSocketError> { + // create fake server, anything will do since we only + // care about creating tcp stream + + let socket_addr = "127.0.0.1:30001".parse::().expect("parse"); + + let (sender,receiver) = channel::(1); + + let server = create_server(socket_addr.clone()); + let client_ft1 = test_client(socket_addr.clone(),sender); + + let _r = join(client_ft1,server.run_shutdown(receiver)).await; + + Ok(()) + } + +} diff --git a/kf-service/src/lib.rs b/kf-service/src/lib.rs new file mode 100644 index 0000000000..85eaba69ec --- /dev/null +++ b/kf-service/src/lib.rs @@ -0,0 +1,90 @@ +#![feature(generators)] +#![recursion_limit = "128"] + +mod kf_server; + +#[cfg(test)] +pub mod test_request; + +pub use kf_protocol::transport::KfCodec; +pub use self::kf_server::KfApiServer; +pub use self::kf_server::KfService; + +#[macro_export] +macro_rules! call_service { + ($req:expr,$handler:expr,$sink:expr,$msg:expr) => {{ + { + let version = $req.header.api_version(); + log::trace!("invoking handler: {}", $msg); + let response = $handler.await?; + log::trace!("send back response: {:#?}", &response); + $sink.send_response(&response, version).await?; + log::trace!("finish send"); + } + }}; + + ($handler:expr,$sink:expr) => {{ + call_service!($handler, $sink, "") + }}; +} + +#[macro_export] +macro_rules! api_loop { + ( $api_stream:ident, $($matcher:pat => $result:expr),*) => {{ + + use futures::stream::StreamExt; + loop { + + log::trace!("waiting for next api request"); + if let Some(msg) = $api_stream.next().await { + if let Ok(req_message) = msg { + log::trace!("received request: {:#?}",req_message); + match req_message { + $($matcher => $result),* + } + } else { + log::trace!("no content, end of connection {:#?}", msg); + break; + } + + } else { + log::trace!("client connect terminated"); + break; + } + } + }}; +} + +/// wait for a single request +#[macro_export] +macro_rules! 
wait_for_request { + ( $api_stream:ident, $matcher:pat => $result:expr) => {{ + + use futures::stream::StreamExt; + + if let Some(msg) = $api_stream.next().await { + + if let Ok(req_message) = msg { + + log::trace!("received request: {:#?}",req_message); + match req_message { + $matcher => $result, + _ => { + log::error!("unexpected request: {:#?}",req_message); + return Ok(()) + } + } + + } else { + log::trace!("no content, end of connection"); + return Ok(()) + } + + } else { + log::trace!("client connect terminated"); + return Ok(()) + } + + + }}; +} diff --git a/kf-service/src/test_request.rs b/kf-service/src/test_request.rs new file mode 100644 index 0000000000..e3cb51d698 --- /dev/null +++ b/kf-service/src/test_request.rs @@ -0,0 +1,181 @@ +use std::sync::Arc; +use std::io::Error as IoError; +use std::convert::TryInto; + +use futures::future::BoxFuture; +use futures::future::FutureExt; + +use kf_protocol::api::KfRequestMessage; +use kf_protocol::api::RequestMessage; +use kf_protocol::api::ResponseMessage; +use kf_protocol::api::RequestHeader; +use kf_protocol::api::api_decode; +use kf_protocol::bytes::Buf; +use kf_protocol::derive::Decode; +use kf_protocol::derive::Encode; +use kf_protocol::api::Request; +use kf_socket::KfSocket; +use kf_socket::KfSocketError; + +use crate::KfService; +use crate::call_service; +use crate::api_loop; + + +#[derive(PartialEq, Debug, Encode, Decode, Clone, Copy)] +#[repr(u16)] +pub(crate) enum TestKafkaApiEnum { + Echo = 1000, + Save = 1001 +} + +impl Default for TestKafkaApiEnum { + fn default() -> TestKafkaApiEnum { + TestKafkaApiEnum::Echo + } +} + + + + +#[derive(Decode, Encode, Debug, Default)] +pub(crate) struct EchoRequest { + msg: String, +} + +impl EchoRequest { + + pub(crate) fn new(msg: String) -> Self { + EchoRequest { + msg + } + } +} + +impl Request for EchoRequest{ + + const API_KEY: u16 = TestKafkaApiEnum::Echo as u16; + type Response = EchoResponse; + } + + +#[derive(Decode, Encode, Default, Debug)] +pub(crate) struct EchoResponse { + pub msg: String +} + + +#[derive(Decode,Encode,Debug,Default)] +pub(crate) struct SaveRequest {} +impl Request for SaveRequest{ + const API_KEY: u16 = TestKafkaApiEnum::Save as u16; + type Response = SaveResponse; +} + +#[derive(Decode,Encode,Debug,Default)] +pub(crate) struct SaveResponse{} + + +#[derive(Debug,Encode)] +pub(crate) enum TestApiRequest { + EchoRequest(RequestMessage), + SaveRequest(RequestMessage) +} + +// Added to satisfy Encode/Decode traits +impl Default for TestApiRequest { + fn default() -> TestApiRequest { + TestApiRequest::EchoRequest(RequestMessage::default()) + } +} + + +impl KfRequestMessage for TestApiRequest { + + type ApiKey = TestKafkaApiEnum; + + fn decode_with_header(src: &mut T, header: RequestHeader) -> Result + where + Self: Default + Sized, + Self::ApiKey: Sized, + T: Buf + { + match header.api_key().try_into()? 
{ + TestKafkaApiEnum::Echo => api_decode!(TestApiRequest,EchoRequest,src,header), + TestKafkaApiEnum::Save => api_decode!(TestApiRequest,SaveRequest,src,header) + } + } +} + + + +pub(crate) struct TestContext { +} + +impl TestContext { + pub(crate) fn new() -> Self { + TestContext{} + } +} + +pub(crate) type SharedTestContext = Arc; + + +pub(crate) struct TestService { +} + +impl TestService { + + pub fn new() -> TestService { + Self {} + } + + async fn handle(self: Arc, _context: SharedTestContext, socket: KfSocket) -> Result<(),KfSocketError> { + + let (mut sink,mut stream) = socket.split(); + let mut api_stream = stream.api_stream::(); + + api_loop!( + api_stream, + TestApiRequest::EchoRequest(request) => call_service!( + request, + handle_echo_request(request), + sink, + "echo request handler" + ), + TestApiRequest::SaveRequest(_request) => { + drop(api_stream); + let _orig_socket: KfSocket = (sink,stream).into(); + break; + } + ); + + Ok(()) + + } +} + + +async fn handle_echo_request( + msg: RequestMessage +) -> Result,IoError> { + + let mut response = EchoResponse::default(); + response.msg = msg.request.msg.clone(); + Ok(msg.new_response(response)) +} + + + +impl KfService for TestService { + type Context = SharedTestContext; + type Request = TestApiRequest; + type ResponseFuture = BoxFuture<'static,Result<(),KfSocketError>>; + + fn respond(self: Arc, context: Self::Context,socket: KfSocket) -> Self::ResponseFuture + { + + self.handle(context,socket).boxed() + } + +} \ No newline at end of file diff --git a/kf-socket/Cargo.toml b/kf-socket/Cargo.toml new file mode 100644 index 0000000000..284c7bfd6f --- /dev/null +++ b/kf-socket/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "kf-socket" +version = "0.1.0-alpha.1" +edition = "2018" +authors = ["fluvio.io"] + +[[bin]] +name = "file_send" +path = "src/bin/file_send.rs" +doc = false +required-features = ["cli"] + + + +[dependencies] +log = "0.4.6" +futures-preview = { version = "0.3.0-alpha.17" } +pin-utils = "0.1.0-alpha.4" +chashmap = "2.2.0" +kf-protocol = { path = "../kf-protocol"} +future-aio = { path = "../future-aio"} +future-helper = { path = "../future-helper" } + + +[dev-dependencies] +utils = { path = "../utils", features = ["fixture"]} +future-helper = { path = "../future-helper", features = ["fixture"] } diff --git a/kf-socket/README.md b/kf-socket/README.md new file mode 100644 index 0000000000..9bb95a1710 --- /dev/null +++ b/kf-socket/README.md @@ -0,0 +1,9 @@ +# build kafka send utility + +build kafka-send utilty + +```cargo build --all-features``` + +invoking kafka-send utility to send api request sample data + +```../target/debug/kafka-send data/apirequest.bin``` \ No newline at end of file diff --git a/kf-socket/rust-toolchain b/kf-socket/rust-toolchain new file mode 120000 index 0000000000..9327ba4034 --- /dev/null +++ b/kf-socket/rust-toolchain @@ -0,0 +1 @@ +../rust-toolchain \ No newline at end of file diff --git a/kf-socket/src/backflow.rs b/kf-socket/src/backflow.rs new file mode 100644 index 0000000000..f9e0d8f97f --- /dev/null +++ b/kf-socket/src/backflow.rs @@ -0,0 +1,169 @@ +use std::fmt::Debug; +use std::sync::Arc; +use std::hash::Hash; +use std::net::ToSocketAddrs; +use std::io::Error as IoError; + +use log::trace; +use log::error; +use futures::stream::StreamExt; +use futures::Future; + +use future_helper::spawn; +use kf_protocol::Decoder as KfDecoder; +use kf_protocol::api::RequestMessage; + + +use crate::SocketPool; +use crate::KfSocketError; + +/// perform backflow handling +/// backflow is where you 
respond to msg from server +#[allow(dead_code)] +pub async fn back_flow(pool: &SocketPool, id: T, handler: F) -> Result<(),KfSocketError> + where + F: Fn(RequestMessage) -> Fut + Send + Sync + 'static, + Fut: Future> + Send, + T: Eq + Hash + Debug + Clone + ToSocketAddrs, + R: Send + 'static, + RequestMessage: KfDecoder + Default + Debug, +{ + + trace!("starting proces stream: {:#?}",id); + + if let Some(mut client) = pool.get_socket(&id) { + + { + // mutation borrow occurs here so we need to nest + let mut req_stream = client.get_mut_stream().request_stream(); + let c_handler = Arc::new(handler); + while let Some(item) = req_stream.next().await { + let msg: RequestMessage = item?; + trace!("processing new msg: {:#?}",msg); + let s_handler = c_handler.clone(); + spawn(async move { + match s_handler(msg).await { + Err(err) => error!("error handling: {}",err), + _ => {} + } + }) + } + } + + + client.set_stale(); + + } + + + + Ok(()) +} + + + +/* +#[cfg(test)] +mod test { + + use std::net::SocketAddr; + use std::time::Duration; + use std::io::Error as IoError; + use std::fmt::Debug; + + use log::debug; + use log::error; + use futures::stream::StreamExt; + use futures::future::FutureExt; + + use future_helper::test_async; + use future_helper::sleep; + use future_aio::net::AsyncTcpListener; + + use kf_protocol::api::RequestMessage; + use crate::ClientPooling; + use crate::KfSocket; + use crate::KfSocketError; + use crate::test_request::TestApiRequest; + use crate::test_request::EchoRequest; + use crate::test_request::TestKafkaApiEnum; + use crate::pooling::test::server_loop; + + use super::back_flow; + + type TestPooling = ClientPooling; + + + /// create server and + async fn create_server(addr: String,_client_count: u16) -> Result<(),KfSocketError> { + + let socket_addr = addr.parse::().expect("parse"); + { + server_loop(&socket_addr,0).await?; + } + Ok(()) + } + + async fn client_check(client_pool: &TestPooling,addr: String,id: u16) -> Result<(),KfSocketError> + where RequestMessage: Debug + { + + debug!("client: {}-{} client start: sleeping for 100 second to give server chances",&addr,id); + sleep(Duration::from_millis(10)).await.expect("panic"); + back_flow(client_pool,addr, + |req: RequestMessage | { + + async move { + + debug!("client: {}-{} message from server: {:#?}",&addr,id,req); + + match req.request { + TestApiRequest::EchoRequest(ech_req) => { + debug!("client: {}-{} message {} from server",&addr,id,ech_req.msg); + assert_eq!(ech_req.msg,"Hello"); + }, + _ => assert!(false,"not echo") + } + + Ok(()) as Result<(),IoError> + + } + }).await; + + Ok(()) + + } + + async fn test_client(client_pool: &TestPooling, addr: String) -> Result<(),KfSocketError> { + + client_check(client_pool,addr.clone(),0).await.expect("should finished"); + debug!("client wait for 1 second for 2nd server to come up"); + sleep(Duration::from_millis(1000)).await.expect("panic"); + client_check(client_pool,addr.clone(),1).await.expect("should be finished"); + Ok(()) + } + + + #[test_async] + async fn test_backflow() -> Result<(),KfSocketError> { + + utils::init_logger(); + + let count = 1; + + + let addr1 = "127.0.0.1:20001".to_owned(); + + let client_pool = TestPooling::new(); + let server_ft1 = create_server(addr1.clone(),count); + let client_ft1 = test_client(&client_pool,addr1); + + client_ft1.join(server_ft1).await; + + Ok(()) + + + } + +} +*/ \ No newline at end of file diff --git a/kf-socket/src/error.rs b/kf-socket/src/error.rs new file mode 100644 index 0000000000..5be88db327 --- /dev/null +++ 
b/kf-socket/src/error.rs @@ -0,0 +1,32 @@ + +use std::fmt; +use std::io::Error as IoError; +use future_aio::SendFileError; + + +#[derive(Debug)] +pub enum KfSocketError { + IoError(IoError), + SendFileError(SendFileError) +} + +impl From for KfSocketError { + fn from(error: IoError) -> Self { + KfSocketError::IoError(error) + } +} + +impl From for KfSocketError { + fn from(error: SendFileError) -> Self { + KfSocketError::SendFileError(error) + } +} + +impl fmt::Display for KfSocketError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + KfSocketError::IoError(err) => write!(f, "{}", err), + KfSocketError::SendFileError(err) => write!(f,"{:#?}",err) + } + } +} diff --git a/kf-socket/src/file_fetch.rs b/kf-socket/src/file_fetch.rs new file mode 100644 index 0000000000..cad8c24bdb --- /dev/null +++ b/kf-socket/src/file_fetch.rs @@ -0,0 +1,288 @@ +use std::io::Error as IoError; +use std::fmt; + +use log::trace; + +use future_aio::fs::AsyncFileSlice; +use future_aio::BufMut; +use future_aio::BytesMut; +use kf_protocol::Version; +use kf_protocol::Encoder; +use kf_protocol::Decoder; +use kf_protocol::bytes::Buf; +use kf_protocol::message::fetch::KfFetchResponse; +use kf_protocol::message::fetch::KfFetchRequest; +use kf_protocol::message::fetch::FetchableTopicResponse; +use kf_protocol::message::fetch::FetchablePartitionResponse; + +use crate::StoreValue; +use crate::FileWrite; + +pub type FileFetchResponse = KfFetchResponse; +pub type FileTopicResponse = FetchableTopicResponse; +pub type FilePartitionResponse = FetchablePartitionResponse; + +#[derive(Default, Debug)] +pub struct KfFileRecordSet(AsyncFileSlice); + +pub type KfFileFetchRequest = KfFetchRequest; + + + +impl fmt::Display for KfFileRecordSet { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f,"pos: {} len: {}",self.position(),self.len()) + } +} + + +impl KfFileRecordSet { + pub fn position(&self) -> u64 { + self.0.position() + } + + pub fn len(&self) -> usize { + self.0.len() as usize + } + + pub fn raw_slice(&self) -> &AsyncFileSlice { + &self.0 + } +} + +impl From for KfFileRecordSet { + fn from(slice: AsyncFileSlice) -> Self { + Self(slice) + } +} + +impl Encoder for KfFileRecordSet { + fn write_size(&self, _version: Version) -> usize { + self.len() + 4 // include header + } + + fn encode(&self, _src: &mut T, _version: Version) -> Result<(), IoError> + where + T: BufMut, + { + unimplemented!("file slice cannot be encoded in the ButMut") + } +} + +impl Decoder for KfFileRecordSet { + fn decode(&mut self, _src: &mut T, _version: Version) -> Result<(), IoError> + where + T: Buf, + { + unimplemented!("file slice cannot be decoded in the ButMut") + } +} + +impl FileWrite for KfFileRecordSet { + fn file_encode<'a: 'b, 'b>( + &'a self, + dest: &mut BytesMut, + data: &'b mut Vec>, + version: Version, + ) -> Result<(), IoError> { + let len: i32 = self.len() as i32; + trace!("KfFileRecordSet encoding file slice len: {}", len); + len.encode(dest, version)?; + let bytes = dest.take().freeze(); + data.push(StoreValue::Bytes(bytes)); + data.push(StoreValue::FileSlice(&self.raw_slice())); + Ok(()) + } +} + +impl FileWrite for FileFetchResponse { + fn file_encode<'a: 'b, 'b>( + &'a self, + src: &mut BytesMut, + data: &'b mut Vec>, + version: Version, + ) -> Result<(), IoError> { + trace!("file encoding FileFetchResponse"); + trace!("encoding throttle_time_ms {}", self.throttle_time_ms); + self.throttle_time_ms.encode(src, version)?; + trace!("encoding error code {:#?}", self.error_code); + 
self.error_code.encode(src, version)?; + trace!("encoding session code {}", self.session_id); + self.session_id.encode(src, version)?; + trace!("encoding topics len: {}", self.topics.len()); + self.topics.file_encode(src, data, version)?; + Ok(()) + } +} + +impl FileWrite for FileTopicResponse { + fn file_encode<'a: 'b, 'b>( + &'a self, + src: &mut BytesMut, + data: &'b mut Vec>, + version: Version, + ) -> Result<(), IoError> { + trace!("file encoding fetch topic response"); + self.name.encode(src, version)?; + self.partitions.file_encode(src, data, version)?; + Ok(()) + } +} + +impl FileWrite for FilePartitionResponse { + fn file_encode<'a: 'b, 'b>( + &'a self, + src: &mut BytesMut, + data: &'b mut Vec>, + version: Version, + ) -> Result<(), IoError> { + trace!("file encoding fetch partition response"); + self.partition_index.encode(src, version)?; + self.error_code.encode(src, version)?; + self.high_watermark.encode(src, version)?; + self.last_stable_offset.encode(src, version)?; + self.log_start_offset.encode(src, version)?; + self.aborted.encode(src, version)?; + self.records.file_encode(src, data, version)?; + Ok(()) + } +} + +#[cfg(test)] +mod test { + + use std::io::Error as IoError; + use std::env::temp_dir; + use std::net::SocketAddr; + use std::time::Duration; + + use log::debug; + use futures::io::AsyncWriteExt; + use futures::future::join; + use futures::stream::StreamExt; + + use future_helper::test_async; + use future_helper::sleep; + use kf_protocol::Encoder; + use kf_protocol::api::Request; + use kf_protocol::api::ResponseMessage; + use kf_protocol::api::RequestMessage; + use kf_protocol::api::DefaultBatch; + use kf_protocol::api::DefaultRecord; + use kf_protocol::message::fetch::DefaultKfFetchRequest; + use future_aio::fs::AsyncFile; + use future_aio::net::AsyncTcpListener; + use utils::fixture::ensure_clean_file; + use crate::KfSocket; + use crate::KfSocketError; + use crate::FileFetchResponse; + use crate::KfFileFetchRequest; + use crate::FilePartitionResponse; + use crate::FileTopicResponse; + + /// create sample batches with message + fn create_batches(records: u16) -> DefaultBatch { + let mut batches = DefaultBatch::default(); + let header = batches.get_mut_header(); + header.magic = 2; + header.producer_id = 20; + header.producer_epoch = -1; + + for i in 0..records { + let msg = format!("record {}", i); + let record: DefaultRecord = msg.into(); + batches.add_record(record); + } + batches + } + + async fn setup_batch_file() -> Result<(), IoError> { + let test_file_path = temp_dir().join("batch_fetch"); + ensure_clean_file(&test_file_path); + debug!("creating test file: {:#?}", test_file_path); + let mut file = AsyncFile::create(&test_file_path).await?; + let batch = create_batches(2); + let bytes = batch.as_bytes(0)?; + file.write_all(bytes.as_ref()).await?; + Ok(()) + } + + async fn test_server(addr: SocketAddr) -> Result<(), KfSocketError> { + let listener = AsyncTcpListener::bind(&addr)?; + debug!("server is running"); + let mut incoming = listener.incoming(); + let incoming_stream = incoming.next().await; + debug!("server: got connection"); + let incoming_stream = incoming_stream.expect("next").expect("unwrap again"); + let mut socket: KfSocket = incoming_stream.into(); + + let fetch_request: Result, KfSocketError> = socket + .get_mut_stream() + .next_request_item() + .await + .expect("next value"); + let request = fetch_request?; + debug!("received fetch request: {:#?}", request); + + let test_file_path = temp_dir().join("batch_fetch"); + debug!("opening file 
test file: {:#?}", test_file_path); + let file = AsyncFile::open(&test_file_path).await?; + + let mut response = FileFetchResponse::default(); + let mut topic_response = FileTopicResponse::default(); + let mut part_response = FilePartitionResponse::default(); + part_response.partition_index = 10; + part_response.records = file.as_slice(0, None).await?.into(); + topic_response.partitions.push(part_response); + response.topics.push(topic_response); + let resp_msg = ResponseMessage::new(10, response); + + debug!( + "res msg write size: {}", + resp_msg.write_size(KfFileFetchRequest::DEFAULT_API_VERSION) + ); + + socket + .get_mut_sink() + .encode_file_slices(&resp_msg, KfFileFetchRequest::DEFAULT_API_VERSION) + .await?; + debug!("server: finish sending out"); + Ok(()) + } + + async fn setup_client(addr: SocketAddr) -> Result<(), KfSocketError> { + sleep(Duration::from_millis(50)).await; + debug!("client: trying to connect"); + let mut socket = KfSocket::connect(&addr).await?; + debug!("client: connect to test server and waiting..."); + + let req_msg: RequestMessage = RequestMessage::default(); + let res_msg = socket.send(&req_msg).await?; + + debug!("output: {:#?}", res_msg); + let topic_responses = res_msg.response.topics; + assert_eq!(topic_responses.len(), 1); + let part_responses = &topic_responses[0].partitions; + assert_eq!(part_responses.len(), 1); + let batches = &part_responses[0].records.batches; + assert_eq!(batches.len(), 1); + let records = &batches[0].records; + assert_eq!(records.len(), 2); + assert_eq!(records[0].value.to_string(), "record 0"); + assert_eq!(records[1].value.to_string(), "record 1"); + + Ok(()) + } + + #[test_async] + async fn test_save_fetch() -> Result<(), KfSocketError> { + // create fetch and save + setup_batch_file().await?; + + let addr = "127.0.0.1:9911".parse::().expect("parse"); + + let _r = join(setup_client(addr), test_server(addr.clone())).await; + + Ok(()) + } +} diff --git a/kf-socket/src/file_produce.rs b/kf-socket/src/file_produce.rs new file mode 100644 index 0000000000..1627451a52 --- /dev/null +++ b/kf-socket/src/file_produce.rs @@ -0,0 +1,58 @@ +use std::io::Error as IoError; + +use log::trace; + +use future_aio::BytesMut; +use kf_protocol::Encoder; +use kf_protocol::Version; +use kf_protocol::message::produce::KfProduceRequest; +use kf_protocol::message::produce::TopicProduceData; +use kf_protocol::message::produce::PartitionProduceData; + + +use crate::FileWrite; +use crate::StoreValue; +use super::file_fetch::KfFileRecordSet; + + +pub type FileProduceRequest = KfProduceRequest; +pub type FileTopicRequest = TopicProduceData; +pub type FilePartitionRequest = PartitionProduceData; + + + +impl FileWrite for FileProduceRequest { + + fn file_encode<'a: 'b,'b>(&'a self, src: &mut BytesMut, data: &'b mut Vec>,version: Version) -> Result<(), IoError> { + trace!("file encoding produce request"); + self.transactional_id.encode(src,version)?; + self.acks.encode(src,version)?; + self.timeout_ms.encode(src,version)?; + self.topics.file_encode(src,data,version)?; + Ok(()) + } +} + + +impl FileWrite for FileTopicRequest { + + fn file_encode<'a: 'b,'b>(&'a self, src: &mut BytesMut, data: &'b mut Vec>,version: Version) -> Result<(), IoError> { + trace!("file encoding produce topic request"); + self.name.encode(src,version)?; + self.partitions.file_encode(src,data,version)?; + Ok(()) + } +} + + +impl FileWrite for FilePartitionRequest { + + fn file_encode<'a: 'b,'b>(&'a self, src: &mut BytesMut, data: &'b mut Vec>,version: Version) -> Result<(), IoError> { + 
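// note: the scalar fields below are encoded into the shared byte buffer,
// while records.file_encode pushes a StoreValue::FileSlice so the payload
// is written zero-copy from the file rather than buffered in memory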
trace!("file encoding for partition request"); + self.partition_index.encode(src,version)?; + self.records.file_encode(src,data,version)?; + Ok(()) + } +} + + diff --git a/kf-socket/src/lib.rs b/kf-socket/src/lib.rs new file mode 100644 index 0000000000..0c0b7e9004 --- /dev/null +++ b/kf-socket/src/lib.rs @@ -0,0 +1,145 @@ +#![feature(generators)] + +mod backflow; +mod error; +mod pooling; +mod socket; +mod stream; +mod sink; +mod sink_pool; +mod file_fetch; +mod file_produce; + +#[cfg(test)] +pub mod test_request; + +pub use self::error::KfSocketError; +pub use self::socket::KfSocket; +pub use pooling::SocketPool; +pub use sink_pool::SinkPool; +pub use sink_pool::SharedSinkPool; +pub use stream::KfStream; +pub use sink::KfSink; +pub use sink::ExclusiveKfSink; +pub use file_fetch::FilePartitionResponse; +pub use file_fetch::FileFetchResponse; +pub use file_fetch::FileTopicResponse; +pub use file_fetch::KfFileFetchRequest; +pub use file_produce::FileProduceRequest; +pub use file_produce::FileTopicRequest; +pub use file_produce::FilePartitionRequest; +pub use file_fetch::KfFileRecordSet; + +use std::net::SocketAddr; +use std::io::Error as IoError; + +use log::trace; + +use future_aio::Bytes; +use future_aio::BytesMut; +use future_aio::fs::AsyncFileSlice; +use kf_protocol::Version; +use kf_protocol::api::Request; +use kf_protocol::api::RequestMessage; +use kf_protocol::api::ResponseMessage; +use kf_protocol::Encoder; + + +/// send request and respons to socket addr +pub async fn send_and_receive( + addr: SocketAddr, + request: &RequestMessage, +) -> Result, KfSocketError> +where + R: Request +{ + let mut client = KfSocket::connect(&addr).await?; + + let msgs: ResponseMessage = client.send(&request).await?; + + Ok(msgs) +} + + + +pub enum StoreValue<'a> { + Bytes(Bytes), + FileSlice(&'a AsyncFileSlice) +} + +impl <'a>std::fmt::Debug for StoreValue<'a> { + + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + StoreValue::Bytes(bytes) => write!(f, "StoreValue:Bytes wiht len: {}", bytes.len()), + StoreValue::FileSlice(slice) => write!(f, "StoreValue:FileSlice: {:#?}",slice), + } + + } +} + + + +pub trait FileWrite: Encoder { + + fn file_encode<'a: 'b,'b>(&'a self, src: &mut BytesMut, _data: &'b mut Vec>,version: Version) -> Result<(), IoError> { + self.encode(src,version) + } + +} + + +impl FileWrite for Vec where M: FileWrite, +{ + + fn file_encode<'a: 'b,'b>(&'a self, src: &mut BytesMut, data: &'b mut Vec>,version: Version) -> Result<(), IoError> + { + let len: i32 = self.len() as i32; + len.encode(src,version)?; + for v in self { + v.file_encode(src,data,version)?; + } + Ok(()) + } +} + + + +impl

<P> FileWrite for ResponseMessage<P>

where P: FileWrite + Default { + + fn file_encode<'a: 'b,'b>(&'a self, dest: &mut BytesMut, data: &'b mut Vec>,version: Version) -> Result<(), IoError> { + + trace!("file encoding response message"); + let len = self.write_size(version) as i32; + trace!("encoding response len: {}", len); + len.encode(dest,version)?; + + trace!("encoding response corre _id: {}",self.correlation_id); + self.correlation_id.encode(dest,version)?; + + trace!("encoding response"); + self.response.file_encode(dest,data,version)?; + Ok(()) + } +} + + + +impl FileWrite for RequestMessage where R: FileWrite + Default + Request { + + fn file_encode<'a: 'b,'b>(&'a self, dest: &mut BytesMut, data: &'b mut Vec>,version: Version) -> Result<(), IoError> { + + trace!("file encoding response message"); + let len = self.write_size(version) as i32; + trace!("file encoding response len: {}", len); + len.encode(dest,version)?; + + trace!("file encoding header"); + self.header.encode(dest,version)?; + + trace!("encoding response"); + self.request.file_encode(dest,data,version)?; + Ok(()) + } +} + diff --git a/kf-socket/src/pooling.rs b/kf-socket/src/pooling.rs new file mode 100644 index 0000000000..ac573bb3bb --- /dev/null +++ b/kf-socket/src/pooling.rs @@ -0,0 +1,275 @@ +use std::collections::HashMap; +use std::fmt::Debug; +use std::hash::Hash; +use std::net::ToSocketAddrs; +use std::sync::RwLock; + + +use chashmap::CHashMap; +use chashmap::WriteGuard; +use log::trace; + +use crate::KfSocket; +use crate::KfSocketError; + +/// pooling of sockets +#[derive(Debug)] +pub struct SocketPool +where + T: Eq + Hash, +{ + clients: CHashMap, + ids: RwLock>, +} + +impl SocketPool +where + T: Eq + PartialEq + Hash + Debug + Clone, + KfSocket: Sync, +{ + #[allow(dead_code)] + pub fn new() -> Self { + Self { + clients: CHashMap::new(), + ids: RwLock::new(HashMap::new()), + } + } + + pub fn insert_socket(&self, id: T, socket: KfSocket) { + trace!("inserting connection: {:#?}, returning",id); + let mut ids = self.ids.write().expect("id lock must always lock"); + ids.insert(id.clone(), true); + self.clients.insert(id.clone(),socket); + } + + /// get valid client. 
return only client which is not stale + pub fn get_socket(&self, id: &T) -> Option> + { + if let Some(client) = self.clients.get_mut(id) { + trace!("got existing connection: {:#?}, returning",id); + if client.is_stale() { + trace!("connection is stale, do not return"); + None + } else { + Some(client) + } + } else { + trace!("no existing connection: {:#?}, returning",id); + None + } + } +} + +impl SocketPool +where + T: Eq + PartialEq + Hash + Debug + Clone + ToSocketAddrs, + KfSocket: Sync, +{ + /// make connection where id can be used as address + pub async fn make_connection(&self, id: T) -> Result<(), KfSocketError> { + let addr = id.clone(); + self.make_connection_with_addr(id,&addr).await + } +} + +impl SocketPool +where + T: Eq + PartialEq + Hash + Debug + Clone, + KfSocket: Sync, +{ + /// make connection with addres as separate parameter + pub async fn make_connection_with_addr<'a,A>(&'a self, id: T,addr: &'a A) -> Result<(), KfSocketError> + where A: ToSocketAddrs + Debug + { + trace!("creating new connection: {:#?}",addr); + let mut socket_address = addr.to_socket_addrs()?; + let socket_addr =socket_address.next().unwrap(); + let client = KfSocket::connect(&socket_addr).await?; + trace!("got connection to server: {:#?}", &id); + self.insert_socket(id.clone(),client); + trace!("finish connection to server: {:#?}",&id); + Ok(()) + } + + /// get existing socket connection or make new one + pub async fn get_or_make<'a,A>(&'a self, id: T,addr: &'a A) -> Result>, KfSocketError> + where A: ToSocketAddrs + Debug + { + + if let Some(socket) = self.get_socket(&id) { + return Ok(Some(socket)); + } + + self.make_connection_with_addr(id.clone(),addr).await?; + + Ok(self.get_socket(&id)) + + + } +} + +#[cfg(test)] +pub(crate) mod test { + + use std::net::SocketAddr; + use std::time::Duration; + + use futures::future::join; + use futures::stream::StreamExt; + use log::debug; + use log::error; + + use future_aio::net::AsyncTcpListener; + use future_helper::sleep; + use future_helper::test_async; + + use super::KfSocket; + use super::KfSocketError; + use super::SocketPool; + use crate::test_request::EchoRequest; + use kf_protocol::api::RequestMessage; + + type TestPooling = SocketPool; + + pub(crate) async fn server_loop( + socket_addr: &SocketAddr, + id: u16, + ) -> Result<(), KfSocketError> { + debug!("server: {}-{} ready to bind", socket_addr, id); + let listener = AsyncTcpListener::bind(&socket_addr)?; + debug!( + "server: {}-{} successfully binding. 
waiting for incoming", + socket_addr, id + ); + let mut incoming = listener.incoming(); + if let Some(stream) = incoming.next().await { + debug!( + "server: {}-{} got connection from client, sending rely", + socket_addr, id + ); + + let stream = stream?; + let mut socket: KfSocket = stream.into(); + + let msg: RequestMessage = RequestMessage::new_request( + EchoRequest { + msg: "Hello".to_owned(), + } + ); + + socket.get_mut_sink().send_request(&msg).await?; + debug!("server: {}-{} finish send echo", socket_addr, id); + } else { + error!("no content from client"); + } + + // server terminating + drop(incoming); + debug!( + "server: {}-{} sleeping for 100ms to give client chances", + socket_addr, id + ); + + debug!("server: {}-{} server loop ended", socket_addr, id); + Ok(()) + } + + /// create server and + async fn create_server(addr: String, _client_count: u16) -> Result<(), KfSocketError> { + let socket_addr = addr.parse::().expect("parse"); + + { + server_loop(&socket_addr, 0).await?; + } + { + server_loop(&socket_addr, 1).await?; + } + + Ok(()) + } + + async fn client_check( + client_pool: &TestPooling, + addr: String, + id: u16, + ) -> Result<(), KfSocketError> { + debug!( + "client: {}-{} client start: sleeping for 100 second to give server chances", + &addr, id + ); + sleep(Duration::from_millis(10)).await; + debug!("client: {}-{} trying to connect to server", &addr, id); + client_pool.make_connection(addr.clone()).await?; + + if let Some(mut client_socket) = client_pool.get_socket(&addr) { + debug!("client: {}-{} got socket from server",&addr,id); + // create new scope, so we limit mut borrow + { + let mut req_stream = client_socket.get_mut_stream().request_stream(); + debug!( + "client: {}-{} waiting for echo request from server", + &addr, id + ); + let next = req_stream.next().await; + if let Some(result) = next { + let req_msg: RequestMessage = result?; + + + debug!( + "client: {}-{} message {} from server", + &addr, id, req_msg.request.msg + ); + assert_eq!(req_msg.request.msg, "Hello"); + + // await for next + debug!( + "client: {}-{} wait for 2nd, server should terminate this point", + &addr, id + ); + let next2 = req_stream.next().await; + assert!(next2.is_none(), "next2 should be none"); + debug!("client: {}-{} 2nd wait finished", &addr, id); + + + + } + } + + debug!("client: {}-{} mark as stale", &addr, id); + client_socket.set_stale(); + Ok(()) + } else { + panic!("not able to connect: {}", addr); + } + } + + async fn test_client(client_pool: &TestPooling, addr: String) -> Result<(), KfSocketError> { + client_check(client_pool, addr.clone(), 0).await.expect("should finished"); + debug!("client wait for 1 second for 2nd server to come up"); + sleep(Duration::from_millis(1000)).await; + client_check(client_pool, addr.clone(), 1).await.expect("should be finished"); + Ok(()) + } + + #[test_async] + async fn test_pool() -> Result<(),KfSocketError> { + let count = 1; + + // create fake server, anything will do since we only + // care creating tcp stream + let addr1 = "127.0.0.1:20001".to_owned(); + let addr2 = "127.0.0.1:20002".to_owned(); + + let server_ft1 = create_server(addr1.clone(), count); + let server_ft2 = create_server(addr2.clone(), count); + + let client_pool = TestPooling::new(); + let client_ft1 = test_client(&client_pool, addr1); + let client_ft2 = test_client(&client_pool, addr2); + + let _fr = join(join(client_ft1,client_ft2),join(server_ft1,server_ft2)).await; + + Ok(()) + } + +} diff --git a/kf-socket/src/sink.rs b/kf-socket/src/sink.rs new file mode 100644 
index 0000000000..13026ef68b --- /dev/null +++ b/kf-socket/src/sink.rs @@ -0,0 +1,298 @@ + +use std::fmt::Debug; + +#[cfg(unix)] +use std::os::unix::io::RawFd; +use std::os::unix::io::AsRawFd; + +use log::trace; +use log::debug; + +use futures::sink::SinkExt; + + +use future_aio::ZeroCopyWrite; +use future_aio::BytesMut; +use future_aio::net::TcpStreamSplitSink; +use kf_protocol::Version; +use kf_protocol::Encoder as KfEncoder; +use kf_protocol::api::RequestMessage; +use kf_protocol::api::ResponseMessage; +use kf_protocol::transport::KfCodec; + +use crate::KfSocketError; +use crate::FileWrite; +use crate::StoreValue; + +#[derive(Debug)] +pub struct KfSink{ + inner: TcpStreamSplitSink, + fd: RawFd +} + +impl KfSink { + + pub fn new(inner: TcpStreamSplitSink, fd: RawFd) -> Self { + KfSink { + fd, + inner + } + } + + pub fn get_mut_tcp_sink(&mut self) -> &mut TcpStreamSplitSink{ + &mut self.inner + } + + + /// as client, send request to server + pub async fn send_request<'a,R>( + &'a mut self, + req_msg: &'a RequestMessage, + ) -> Result<(), KfSocketError> + where RequestMessage: KfEncoder + Default + Debug + { + trace!("sending one way request: {:#?}", &req_msg); + + (&mut self.inner).send(req_msg.as_bytes(0)?).await?; + + Ok(()) + } + + /// as server, send back response + pub async fn send_response<'a,P>( + &'a mut self, + resp_msg: &'a ResponseMessage

<P>, + version: Version + + ) -> Result<(), KfSocketError> + where ResponseMessage<P>

: KfEncoder + Default + Debug + { + trace!("sending response {:#?}", &resp_msg); + + (&mut self.inner).send(resp_msg.as_bytes(version)?).await?; + + Ok(()) + } + + + pub async fn encode_file_slices<'a,T>(&'a mut self,msg: &'a T,version: Version) -> Result<(),KfSocketError> where T: FileWrite{ + + trace!("encoding file slices version: {}",version); + let mut buf = BytesMut::with_capacity(1000); + let mut data: Vec = vec![]; + msg.file_encode(&mut buf,&mut data,version)?; + trace!("file buf len: {}",buf.len()); + // add remainder + data.push(StoreValue::Bytes(buf.take().freeze())); + self.write_store_values(data).await + } + + + + /// write store values to socket + async fn write_store_values<'a>(&'a mut self, values: Vec>) -> Result<(),KfSocketError> { + + trace!("writing store values to socket"); + + for value in values { + match value { + StoreValue::Bytes(bytes) => { + trace!("writing bytes to socket len: {}",bytes.len()); + self.get_mut_tcp_sink().send(bytes).await?; + }, + StoreValue::FileSlice(f_slice) => { + if f_slice.len() == 0 { + debug!("empty slice, skipping"); + } else { + debug!("start writing file slice to socket: {}",f_slice.len()); + self.zero_copy_write(f_slice).await?; + trace!("finish writing file slice"); + } + + + } + } + } + + Ok(()) + } + + +} + + + + + +impl AsRawFd for KfSink { + + fn as_raw_fd(&self) -> RawFd { + self.fd + } + +} + + +impl ZeroCopyWrite for KfSink{} + + +use futures::lock::Mutex; + +/// Multi-thread aware Sink. Only allow sending request one a time. +pub struct ExclusiveKfSink(Mutex); + +impl ExclusiveKfSink { + pub fn new(sink: KfSink) -> Self { + ExclusiveKfSink(Mutex::new(sink)) + } + + pub async fn send_request( + &self, + req_msg: &RequestMessage, + ) -> Result<(), KfSocketError> + where RequestMessage: KfEncoder + Default + Debug + { + let mut inner_sink = self.0.lock().await; + + inner_sink.send_request(req_msg).await + } + + + +} + + + + +#[cfg(test)] +mod tests { + + use std::net::SocketAddr; + use std::io::Cursor; + use std::path::Path; + use std::time::Duration; + use std::fs::remove_file; + use std::env::temp_dir; + + use log::debug; + use log::info; + use futures::stream::StreamExt; + use futures::future::join; + use futures::io::AsyncWriteExt; + use futures::sink::SinkExt; + + use future_helper::test_async; + use future_helper::sleep; + use future_aio::fs::AsyncFile; + use future_aio::ZeroCopyWrite; + use future_aio::net::AsyncTcpListener; + use future_aio::Bytes; + use kf_protocol::Decoder; + use kf_protocol::Encoder; + + use crate::KfSocket; + use crate::KfSocketError; + + + pub fn ensure_clean_file
<P>
(path: P) where P: AsRef { + let log_path = path.as_ref(); + if let Ok(_) = remove_file(log_path) { + info!("remove existing file: {}", log_path.display()); + } else { + info!("there was no existing file: {}", log_path.display()); + } + } + + async fn test_server(addr: SocketAddr) -> Result<(),KfSocketError> { + + let listener = AsyncTcpListener::bind(&addr)?; + debug!("server is running"); + let mut incoming = listener.incoming(); + let incoming_stream = incoming.next().await; + debug!("server: got connection"); + let incoming_stream = incoming_stream.expect("next").expect("unwrap again"); + let mut socket: KfSocket = incoming_stream.into(); + let raw_tcp_sink = socket.get_mut_sink().get_mut_tcp_sink(); + + // encode message + let mut out = vec![]; + let msg = "hello".to_owned(); + msg.encode(&mut out,0)?; + + // need to explicity encode length since codec doesn't do anymore + let mut buf = vec![]; + let len: i32 = out.len() as i32 + 7; // msg plus file + len.encode(&mut buf,0)?; + msg.encode(&mut buf,0)?; + + // send out raw bytes first + debug!("out len: {}",buf.len()); + raw_tcp_sink.send(Bytes::from(buf)).await?; + + // send out file + debug!("sending out file contents"); + let test_file_path = temp_dir().join("socket_zero_copy"); + let data_file = AsyncFile::open(test_file_path).await?; + let fslice = data_file.as_slice(0,None).await?; + socket.get_mut_sink().zero_copy_write(&fslice).await?; + + debug!("server: finish sending out"); + Ok(()) + } + + async fn setup_client(addr: SocketAddr) -> Result<(),KfSocketError> { + + sleep(Duration::from_millis(50)).await; + debug!("client: trying to connect"); + let mut socket = KfSocket::connect(&addr).await?; + info!("client: connect to test server and waiting..."); + + let stream = socket.get_mut_stream(); + let next_value = stream.get_mut_tcp_stream().next().await; + let bytes = next_value.expect("next").expect("bytes"); + debug!("decoding values"); + let mut src = Cursor::new(&bytes); + + let mut msg1 = String::new(); + msg1.decode(&mut src,0).expect("decode should work"); + assert_eq!(msg1,"hello"); + + let mut msg2 = String::new(); + msg2.decode(&mut src,0).expect("2nd msg decoding should work"); + debug!("msg2: {}",msg2); + assert_eq!(msg2,"world"); + + Ok(()) + } + + // set up sample file for testing + async fn setup_data() -> Result<(),KfSocketError> { + let test_file_path = temp_dir().join("socket_zero_copy"); + ensure_clean_file(&test_file_path); + debug!("creating test file: {:#?}",test_file_path); + let mut file = AsyncFile::create(&test_file_path).await?; + let mut out = vec![]; + let msg = "world".to_owned(); + msg.encode(&mut out,0)?; + file.write_all(&out).await?; + Ok(()) + + } + + + #[test_async] + async fn test_sink_copy() -> Result<(),KfSocketError> { + + setup_data().await?; + + let addr = "127.0.0.1:9999".parse::().expect("parse"); + + let _r = join(setup_client(addr),test_server(addr.clone())).await; + + Ok(()) + } + +} + + + diff --git a/kf-socket/src/sink_pool.rs b/kf-socket/src/sink_pool.rs new file mode 100644 index 0000000000..649a1076d7 --- /dev/null +++ b/kf-socket/src/sink_pool.rs @@ -0,0 +1,151 @@ +use std::sync::Arc; +use std::fmt::Debug; +use std::hash::Hash; + +use chashmap::CHashMap; +use chashmap::WriteGuard; +use log::trace; + +use crate::KfSink; + +pub type SharedSinkPool = Arc>; + +/// Pool of sinks. 
This is lightweight version of SocketPool +/// where you only need to keep track of sink +/// no attemp to keep id indexes +#[derive(Debug)] +pub struct SinkPool(CHashMap); + +impl SinkPool +where + T: Eq + PartialEq + Hash + Debug + Clone, + KfSink: Sync, +{ + + pub fn new_shared() -> SharedSinkPool { + Arc::new(Self::new()) + } + + pub fn new() -> Self { + Self(CHashMap::new()) + } + + + pub fn insert_sink(&self, id: T, socket: KfSink) { + trace!("inserting sink at: {:#?}",id); + self.0.insert(id,socket); + } + + pub fn clear_sink(&self, id: &T) { + self.0.remove(id); + } + + /// get sink + pub fn get_sink<'a>(&'a self, id: &T) -> Option> + { + self.0.get_mut(id) + } +} + + + + + +#[cfg(test)] +mod tests { + + use std::net::SocketAddr; + use std::time::Duration; + + use log::debug; + use log::info; + use futures::stream::StreamExt; + use futures::future::join; + + + use future_helper::test_async; + use future_helper::sleep; + use future_aio::net::AsyncTcpListener; + use kf_protocol::api::RequestMessage; + + use crate::KfSocket; + use crate::KfSocketError; + use crate::test_request::EchoRequest; + use crate::test_request::EchoResponse; + use crate::test_request::TestApiRequest; + use crate::test_request::TestKafkaApiEnum; + use super::SinkPool; + + async fn test_server(addr: SocketAddr) -> Result<(),KfSocketError> { + + let sink_pool: SinkPool = SinkPool::new(); + + let listener = AsyncTcpListener::bind(&addr)?; + debug!("server is running"); + let mut incoming = listener.incoming(); + let incoming_stream = incoming.next().await; + debug!("server: got connection"); + let incoming_stream = incoming_stream.expect("next").expect("unwrap again"); + let socket: KfSocket = incoming_stream.into(); + + let (sink,mut stream) = socket.split(); + let id: u16 = 0; + sink_pool.insert_sink(id,sink); + let mut api_stream = stream.api_stream::(); + + let msg = api_stream.next().await.expect("msg").expect("unwrap"); + debug!("msg received: {:#?}",msg); + match msg { + TestApiRequest::EchoRequest(echo_request) => { + let resp = echo_request.new_response(EchoResponse::new("yes".to_owned())); + let mut sink = sink_pool.get_sink(&id).expect("sink"); + sink.send_response(&resp,0).await.expect("send succeed"); + + // can't detect sink failures + let resp2 = echo_request.new_response(EchoResponse::new("yes2".to_owned())); + sink.send_response(&resp2,0).await.expect("error occured"); + + // can detect api stream end + match api_stream.next().await { + Some(_) => assert!(false,"should not received"), + None => assert!(true,"none") + } + + }, + _ => assert!(false,"no echo request") + } + + debug!("server: finish sending out"); + + Ok(()) + } + + async fn setup_client(addr: SocketAddr) -> Result<(),KfSocketError> { + + sleep(Duration::from_millis(20)).await; + debug!("client: trying to connect"); + let mut socket = KfSocket::connect(&addr).await?; + info!("client: connect to test server and waiting..."); + + let request = RequestMessage::new_request(EchoRequest::new("hello".to_owned())); + socket.send(&request).await.expect("send success"); + drop(socket); + + Ok(()) + } + + + #[test_async] + async fn test_sink_pool() -> Result<(),KfSocketError> { + + let addr = "127.0.0.1:5999".parse::().expect("parse"); + + let _r = join(setup_client(addr),test_server(addr.clone())).await; + + Ok(()) + } + +} + + + diff --git a/kf-socket/src/socket.rs b/kf-socket/src/socket.rs new file mode 100644 index 0000000000..99065431be --- /dev/null +++ b/kf-socket/src/socket.rs @@ -0,0 +1,139 @@ + +use std::net::SocketAddr; +use 
std::pin::Pin; +use std::task::Context; +use std::task::Poll; + +#[cfg(unix)] +use std::os::unix::io::AsRawFd; + +use log::trace; +use futures::Future; +use pin_utils::unsafe_pinned; + +use kf_protocol::api::Request; +use kf_protocol::api::RequestMessage; +use kf_protocol::api::ResponseMessage; + + +use future_aio::net::AsyncTcpStream; + +use crate::KfSink; +use crate::KfStream; + +use super::KfSocketError; + +/// KfSocket is high level socket that can send and receive kf-protocol +#[derive(Debug)] +pub struct KfSocket { + sink: KfSink, + stream: KfStream, + stale: bool, +} + +unsafe impl Sync for KfSocket {} + +impl KfSocket { + pub fn new(sink: KfSink, stream: KfStream) -> Self { + KfSocket { + sink, + stream, + stale: false, + } + } + + /// create socket from establishing connection to server + pub async fn connect(addr: &SocketAddr) -> Result { + trace!("trying to connect to server at: {}", addr); + let tcp_stream = AsyncTcpStream::connect(addr).await?; + Ok(tcp_stream.into()) + } + + + pub fn fusable_connect(addr: &SocketAddr) -> impl Future> + '_ + { + SocketConnectFusableFuture{ inner: Self::connect(&addr) } + } + + + pub fn split(self) -> (KfSink, KfStream) { + (self.sink, self.stream) + } + + /// mark as stale + pub fn set_stale(&mut self) { + self.stale = true; + } + + pub fn is_stale(&self) -> bool { + self.stale + } + + pub fn get_mut_sink(&mut self) -> &mut KfSink { + &mut self.sink + } + + pub fn get_mut_stream(&mut self) -> &mut KfStream { + &mut self.stream + } + + /// as client, send request and wait for reply from server + pub async fn send<'a,R>( + &'a mut self, + req_msg: &'a RequestMessage, + ) -> Result, KfSocketError> + where + R: Request + { + self.sink.send_request(&req_msg).await?; + + self.stream.next_response(&req_msg).await + } +} + +impl From for KfSocket { + fn from(tcp_stream: AsyncTcpStream) -> Self { + let fd = tcp_stream.as_raw_fd(); + let (sink, stream) = tcp_stream.split().as_tuple(); + KfSocket { + sink: KfSink::new(sink, fd), + stream: stream.into(), + stale: false, + } + } +} + +impl From<(KfSink,KfStream)> for KfSocket { + fn from(pair: (KfSink,KfStream)) -> Self { + let (sink,stream) = pair; + KfSocket::new(sink,stream) + } +} + + + + + +/// connect future which can be fused. this requires it need to be unpin. 
+/// in the future, this could be removed +struct SocketConnectFusableFuture { + inner: F +} + +impl Unpin for SocketConnectFusableFuture{} + +impl SocketConnectFusableFuture { + unsafe_pinned!(inner: F); +} + +impl Future for SocketConnectFusableFuture where F: Future> { + + type Output = Result; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + + self.inner().poll(cx) + } + +} + diff --git a/kf-socket/src/stream.rs b/kf-socket/src/stream.rs new file mode 100644 index 0000000000..163ac24b5a --- /dev/null +++ b/kf-socket/src/stream.rs @@ -0,0 +1,128 @@ + +use std::io::Cursor; +use std::fmt::Debug; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use log::trace; +use log::error; +use futures::Stream; +use futures::stream::StreamExt; + +use kf_protocol::api::Request; +use kf_protocol::transport::KfCodec; +use kf_protocol::Decoder as KfDecoder; +use kf_protocol::api::RequestMessage; +use kf_protocol::api::ResponseMessage; +use kf_protocol::api::KfRequestMessage; +use future_aio::net::TcpStreamSplitStream; + +use crate::KfSocketError; + +#[derive(Debug)] +pub struct KfStream(TcpStreamSplitStream); + +impl KfStream { + + pub fn get_mut_tcp_stream(&mut self) -> &mut TcpStreamSplitStream { + &mut self.0 + } + + /// as server, get stream of request coming from client + pub fn request_stream(&mut self) -> impl Stream,KfSocketError>> + '_ + where + RequestMessage: KfDecoder + Debug + { + (&mut self.0).map( | req_bytes_r | { + + match req_bytes_r { + Ok(req_bytes) => { + let mut src = Cursor::new(&req_bytes); + let msg: RequestMessage = RequestMessage::decode_from(&mut src,0)?; + Ok(msg) + }, + Err(err) => Err(err.into()) + } + + }) + } + + /// as server, get next request from client + pub async fn next_request_item(&mut self) -> Option,KfSocketError>> + where + RequestMessage: KfDecoder + Debug + { + let mut stream = self.request_stream(); + stream.next().await + } + + /// as client, get next response from server + pub async fn next_response<'a,R>(&'a mut self, req_msg: &'a RequestMessage) -> Result,KfSocketError> + where + R: Request + { + + trace!("waiting for response"); + let next = self.0.next().await; + if let Some(result) = next { + match result { + Ok(req_bytes) => { + let response = req_msg.decode_response(&mut Cursor::new(&req_bytes),req_msg.header.api_version())?; + trace!("receive response: {:#?}", &response); + Ok(response) + } + Err(err) => { + error!("error receiving response: {:?}", err); + return Err(KfSocketError::IoError(err)); + } + } + } else { + error!("no more response. 
server has terminated connection"); + Err(KfSocketError::IoError(IoError::new( + ErrorKind::UnexpectedEof, + "server has terminated connection", + ))) + } + + } + + + + /// as server, get api request (PublicRequest, InternalRequest, etc) + pub fn api_stream(&mut self) -> impl Stream> + '_ + where + R: KfRequestMessage, + A: KfDecoder+ Debug + { + (&mut self.0).map(|req_bytes_r| { + + match req_bytes_r { + Ok(req_bytes) => { + trace!("received bytes from client len: {}",req_bytes.len()); + let mut src = Cursor::new(&req_bytes); + R::decode_from(&mut src).map_err(|err|err.into()) + }, + Err(err) => Err(err.into()) + } + + }) + } + + pub async fn next_api_item(&mut self) -> Option> + where + R: KfRequestMessage, + A: KfDecoder + Debug + { + let mut stream = self.api_stream(); + stream.next().await + } + + + +} + +impl From> for KfStream { + fn from(stream: TcpStreamSplitStream) -> Self { + KfStream(stream) + } +} \ No newline at end of file diff --git a/kf-socket/src/test_request.rs b/kf-socket/src/test_request.rs new file mode 100644 index 0000000000..2550076d36 --- /dev/null +++ b/kf-socket/src/test_request.rs @@ -0,0 +1,98 @@ +use std::io::Error as IoError; +use std::convert::TryInto; + + +use kf_protocol::api::KfRequestMessage; +use kf_protocol::api::RequestMessage; +use kf_protocol::api::RequestHeader; +use kf_protocol::api::api_decode; +use kf_protocol::bytes::Buf; +use kf_protocol::derive::Decode; +use kf_protocol::derive::Encode; +use kf_protocol::api::Request; + + + + +#[derive(Encode,Decode,PartialEq, Debug, Clone, Copy)] +#[repr(u16)] +pub enum TestKafkaApiEnum { + Echo = 1000 +} + +impl Default for TestKafkaApiEnum { + fn default() -> TestKafkaApiEnum { + TestKafkaApiEnum::Echo + } +} + + + + +#[derive(Decode, Encode, Debug, Default)] +pub struct EchoRequest { + pub msg: String, +} + + +impl EchoRequest { + pub fn new(msg: String) -> Self { + Self { + msg + } + } +} + +impl Request for EchoRequest{ + + const API_KEY: u16 = TestKafkaApiEnum::Echo as u16; + type Response = EchoResponse; + +} + + +#[derive(Encode,Debug)] +pub enum TestApiRequest { + EchoRequest(RequestMessage), + Noop(bool) +} + +// Added to satisfy Encode/Decode traits +impl Default for TestApiRequest { + fn default() -> TestApiRequest { + TestApiRequest::Noop(true) + } +} + + +impl KfRequestMessage for TestApiRequest { + + type ApiKey = TestKafkaApiEnum; + + fn decode_with_header(src: &mut T, header: RequestHeader) -> Result + where + Self: Default + Sized, + Self::ApiKey: Sized, + T: Buf + { + match header.api_key().try_into()? 
{ + TestKafkaApiEnum::Echo => api_decode!(TestApiRequest,EchoRequest,src,header) + } + } +} + + + +#[derive(Decode, Encode, Default, Debug)] +pub struct EchoResponse { + pub msg: String +} + + +impl EchoResponse { + pub fn new(msg: String) -> Self { + Self { + msg + } + } +} \ No newline at end of file diff --git a/metadata/Cargo.toml b/metadata/Cargo.toml new file mode 100644 index 0000000000..950aac84e3 --- /dev/null +++ b/metadata/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "metadata" +edition = "2018" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + +[dependencies] +log = "0.4.6" +kf-protocol = { path = "../kf-protocol"} +k8-metadata = { path = "../k8-metadata"} +types = { path = "../types"} \ No newline at end of file diff --git a/metadata/src/auth_token/mod.rs b/metadata/src/auth_token/mod.rs new file mode 100644 index 0000000000..95b0e3bce1 --- /dev/null +++ b/metadata/src/auth_token/mod.rs @@ -0,0 +1,8 @@ +mod spec; +mod status; + +pub use self::spec::AuthTokenSpec; +pub use self::spec::TokenType; + +pub use self::status::AuthTokenStatus; +pub use self::status::TokenResolution; diff --git a/metadata/src/auth_token/spec.rs b/metadata/src/auth_token/spec.rs new file mode 100644 index 0000000000..50c9a23edb --- /dev/null +++ b/metadata/src/auth_token/spec.rs @@ -0,0 +1,78 @@ +//! +//! # Auth Token Spec +//! +//! Auth Token Spec metadata information cached locally. +//! +use types::SpuId; + +use kf_protocol::derive::{Decode, Encode}; + +use k8_metadata::auth_token::AuthTokenSpec as K8AuthTokenSpec; +use k8_metadata::auth_token::TokenType as K8TokenType; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +#[derive(Decode, Encode, Debug, Clone, PartialEq, Default)] +pub struct AuthTokenSpec { + pub token_type: TokenType, + pub min_spu: SpuId, + pub max_spu: SpuId, +} + +#[derive(Decode, Encode, Debug, Clone, PartialEq)] +pub enum TokenType { + Any, + Custom, + Managed, +} + +// ----------------------------------- +// Encode - from K8 AuthTokenSpec +// ----------------------------------- + +impl From for AuthTokenSpec { + fn from(k8_spec: K8AuthTokenSpec) -> Self { + AuthTokenSpec { + token_type: k8_spec.token_type.into(), + min_spu: k8_spec.min_spu, + max_spu: k8_spec.max_spu, + } + } +} + + +impl From for TokenType { + fn from(k8_token_type: K8TokenType) -> Self { + match k8_token_type { + K8TokenType::Any => TokenType::Any, + K8TokenType::Custom => TokenType::Custom, + K8TokenType::Managed => TokenType::Managed, + } + } +} + +// ----------------------------------- +// Implementation - AuthTokenSpec +// ----------------------------------- + +impl AuthTokenSpec { + pub fn token_type_label(token_type: &TokenType) -> &'static str { + match token_type { + TokenType::Any => "any", + TokenType::Custom => "custom", + TokenType::Managed => "managed", + } + } +} + +// ----------------------------------- +// Implementation - TokenType +// ----------------------------------- + +impl ::std::default::Default for TokenType { + fn default() -> Self { + TokenType::Any + } +} diff --git a/metadata/src/auth_token/status.rs b/metadata/src/auth_token/status.rs new file mode 100644 index 0000000000..54d953fe07 --- /dev/null +++ b/metadata/src/auth_token/status.rs @@ -0,0 +1,124 @@ +//! +//! # Auth Token Status +//! +//! Auth Token Status metadata information cached locally. +//! 
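The cached status type mirrors `K8AuthTokenStatus` field for field, so conversion is lossless in both directions. A minimal round-trip sketch (illustrative only, assuming the K8 struct's fields are public as the `Into` impl below implies):

```rust
// round-trip a token status through the K8 representation
let k8 = K8AuthTokenStatus {
    resolution: K8TokenResolution::Ok,
    reason: "".to_owned(),
};
let local: AuthTokenStatus = k8.into();
assert!(local.is_resolution_ok());
let back: K8AuthTokenStatus = local.into(); // and back again
```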
+use kf_protocol::derive::{Decode, Encode}; + +use k8_metadata::auth_token::AuthTokenStatus as K8AuthTokenStatus; +use k8_metadata::auth_token::TokenResolution as K8TokenResolution; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +#[derive(Decode, Encode, Debug, Clone, PartialEq)] +pub struct AuthTokenStatus { + pub resolution: TokenResolution, + pub reason: String, +} + +#[derive(Decode, Encode, Debug, Clone, PartialEq)] +pub enum TokenResolution { + Ok, // operational + Init, // initializing + Invalid, // inactive +} + +// ----------------------------------- +// Encode - from K8 AuthTokenStatus +// ----------------------------------- + +impl From for AuthTokenStatus { + fn from(k8_status: K8AuthTokenStatus) -> Self { + AuthTokenStatus { + resolution: k8_status.resolution.into(), + reason: k8_status.reason.clone(), + } + } +} + +impl From for TokenResolution { + fn from(k8_token_resolution: K8TokenResolution) -> Self { + match k8_token_resolution { + K8TokenResolution::Ok => TokenResolution::Ok, + K8TokenResolution::Init => TokenResolution::Init, + K8TokenResolution::Invalid => TokenResolution::Invalid, + } + } +} + +impl Into for AuthTokenStatus { + fn into(self) -> K8AuthTokenStatus { + K8AuthTokenStatus { + resolution: self.resolution.into(), + reason: self.reason.clone(), + } + } +} + +impl Into for TokenResolution { + fn into(self) -> K8TokenResolution { + match self { + TokenResolution::Ok => K8TokenResolution::Ok, + TokenResolution::Init => K8TokenResolution::Init, + TokenResolution::Invalid => K8TokenResolution::Invalid, + } + } +} + +// ----------------------------------- +// Defaults +// ----------------------------------- + +impl ::std::default::Default for AuthTokenStatus { + fn default() -> Self { + AuthTokenStatus { + resolution: TokenResolution::default(), + reason: "".to_owned(), + } + } +} + + +impl ::std::default::Default for TokenResolution { + fn default() -> Self { + TokenResolution::Init + } +} + +// ----------------------------------- +// Implementation +// ----------------------------------- + +impl AuthTokenStatus { + pub fn resolution_label(resolution: &TokenResolution) -> &'static str { + match resolution { + TokenResolution::Ok => "ok", + TokenResolution::Init => "initializing", + TokenResolution::Invalid => "invalid", + } + } + + // ----------------------------------- + // Resolution + // ----------------------------------- + + pub fn is_resolution_ok(&self) -> bool { + self.resolution == TokenResolution::Ok + } + + pub fn is_resolution_init(&self) -> bool { + self.resolution == TokenResolution::Init + } + + pub fn next_resolution_ok(&mut self) { + self.resolution = TokenResolution::Ok; + self.reason = "".to_owned(); + } + + pub fn next_resolution_invalid(&mut self, reason: String) { + self.resolution = TokenResolution::Invalid; + self.reason = reason; + } +} diff --git a/metadata/src/lib.rs b/metadata/src/lib.rs new file mode 100644 index 0000000000..38f85eb69d --- /dev/null +++ b/metadata/src/lib.rs @@ -0,0 +1,6 @@ +#![feature(drain_filter)] + +pub mod auth_token; +pub mod spu; +pub mod topic; +pub mod partition; diff --git a/metadata/src/partition/mod.rs b/metadata/src/partition/mod.rs new file mode 100644 index 0000000000..979685e6c4 --- /dev/null +++ b/metadata/src/partition/mod.rs @@ -0,0 +1,12 @@ +mod spec; +mod status; +mod replica_key; +mod policy; + +pub use self::spec::PartitionSpec; +pub use self::status::PartitionStatus; +pub use self::status::ReplicaStatus; +pub use 
self::status::PartitionResolution; +pub use self::replica_key::ReplicaKey; +pub use self::policy::ElectionPolicy; +pub use self::policy::ElectionScoring; diff --git a/metadata/src/partition/policy.rs b/metadata/src/partition/policy.rs new file mode 100644 index 0000000000..cac288986e --- /dev/null +++ b/metadata/src/partition/policy.rs @@ -0,0 +1,21 @@ +use super::ReplicaStatus; + +pub enum ElectionScoring { + NotSuitable, + Score(u16) // 0 is perfect +} + +impl ElectionScoring { + pub fn is_suitable(&self) -> bool { + match self { + Self::NotSuitable => false, + Self::Score(_) => true + } + } +} + +pub trait ElectionPolicy { + + /// compute potential leade score against leader + fn potential_leader_score(&self,replica_status: &ReplicaStatus,leader: &ReplicaStatus) -> ElectionScoring; +} diff --git a/metadata/src/partition/replica_key.rs b/metadata/src/partition/replica_key.rs new file mode 100644 index 0000000000..d5152f5918 --- /dev/null +++ b/metadata/src/partition/replica_key.rs @@ -0,0 +1,54 @@ +//! +//! # ReplicKey +//! +//! Replica Key is a composition of of Topic/Partition +//! +use std::convert::TryFrom; + +use kf_protocol::derive::{Decode, Encode}; + +use types::PartitionError; +use types::partition::decompose_partition_name; + +#[derive(Hash, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Encode, Decode)] +pub struct ReplicaKey { + pub topic: String, + pub partition: i32, +} + +impl ReplicaKey { + pub fn new(topic: S, partition: P) -> Self + where + S: Into, + P: Into, + { + ReplicaKey { + topic: topic.into(), + partition: partition.into(), + } + } +} + +impl std::fmt::Display for ReplicaKey { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}-{}", self.topic, self.partition) + } +} + +impl From<(S, i32)> for ReplicaKey +where + S: Into, +{ + fn from(key: (S, i32)) -> ReplicaKey { + ReplicaKey::new(key.0.into(), key.1) + } +} + +impl TryFrom for ReplicaKey { + type Error = PartitionError; + + fn try_from(value: String) -> Result { + let (topic, partition) = decompose_partition_name(&value)?; + Ok(ReplicaKey::new(topic, partition)) + } +} diff --git a/metadata/src/partition/spec.rs b/metadata/src/partition/spec.rs new file mode 100644 index 0000000000..f3f1a4421d --- /dev/null +++ b/metadata/src/partition/spec.rs @@ -0,0 +1,82 @@ +//! +//! # Partition Spec +//! +//! Partition Spec metadata information cached locally. +//! 
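One behavior worth noting in the `From<Vec<i32>>` impl at the bottom of this file: the first replica in an assignment is promoted to leader, and an empty assignment falls back to leader 0. A quick sketch (illustrative, not part of the diff):

```rust
// first replica becomes the leader
let spec: PartitionSpec = vec![5001, 5002, 5003].into();
assert_eq!(spec.leader, 5001);
assert!(spec.has_spu(&5002));

// empty assignment defaults the leader to 0
let empty: PartitionSpec = Vec::<i32>::new().into();
assert_eq!(empty.leader, 0);
```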
+use types::SpuId; +use kf_protocol::derive::{Decode, Encode}; +use k8_metadata::partition::PartitionSpec as K8PartitionSpec; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +#[derive(Decode, Encode, Debug, Clone, PartialEq)] +pub struct PartitionSpec { + pub leader: SpuId, + pub replicas: Vec, +} + +// ----------------------------------- +// Encode - from K8 PartitionSpec +// ----------------------------------- + +impl From for PartitionSpec { + fn from(kv_spec: K8PartitionSpec) -> Self { + PartitionSpec { + leader: kv_spec.leader, + replicas: kv_spec.replicas, + } + } +} + +impl From for K8PartitionSpec { + fn from(spec: PartitionSpec) -> K8PartitionSpec { + K8PartitionSpec { + leader: spec.leader, + replicas: spec.replicas + } + } +} + +// ----------------------------------- +// Default +// ----------------------------------- + +impl std::default::Default for PartitionSpec { + fn default() -> Self { + PartitionSpec { + leader: 0, + replicas: Vec::default(), + } + } +} + +// ----------------------------------- +// Implementation +// ----------------------------------- + +impl PartitionSpec { + pub fn new(leader: SpuId,replicas: Vec) -> Self { + Self { + leader, + replicas + } + } + + pub fn has_spu(&self,spu: &SpuId) -> bool { + self.replicas.contains(spu) + } + +} + +impl From> for PartitionSpec { + fn from(replicas: Vec) -> Self { + if replicas.len() > 0 { + Self::new( replicas[0].clone(),replicas) + } else { + Self::new( 0, replicas) + } + + } +} \ No newline at end of file diff --git a/metadata/src/partition/status.rs b/metadata/src/partition/status.rs new file mode 100644 index 0000000000..8fbaebf7d1 --- /dev/null +++ b/metadata/src/partition/status.rs @@ -0,0 +1,562 @@ +//! +//! # Partition Status +//! +//! Partition Status metadata information cached locally. +//! 
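The merge logic below treats `-1` as an "unknown" sentinel: a status whose spu is `-1` is ignored outright, and `-1` offsets never overwrite known values. A short sketch of the rules (illustrative only):

```rust
let mut leader = ReplicaStatus::new(5000, 100, 110); // spu, hw, leo

// a spu of -1 is ignored entirely
assert!(leader.merge(&ReplicaStatus::new(-1, -1, -1)).is_none());

// same spu: hw is updated, the -1 leo leaves the old leo intact
leader.merge(&ReplicaStatus::new(5000, 105, -1));
assert_eq!((leader.hw, leader.leo), (105, 110));

// different spu: incoming status takes over, the old status is handed back
let old = leader.merge(&ReplicaStatus::new(5001, 0, 0)).expect("old status");
assert_eq!(old.spu, 5000);
```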
diff --git a/metadata/src/partition/status.rs b/metadata/src/partition/status.rs new file mode 100644 index 0000000000..8fbaebf7d1 --- /dev/null +++ b/metadata/src/partition/status.rs @@ -0,0 +1,562 @@ +//! +//! # Partition Status +//! +//! Partition Status metadata information cached locally. +//! +use std::collections::HashSet; +use std::fmt; +use std::slice::Iter; + +use kf_protocol::derive::{Decode, Encode}; +use kf_protocol::api::Offset; +use types::SpuId; + +use k8_metadata::partition::PartitionStatus as K8PartitionStatus; +use k8_metadata::partition::ReplicaStatus as K8ReplicaStatus; +use k8_metadata::partition::PartitionResolution as K8PartitionResolution; + +use super::ElectionPolicy; +use super::ElectionScoring; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +#[derive(Decode, Encode, Default, Debug, Clone, PartialEq)] +pub struct PartitionStatus { + pub resolution: PartitionResolution, + pub leader: ReplicaStatus, + lsr: u32, + replicas: Vec<ReplicaStatus> +} + +impl fmt::Display for PartitionStatus { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:#?} Leader: {} [", self.resolution, self.leader)?; + for replica in &self.replicas { + write!(f, "{},", replica)?; + } + write!(f, "]") + } +} + +// ----------------------------------- +// Implementation +// ----------------------------------- + +impl PartitionStatus { + + pub fn leader<L>(leader: L) -> Self where L: Into<ReplicaStatus> { + Self::new(leader.into(), vec![]) + } + + pub fn new<L>(leader: L, replicas: Vec<ReplicaStatus>) -> Self where L: Into<ReplicaStatus> { + Self { + resolution: PartitionResolution::default(), + leader: leader.into(), + replicas, + ..Default::default() + } + } + + pub fn new2<L>(leader: L, replicas: Vec<ReplicaStatus>, resolution: PartitionResolution) -> Self where L: Into<ReplicaStatus> { + Self { + resolution, + leader: leader.into(), + replicas, + ..Default::default() + } + } + + pub fn is_online(&self) -> bool { + self.resolution == PartitionResolution::Online + } + + pub fn is_offline(&self) -> bool { + self.resolution != PartitionResolution::Online + } + + pub fn lsr(&self) -> u32 { + self.lsr + } + + pub fn replica_iter(&self) -> Iter<ReplicaStatus> { + self.replicas.iter() + } + + pub fn live_replicas(&self) -> Vec<SpuId> { + self.replicas + .iter() + .map(|lrs| lrs.spu) + .collect() + } + + pub fn offline_replicas(&self) -> Vec<SpuId> { + vec![] + } + + pub fn has_live_replicas(&self) -> bool { + self.replicas.len() > 0 + } + + /// Find the best candidate from online replicas. + /// If there are multiple matches, pick the one with the best score (lowest lag). + pub fn candidate_leader<P>(&self, online: &HashSet<SpuId>, policy: &P) -> Option<SpuId> + where P: ElectionPolicy + { + let mut candidate_spu = None; + let mut best_score = 0; + + for candidate in &self.replicas { + // only consider live replicas + if online.contains(&candidate.spu) { + match policy.potential_leader_score(&candidate, &self.leader) { + ElectionScoring::Score(score) => { + if candidate_spu.is_some() { + if score < best_score { + best_score = score; + candidate_spu = Some(candidate.spu); + } + } else { + best_score = score; + candidate_spu = Some(candidate.spu); + } + }, + _ => {} + } + } + } + candidate_spu + } + + /// merge status from spu + /// ignore changes from spu = -1 or offsets = -1 + pub fn merge(&mut self, other: Self) { + self.resolution = other.resolution; + if let Some(old) = self.leader.merge(&other.leader) { + self.replicas.push(old); // move old leader to replicas + } + + for status in other.replicas { + if let Some(old_status) = find_status(&mut self.replicas, status.spu) { + old_status.merge(&status); + } else { + self.replicas.push(status); + } + } + // delete any stale follower status for the current leader + let spu = self.leader.spu; + self.replicas.drain_filter(move |s| s.spu == spu); + self.update_lrs(); + } + + /// recalculate lsr, which is the count of followers whose leo matches the leader's + fn update_lrs(&mut self) { + let leader_leo = self.leader.leo; + self.lsr = self.replicas.iter().filter(|re| re.leo != -1 && leader_leo == re.leo).count() as u32; + } +} + +/// find the status entry matching the given spu +fn find_status(status: &mut Vec<ReplicaStatus>, spu: SpuId) -> Option<&'_ mut ReplicaStatus> { + status.iter_mut().find(|status| status.spu == spu) +} + +// ----------------------------------- +// Encode - from KV Partition Status +// ----------------------------------- + +impl From<K8PartitionStatus> for PartitionStatus { + fn from(kv_status: K8PartitionStatus) -> Self { + Self { + resolution: kv_status.resolution.into(), + leader: kv_status.leader.into(), + replicas: kv_status.replicas.into_iter().map(|lrs| lrs.into()).collect(), + lsr: kv_status.lsr + } + } +} + +impl From<PartitionStatus> for K8PartitionStatus { + fn from(status: PartitionStatus) -> K8PartitionStatus { + K8PartitionStatus { + resolution: status.resolution.into(), + leader: status.leader.into(), + replicas: status.replicas.into_iter().map(|lrs| lrs.into()).collect(), + lsr: status.lsr.into() + } + } +} + +#[derive(Decode, Encode, Debug, Clone, PartialEq)] +pub enum PartitionResolution { + Offline, // No leader available for serving partition + Online, // Partition is running normally, status contains replica info + LeaderOffline, // Election has failed, no suitable leader has been found + ElectionLeaderFound // New leader has been selected +} + +impl Default for PartitionResolution { + fn default() -> Self { + PartitionResolution::Offline + } +} + +impl From<K8PartitionResolution> for PartitionResolution { + fn from(resolution: K8PartitionResolution) -> Self { + match resolution { + K8PartitionResolution::Offline => Self::Offline, + K8PartitionResolution::Online => Self::Online, + K8PartitionResolution::ElectionLeaderFound => Self::ElectionLeaderFound, + K8PartitionResolution::LeaderOffline => Self::LeaderOffline + } + } +} + +impl From<PartitionResolution> for K8PartitionResolution { + fn from(resolution: PartitionResolution) -> Self { + match resolution { + PartitionResolution::Offline => Self::Offline, + PartitionResolution::Online => Self::Online, + PartitionResolution::LeaderOffline => Self::LeaderOffline, + PartitionResolution::ElectionLeaderFound => Self::ElectionLeaderFound + } + } +}
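Since `merge` and `update_lrs` carry the core bookkeeping here, a short hedged sketch may help (the `demo_merge` wrapper and crate path are assumptions): once a follower's reported `leo` matches the leader's, it is counted in `lsr`.

```rust
use metadata::partition::PartitionStatus; // assumed crate path

fn demo_merge() {
    let mut target = PartitionStatus::new((5000, 100, 110), vec![(5001, 90, 100).into()]);
    assert_eq!(target.lsr(), 0); // constructors do not recompute lsr

    // Follower 5001 reports that it caught up to the leader's leo of 110.
    let report = PartitionStatus::new((5000, 100, 110), vec![(5001, 100, 110).into()]);
    target.merge(report);
    assert_eq!(target.lsr(), 1);
}
```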
+#[derive(Decode, Encode, Debug, Clone, PartialEq)] +pub struct ReplicaStatus { + pub spu: i32, + pub hw: i64, + pub leo: i64 +} + +impl fmt::Display for ReplicaStatus { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "spu:{} hw:{} leo: {}", self.spu, self.hw, self.leo) + } +} + +impl Default for ReplicaStatus { + fn default() -> Self { + ReplicaStatus { + spu: -1, + hw: -1, + leo: -1 + } + } +} + +impl ReplicaStatus { + pub fn new(spu: SpuId, hw: Offset, leo: Offset) -> Self { + Self { + spu, + hw, + leo + } + } + + /// compute lag score with respect to the leader + pub fn leader_lag(&self, leader_status: &Self) -> i64 { + leader_status.leo - self.leo + } + + pub fn high_watermark_lag(&self, leader_status: &Self) -> i64 { + leader_status.hw - self.hw + } + + /// merge status + pub fn merge(&mut self, source: &Self) -> Option<Self> { + // if source spu is -1, we ignore it + if source.spu == -1 { + return None; + } + + // if the spu is the same, we override; otherwise we copy but return the old status + if self.spu == -1 || self.spu == source.spu { + self.spu = source.spu; + + if source.leo != -1 { + self.leo = source.leo; + } + + if source.hw != -1 { + self.hw = source.hw; + } + None + } else { + let old = Self::new(self.spu, self.hw, self.leo); + + self.spu = source.spu; + self.leo = source.leo; + self.hw = source.hw; + + Some(old) + } + } +} + +impl From<(SpuId, Offset, Offset)> for ReplicaStatus { + fn from(val: (SpuId, Offset, Offset)) -> Self { + let (id, high_watermark, end_offset) = val; + Self::new(id, high_watermark, end_offset) + } +} + +impl From<K8ReplicaStatus> for ReplicaStatus { + fn from(status: K8ReplicaStatus) -> Self { + Self { + spu: status.spu, + hw: status.hw, + leo: status.leo + } + } +} + +impl From<ReplicaStatus> for K8ReplicaStatus { + fn from(status: ReplicaStatus) -> Self { + Self { + spu: status.spu, + hw: status.hw, + leo: status.leo + } + } +}
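The `-1` sentinel handling in `ReplicaStatus::merge` can also be shown with a small hedged sketch (wrapper and crate path assumed): a report from spu `-1` is ignored entirely, and `-1` offsets leave the existing values untouched.

```rust
use metadata::partition::ReplicaStatus; // assumed crate path

fn demo_replica_merge() {
    let mut status = ReplicaStatus::new(5001, 100, 110);

    // spu == -1 marks an empty report; nothing changes.
    assert!(status.merge(&ReplicaStatus::default()).is_none());
    assert_eq!(status.leo, 110);

    // Same spu: valid offsets overwrite, -1 offsets are skipped.
    status.merge(&ReplicaStatus::new(5001, -1, 120));
    assert_eq!((status.hw, status.leo), (100, 120));
}
```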
+#[cfg(test)] +mod test { + + use std::collections::HashSet; + + use super::PartitionStatus; + use super::ReplicaStatus; + use super::ElectionPolicy; + use super::ElectionScoring; + + struct SimplePolicy {} + + impl ElectionPolicy for SimplePolicy { + fn potential_leader_score(&self, replica_status: &ReplicaStatus, leader: &ReplicaStatus) -> ElectionScoring { + let lag = leader.leo - replica_status.leo; + if lag < 4 { + ElectionScoring::Score(lag as u16) + } else { + ElectionScoring::NotSuitable + } + } + } + + #[test] + fn test_candidate_spu_no_candidate() { + let status = PartitionStatus::leader((5000,0,0)); + let online_spu = HashSet::new(); + let policy = SimplePolicy{}; + + assert!(status.candidate_leader(&online_spu,&policy).is_none()); + } + + #[test] + fn test_candidate_spu_best() { + let status = PartitionStatus::new( + (5000,100,110), + vec![ + (5001,100,110).into(), // caught up with leader (best) + (5002,100,105).into(), // needs 5 offsets to catch up with leader + ]); + let mut online_spu = HashSet::new(); + online_spu.insert(5001); + online_spu.insert(5002); + let policy = SimplePolicy{}; + + assert_eq!(status.candidate_leader(&online_spu,&policy),Some(5001)); // 5001 has least lag + } + + /// we only care about which one has the least end-offset lag, + /// even if the follower didn't catch up to the HW + #[test] + fn test_candidate_spu_best_conflict() { + let status = PartitionStatus::new( + (5000,100,110), + vec![ + (5001,95,110).into(), // caught up with leader (best) + (5002,100,105).into(), // needs 5 offsets to catch up with leader + ]); + + let mut online_spu = HashSet::new(); + online_spu.insert(5000); + online_spu.insert(5001); + online_spu.insert(5002); + let policy = SimplePolicy{}; + + assert_eq!(status.candidate_leader(&online_spu,&policy),Some(5001)); // 5001 has least lag + } + + /// check when we don't have any online + #[test] + fn test_candidate_spu_no_online() { + let status = PartitionStatus::new( + (5000,100,110), + vec![ + (5001,95,110).into(), // caught up with leader (best) + (5002,100,105).into(), // needs 5 offsets to catch up with leader + ]); + + let online_spu = HashSet::new(); + let policy = SimplePolicy{}; + + assert!(status.candidate_leader(&online_spu,&policy).is_none()); + } + + #[test] + fn test_merge_initial() { + let mut target = PartitionStatus::default(); + let source = PartitionStatus::leader((5000,10,11)); + target.merge(source); + assert_eq!(target.leader,(5000,10,11).into()); + assert_eq!(target.replicas.len(),0); + + let source = PartitionStatus::new( + (5000,10,11), + vec![ + (5001,9,11).into() + ]); + target.merge(source); + + assert_eq!(target.replicas.len(),1); + assert_eq!(target.replicas[0],(5001,9,11).into()); + } + + #[test] + fn test_merge_lrs_full() { + let mut target = PartitionStatus::new( + (5000,100,110), + vec![ + (5001,95,110).into(), + (5002,100,105).into(), + ]); + + let source = PartitionStatus::new( + (5000,120,120), + vec![ + (5002,110,120).into(), + (5001,-1,-1).into() + ]); + + target.merge(source); + + assert_eq!(target.leader,(5000,120,120).into()); + assert_eq!(target.replicas[0],(5001,95,110).into()); + assert_eq!(target.replicas[1],(5002,110,120).into()); + } + + #[test] + fn test_merge_lrs_different_leader() { + let mut target = PartitionStatus::new( + (5000,100,110), + vec![ + (5001,95,110).into(), + ]); + + let source = PartitionStatus::new( + (5001,120,120), + vec![ + (5000,-1,-1).into() + ]); + + target.merge(source); + + assert_eq!(target.leader,(5001,120,120).into()); + assert_eq!(target.replicas.len(),1); + assert_eq!(target.replicas[0],(5000,100,110).into()); + } + + #[test] + fn test_merge_lrs_case_2() { + let mut target = PartitionStatus::new( + (5002,0,0), + vec![ + (5002,0,0).into(), + (5001,0,0).into(), + ]); + + let source = PartitionStatus::new( + (5002,0,0), + vec![ + (5001,-1,-1).into(), + ]); + + target.merge(source); + + assert_eq!(target.leader,(5002,0,0).into()); + assert_eq!(target.replicas.len(),1); + assert_eq!(target.replicas[0],(5001,0,0).into()); + } + +} + diff --git a/metadata/src/spu/mod.rs b/metadata/src/spu/mod.rs new file mode 100644 index 0000000000..388abef97b --- /dev/null +++ b/metadata/src/spu/mod.rs @@ -0,0 +1,11 @@ +mod spec; +mod status; + +pub use self::spec::SpuSpec; +pub use self::spec::SpuType; +pub use self::spec::Endpoint; +pub use self::spec::EncryptionEnum; + +pub use self::status::SpuStatus; +pub use self::status::SpuResolution; + diff --git a/metadata/src/spu/spec.rs b/metadata/src/spu/spec.rs new file mode 100644 index 0000000000..2bbe5bce8a --- /dev/null +++ b/metadata/src/spu/spec.rs @@ -0,0 +1,268 @@ +//! +//! # Spu Spec +//! +//! Spu Spec metadata information cached locally. +//!
+use std::convert::TryFrom; +use std::io::Error as IoError; +use std::fmt; + +use types::socket_helpers::EndPoint as SocketEndPoint; +use types::socket_helpers::EndPointEncryption; +use types::defaults::{SPU_PRIVATE_HOSTNAME, SPU_PRIVATE_PORT}; +use types::defaults::{SPU_PUBLIC_HOSTNAME, SPU_PUBLIC_PORT}; +use types::SpuId; +use types::socket_helpers::ServerAddress; + +use kf_protocol::derive::{Decode, Encode}; + +use k8_metadata::spu::SpuSpec as K8SpuSpec; +use k8_metadata::spu::SpuType as K8SpuType; +use k8_metadata::spu::EncryptionEnum as K8EncryptionEnum; +use k8_metadata::spu::Endpoint as K8Endpoint; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +#[derive(Decode, Encode, Debug, Clone, PartialEq)] +pub struct SpuSpec { + pub id: SpuId, + pub spu_type: SpuType, + pub public_endpoint: Endpoint, + pub private_endpoint: Endpoint, + + pub rack: Option<String>, +} + +#[derive(Decode, Encode, Debug, Clone, PartialEq)] +pub struct Endpoint { + pub port: u16, + pub host: String, + pub encryption: EncryptionEnum, +} + +#[derive(Decode, Encode, Debug, Clone, PartialEq)] +pub enum EncryptionEnum { + PLAINTEXT, + SSL, +} + +#[derive(Decode, Encode, Debug, Clone, PartialEq)] +pub enum SpuType { + Managed, + Custom, +} + +impl Default for SpuType { + fn default() -> Self { + SpuType::Managed + } } + +impl From<K8SpuType> for SpuType { + fn from(kv_spu_type: K8SpuType) -> Self { + match kv_spu_type { + K8SpuType::Managed => SpuType::Managed, + K8SpuType::Custom => SpuType::Custom, + } + } +} + +impl Into<K8SpuType> for SpuType { + fn into(self) -> K8SpuType { + match self { + SpuType::Managed => K8SpuType::Managed, + SpuType::Custom => K8SpuType::Custom, + } + } +} + +// clone-based conversion; there is probably a better way to do this +impl From<&SpuSpec> for SpuSpec { + fn from(spec: &SpuSpec) -> Self { + spec.clone() + } +} + +impl From<SpuId> for SpuSpec { + fn from(spec: SpuId) -> Self { + Self::new(spec) + } +} + +impl From<K8SpuSpec> for SpuSpec { + fn from(kv_spec: K8SpuSpec) -> Self { + // convert spu-type, defaults to Custom for none + let spu_type = if let Some(kv_spu_type) = kv_spec.spu_type { + kv_spu_type.into() + } else { + SpuType::Custom + }; + + // spu spec + SpuSpec { + id: kv_spec.spu_id, + spu_type: spu_type, + public_endpoint: Endpoint::new(&kv_spec.public_endpoint), + private_endpoint: Endpoint::new(&kv_spec.private_endpoint), + rack: kv_spec.rack.clone(), + } + } +} + +impl From<SpuSpec> for K8SpuSpec { + fn from(spec: SpuSpec) -> Self { + K8SpuSpec { + spu_id: spec.id, + spu_type: Some(spec.spu_type.into()), + public_endpoint: spec.public_endpoint.into(), + private_endpoint: spec.private_endpoint.into(), + rack: spec.rack, + } + } +} + +impl Default for SpuSpec { + fn default() -> Self { + SpuSpec { + id: -1, + spu_type: SpuType::default(), + public_endpoint: Endpoint { + port: SPU_PUBLIC_PORT, + host: SPU_PUBLIC_HOSTNAME.to_string(), + encryption: EncryptionEnum::default(), + }, + private_endpoint: Endpoint { + port: SPU_PRIVATE_PORT, + host: SPU_PRIVATE_HOSTNAME.to_string(), + encryption: EncryptionEnum::default(), + }, + rack: None, + } + } +} + +impl SpuSpec { + /// Given an Spu id generate a new SpuSpec + pub fn new(id: SpuId) -> Self { + let mut spec = Self::default(); + spec.id = id; + spec + } + + pub fn set_custom(mut self) -> Self { + self.spu_type = SpuType::Custom; + self + } + + /// Return type label in String format + pub fn type_label(&self) -> String { + match self.spu_type { + SpuType::Managed => "managed".to_owned(), + SpuType::Custom => "custom".to_owned(), + } + } + + /// Return custom type: true for custom, false otherwise + pub fn is_custom(&self) -> bool { + match self.spu_type { + SpuType::Managed => false, + SpuType::Custom => true, + } + } + + pub fn private_server_address(&self) -> ServerAddress { + let private_ep = &self.private_endpoint; + ServerAddress { + host: private_ep.host.clone(), + port: private_ep.port, + } + } +}
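A small hedged sketch of the builder-style API above (the `demo_spu_spec` wrapper and `metadata::spu` path are assumptions): custom SPUs are created by taking the managed default and flipping the type.

```rust
use metadata::spu::{SpuSpec, SpuType}; // assumed crate path

fn demo_spu_spec() {
    let spec = SpuSpec::new(5001).set_custom();
    assert_eq!(spec.id, 5001);
    assert_eq!(spec.spu_type, SpuType::Custom);
    assert_eq!(spec.type_label(), "custom");
}
```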
+// ----------------------------------- +// Implementation - Endpoint +// ----------------------------------- + +impl fmt::Display for Endpoint { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}:{}", self.host, self.port) + } +} + +impl TryFrom<&Endpoint> for SocketEndPoint { + type Error = IoError; + + fn try_from(endpoint: &Endpoint) -> Result<Self, Self::Error> { + types::socket_helpers::host_port_to_socket_addr(&endpoint.host, endpoint.port).map(|addr| { + SocketEndPoint { + addr, + encryption: EndPointEncryption::PLAINTEXT, + } + }) + } +} + +#[allow(dead_code)] +impl TryFrom<&Endpoint> for std::net::SocketAddr { + type Error = IoError; + + fn try_from(endpoint: &Endpoint) -> Result<Self, Self::Error> { + types::socket_helpers::host_port_to_socket_addr(&endpoint.host, endpoint.port) + } +} + +impl Into<K8Endpoint> for Endpoint { + fn into(self) -> K8Endpoint { + K8Endpoint { + host: self.host.clone(), + port: self.port, + encryption: match self.encryption { + EncryptionEnum::PLAINTEXT => K8EncryptionEnum::PLAINTEXT, + EncryptionEnum::SSL => K8EncryptionEnum::SSL, + }, + } + } +} + +impl Default for Endpoint { + fn default() -> Self { + Endpoint { + host: "127.0.0.1".to_owned(), + port: 0, + encryption: EncryptionEnum::default(), + } + } +} + +impl Endpoint { + pub fn from_port_host(port: u16, host: &String) -> Self { + Endpoint { + port: port, + host: host.clone(), + encryption: EncryptionEnum::PLAINTEXT, + } + } + + pub fn new(ep: &K8Endpoint) -> Endpoint { + Endpoint { + port: ep.port, + host: ep.host.clone(), + encryption: match ep.encryption { + K8EncryptionEnum::PLAINTEXT => EncryptionEnum::PLAINTEXT, + K8EncryptionEnum::SSL => EncryptionEnum::SSL, + }, + } + } +} + +// ----------------------------------- +// Implementation - EncryptionEnum +// ----------------------------------- + +impl Default for EncryptionEnum { + fn default() -> Self { + EncryptionEnum::PLAINTEXT + } +}
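For completeness, a hedged sketch of the `TryFrom` conversion above (wrapper and crate path assumed); host resolution can fail, hence the `Result`:

```rust
use std::convert::TryFrom;
use std::net::SocketAddr;
use metadata::spu::Endpoint; // assumed crate path

fn demo_endpoint() -> Result<(), std::io::Error> {
    let ep = Endpoint::from_port_host(9005, &"localhost".to_string());
    // Resolves "localhost:9005" through the socket helpers.
    let addr = SocketAddr::try_from(&ep)?;
    println!("resolved: {}", addr);
    Ok(())
}
```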
diff --git a/metadata/src/spu/status.rs b/metadata/src/spu/status.rs new file mode 100644 index 0000000000..bb192bbb2c --- /dev/null +++ b/metadata/src/spu/status.rs @@ -0,0 +1,117 @@ +//! +//! # Spu Status +//! +//! Spu Status metadata information cached locally. +//! +use std::fmt; + +use kf_protocol::derive::{Decode, Encode}; + +use k8_metadata::spu::SpuStatus as K8SpuStatus; +use k8_metadata::spu::SpuStatusResolution as K8SpuStatusResolution; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +#[derive(Decode, Encode, Debug, Clone, PartialEq)] +pub struct SpuStatus { + pub resolution: SpuResolution, +} + +impl fmt::Display for SpuStatus { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f,"{:#?}",self.resolution) + } +} + +// ----------------------------------- +// Implementation - SpuStatus +// ----------------------------------- + +impl From<K8SpuStatus> for SpuStatus { + fn from(kv_status: K8SpuStatus) -> Self { + SpuStatus { + resolution: match kv_status.resolution { + K8SpuStatusResolution::Online => SpuResolution::Online, + K8SpuStatusResolution::Offline => SpuResolution::Offline, + K8SpuStatusResolution::Init => SpuResolution::Init + }, + } + } +} + +impl From<SpuStatus> for K8SpuStatus { + fn from(status: SpuStatus) -> K8SpuStatus { + K8SpuStatus { + resolution: (match status.resolution { + SpuResolution::Online => K8SpuStatusResolution::Online, + SpuResolution::Offline => K8SpuStatusResolution::Offline, + SpuResolution::Init => K8SpuStatusResolution::Init + }), + } + } +} + +impl Default for SpuStatus { + fn default() -> Self { + SpuStatus { + resolution: SpuResolution::default(), + } + } +} + +impl SpuStatus { + /// Resolution to string label + pub fn resolution_label(&self) -> &'static str { + match self.resolution { + SpuResolution::Online => "online", + SpuResolution::Offline => "offline", + SpuResolution::Init => "Init" + } + } + + /// Checks if the resolution is marked online. true for online, false otherwise + pub fn is_online(&self) -> bool { + self.resolution == SpuResolution::Online + } + + pub fn is_offline(&self) -> bool { + self.resolution == SpuResolution::Offline + } + + /// Set status resolution to online + pub fn set_online(&mut self) { + self.resolution = SpuResolution::Online; + } + + /// Set status resolution to offline + pub fn set_offline(&mut self) { + self.resolution = SpuResolution::Offline; + } +} + +#[derive(Decode, Encode, Debug, Clone, PartialEq)] +pub enum SpuResolution { + Online, + Offline, + Init +} + +// ----------------------------------- +// Implementation - SpuResolution +// ----------------------------------- + +impl Default for SpuResolution { + fn default() -> Self { + SpuResolution::Init + } +} +
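A hedged sketch of the status lifecycle above (wrapper and crate path assumed): a new status starts in `Init` and is flipped online or offline by the controller.

```rust
use metadata::spu::SpuStatus; // assumed crate path

fn demo_spu_status() {
    let mut status = SpuStatus::default();
    assert!(!status.is_online()); // default resolution is Init
    status.set_online();
    assert!(status.is_online());
    assert_eq!(status.resolution_label(), "online");
}
```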
diff --git a/metadata/src/topic/mod.rs b/metadata/src/topic/mod.rs new file mode 100644 index 0000000000..b73aeee8ae --- /dev/null +++ b/metadata/src/topic/mod.rs @@ -0,0 +1,13 @@ +mod spec; +mod status; + +pub use self::spec::TopicSpec; +pub use self::spec::PartitionMap; +pub use self::spec::PartitionMaps; +pub use self::spec::TopicReplicaParam; + +pub use self::status::TopicStatus; +pub use self::status::TopicResolution; + + +pub const PENDING_REASON: &'static str = "waiting for live spus"; \ No newline at end of file diff --git a/metadata/src/topic/spec.rs b/metadata/src/topic/spec.rs new file mode 100644 index 0000000000..42533307f3 --- /dev/null +++ b/metadata/src/topic/spec.rs @@ -0,0 +1,845 @@ +//! +//! # Topic Spec +//! +//! Topic spec consists of 2 types of topics +//! * Assigned +//! * Computed +//! +use std::io::{Error, ErrorKind}; +use std::collections::BTreeMap; + +use log::trace; +use types::{ReplicaMap, SpuId}; +use types::{PartitionId, PartitionCount, ReplicationFactor, IgnoreRackAssignment}; + +use kf_protocol::Version; +use kf_protocol::bytes::{Buf, BufMut}; +use kf_protocol::derive::{Decode, Encode}; +use kf_protocol::{Decoder, Encoder}; +use k8_metadata::topic::TopicSpec as K8TopicSpec; +use k8_metadata::topic::Partition as K8Partition; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +#[derive(Debug,Clone,Default,PartialEq,Encode,Decode)] +pub struct TopicReplicaParam { + pub partitions: PartitionCount, + pub replication_factor: ReplicationFactor, + pub ignore_rack_assignment: IgnoreRackAssignment +} + +impl TopicReplicaParam { + pub fn new(partitions: PartitionCount, replication_factor: ReplicationFactor, + ignore_rack_assignment: IgnoreRackAssignment) -> Self { + Self { + partitions, + replication_factor, + ignore_rack_assignment + } + } +} + +impl From<(PartitionCount, ReplicationFactor, IgnoreRackAssignment)> for TopicReplicaParam { + fn from(value: (PartitionCount, ReplicationFactor, IgnoreRackAssignment)) -> Self { + let (partitions, replication_factor, ignore_rack) = value; + Self::new(partitions, replication_factor, ignore_rack) + } +} + +impl std::fmt::Display for TopicReplicaParam { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "replica param::(p:{}, r:{})", self.partitions, self.replication_factor) + } +} + +/// Hack: field instead of new type to get around encode and decode limitations +#[derive(Debug,Default,Clone,PartialEq,Encode,Decode)] +pub struct PartitionMaps { + maps: Vec<PartitionMap> +} + +impl From<Vec<PartitionMap>> for PartitionMaps { + fn from(maps: Vec<PartitionMap>) -> Self { + Self { + maps + } + } +} + +impl From<Vec<(i32, Vec<SpuId>)>> for PartitionMaps { + fn from(partition_vec: Vec<(i32, Vec<SpuId>)>) -> Self { + let maps: Vec<PartitionMap> = partition_vec.into_iter() + .map(|(id, replicas)| PartitionMap { + id, + replicas + }) + .collect(); + maps.into() + } +}
+impl std::fmt::Display for PartitionMaps { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "partition map:{}", self.maps.len()) + } +} + +impl PartitionMaps { + + pub fn maps(&self) -> &Vec<PartitionMap> { + &self.maps + } + + pub fn maps_owned(self) -> Vec<PartitionMap> { + self.maps + } + + fn partition_count(&self) -> Option<PartitionCount> { + // compute partitions from the replica map + let partitions = self.maps.len() as PartitionCount; + if partitions > 0 { + Some(partitions) + } else { + None + } + } + + fn replication_factor(&self) -> Option<ReplicationFactor> { + // compute replication factor from the replica map + if self.maps.len() > 0 { + Some(self.maps[0].replicas.len() as i32) + } else { + None + } + } + + fn partition_map_string(&self) -> Option<String> { + let mut res = String::new(); + for partition in &self.maps { + res.push_str(&format!("{}:{:?}, ", partition.id, partition.replicas)); + } + if res.len() > 0 { + res.truncate(res.len() - 2); + } + Some(res) + } + + // ----------------------------------- + // Partition Map - Operations + // ----------------------------------- + + /// Generate a vector with all spu ids represented by all partitions (no duplicates) + pub fn unique_spus_in_partition_map(&self) -> Vec<SpuId> { + let mut spu_ids: Vec<SpuId> = vec![]; + + for partition in &self.maps { + for spu in &partition.replicas { + if !spu_ids.contains(spu) { + spu_ids.push(spu.clone()); + } + } + } + + spu_ids + } + + /// Convert partition map into replica map + pub fn partition_map_to_replica_map(&self) -> ReplicaMap { + let mut replica_map: ReplicaMap = BTreeMap::new(); + + for partition in &self.maps { + replica_map.insert(partition.id, partition.replicas.clone()); + } + + replica_map + } + + /// Validate partition map for assigned topics + pub fn valid_partition_map(&self) -> Result<(), Error> { + // there must be at least one partition in the partition map + if self.maps.len() == 0 { + return Err(Error::new( + ErrorKind::InvalidInput, + "no assigned partitions found", + )); + } + + // assigned partitions must meet the following criteria + // ids: + // - must start with 0 + // - must be in sequence, without gaps + // replicas: + // - must have at least one element + // - all replicas must have the same number of elements + // - all elements must be unique + // - all elements must be positive integers + let mut id = 0; + let mut replica_len = 0; + for partition in &self.maps { + if id == 0 { + // id must be 0 + if partition.id != id { + return Err(Error::new( + ErrorKind::InvalidInput, + "assigned partitions must start with id 0", + )); + } + + // replica must have elements + replica_len = partition.replicas.len(); + if replica_len == 0 { + return Err(Error::new( + ErrorKind::InvalidInput, + "assigned replicas must have at least one spu id", + )); + } + } else { + // id must be in sequence + if partition.id != id { + return Err(Error::new( + ErrorKind::InvalidInput, + "assigned partition ids must be in sequence and without gaps", + )); + } + + // replica must have same number of elements as previous one + if partition.replicas.len() != replica_len { + return Err(Error::new( + ErrorKind::InvalidInput, + format!( + "all assigned replicas must have the same number of spu ids: {}", + replica_len + ), + )); + } + } + + // all replica ids must be unique + let mut sorted_replicas = partition.replicas.clone(); + sorted_replicas.sort(); + let unique_count = 1 + sorted_replicas + .windows(2) + .filter(|pair| pair[0] != pair[1]) + .count(); + if partition.replicas.len() != unique_count { + return Err(Error::new( + ErrorKind::InvalidInput, + format!( + "duplicate spu ids found in assigned partition with id: {}", + id + ), + )); + } + + // all ids must be positive numbers + for spu_id in &partition.replicas { + if *spu_id < 0 { + return Err(Error::new( + ErrorKind::InvalidInput, + format!( + "invalid spu id: {} in assigned partition with id: {}", + spu_id, id + ), + )); + } + } + + // increment id for next iteration + id += 1; + } + + Ok(()) + } + +}
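A hedged sketch tying the operations above together (wrapper and crate path assumed): a tuple-based partition map, its deduplicated SPU list, and its flattened replica map.

```rust
use metadata::topic::PartitionMaps; // assumed crate path

fn demo_partition_maps() {
    let maps: PartitionMaps = vec![(0, vec![5001, 5002]), (1, vec![5002, 5003])].into();

    // Duplicates across partitions are dropped, order of first sighting is kept.
    assert_eq!(maps.unique_spus_in_partition_map(), vec![5001, 5002, 5003]);

    // Each partition id maps to its replica list.
    let replica_map = maps.partition_map_to_replica_map();
    assert_eq!(replica_map.get(&1), Some(&vec![5002, 5003]));
}
```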
+#[derive(Debug,Clone,PartialEq)] +pub enum TopicSpec { + Assigned(PartitionMaps), + Computed(TopicReplicaParam) +} + +// ----------------------------------- +// Implementation +// ----------------------------------- +impl Default for TopicSpec { + fn default() -> TopicSpec { + TopicSpec::Assigned(PartitionMaps::default()) + } +} + +impl TopicSpec { + pub fn new_assigned<J>(partition_map: J) -> Self where J: Into<PartitionMaps> { + TopicSpec::Assigned(partition_map.into()) + } + + pub fn new_computed( + partitions: PartitionCount, + replication: ReplicationFactor, + ignore_rack: Option<IgnoreRackAssignment>, + ) -> Self { + TopicSpec::Computed((partitions, replication, ignore_rack.unwrap_or(false)).into()) + } + + pub fn is_computed(&self) -> bool { + match self { + TopicSpec::Computed(_) => true, + TopicSpec::Assigned(_) => false, + } + } + + pub fn partitions(&self) -> Option<PartitionCount> { + match self { + TopicSpec::Computed(param) => Some(param.partitions), + TopicSpec::Assigned(partition_map) => partition_map.partition_count() + } + } + + pub fn replication_factor(&self) -> Option<ReplicationFactor> { + match self { + TopicSpec::Computed(param) => Some(param.replication_factor), + TopicSpec::Assigned(partition_map) => partition_map.replication_factor() + } + } + + pub fn ignore_rack_assignment(&self) -> IgnoreRackAssignment { + match self { + TopicSpec::Computed(param) => param.ignore_rack_assignment, + TopicSpec::Assigned(_) => false, + } + } + + // ----------------------------------- + // Labels & Strings + // ----------------------------------- + pub fn type_label(is_computed: &bool) -> &'static str { + match is_computed { + true => "computed", + false => "assigned", + } + } + + pub fn partitions_str(partition_cnt: &Option<PartitionCount>) -> String { + match partition_cnt { + Some(partitions) => partitions.to_string(), + None => "-".to_string(), + } + } + + pub fn replication_factor_str(replication_cnt: &Option<ReplicationFactor>) -> String { + match replication_cnt { + Some(replication) => replication.to_string(), + None => "-".to_string(), + } + } + + pub fn ignore_rack_assign_str(ignore_rack_assign: &bool) -> &'static str { + match ignore_rack_assign { + true => "yes", + false => "-", + } + } + + pub fn partition_map_str(&self) -> Option<String> { + match self { + TopicSpec::Computed(_) => None, + TopicSpec::Assigned(partition_map) => partition_map.partition_map_string() + } + } + + // ----------------------------------- + // Parameter validation + // ----------------------------------- + + /// Validate partitions + pub fn valid_partition(partitions: &PartitionCount) -> Result<(), Error> { + if *partitions < 0 { + return Err(Error::new( + ErrorKind::InvalidInput, + "partition is mandatory for computed topics", + )); + } + + if *partitions == 0 { + return Err(Error::new( + ErrorKind::InvalidInput, + "partition must be greater than 0", + )); + } + + Ok(()) + } + + /// Validate replication factor + pub fn valid_replication_factor(replication: &ReplicationFactor) -> Result<(), Error> { + if *replication < 0 { + return Err(Error::new( + ErrorKind::InvalidInput, + "replication factor is mandatory for computed topics", + )); + } + + if *replication == 0 { + return Err(Error::new( + ErrorKind::InvalidInput, + "replication factor must be greater than 0", + )); + } + + Ok(()) + } + +}
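A hedged sketch of the accessors above (wrapper and crate path assumed): computed topics carry explicit counts, while assigned topics derive them from the partition map.

```rust
use metadata::topic::TopicSpec; // assumed crate path

fn demo_topic_spec() {
    let computed = TopicSpec::new_computed(3, 2, None);
    assert_eq!(computed.partitions(), Some(3));
    assert_eq!(computed.replication_factor(), Some(2));

    // One partition with two replicas: counts are derived from the map.
    let assigned = TopicSpec::new_assigned(vec![(0, vec![5001, 5002])]);
    assert_eq!(assigned.partitions(), Some(1));
    assert_eq!(assigned.replication_factor(), Some(2));
}
```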
"-".to_string(), + } + } + + pub fn replication_factor_str(replication_cnt: &Option) -> String { + match replication_cnt { + Some(replication) => replication.to_string(), + None => "-".to_string(), + } + } + + pub fn ignore_rack_assign_str(ignore_rack_assign: &bool) -> &'static str { + match ignore_rack_assign { + true => "yes", + false => "-", + } + } + + pub fn partition_map_str(&self) -> Option { + match self { + TopicSpec::Computed(_) => None, + TopicSpec::Assigned(partition_map) => partition_map.partition_map_string() + } + } + + // ----------------------------------- + // Parameter validation + // ----------------------------------- + + /// Validate partitions + pub fn valid_partition(partitions: &PartitionCount) -> Result<(), Error> { + if *partitions < 0 { + return Err(Error::new( + ErrorKind::InvalidInput, + "partition is mandatory for computed topics", + )); + } + + if *partitions == 0 { + return Err(Error::new( + ErrorKind::InvalidInput, + "partition must be greater than 0", + )); + } + + Ok(()) + } + + /// Validate replication factor + pub fn valid_replication_factor(replication: &ReplicationFactor) -> Result<(), Error> { + if *replication < 0 { + return Err(Error::new( + ErrorKind::InvalidInput, + "replication factor is mandatory for computed topics", + )); + } + + if *replication == 0 { + return Err(Error::new( + ErrorKind::InvalidInput, + "replication factor must be greater than 0", + )); + } + + Ok(()) + } + + + + +} + + + +impl Decoder for TopicSpec { + fn decode(&mut self, src: &mut T,version: Version) -> Result<(), Error> + where + T: Buf, + { + let mut typ: u8 = 0; + typ.decode(src,version)?; + trace!("decoded type: {}", typ); + + match typ { + // Assigned Replicas + 0 => { + let mut partition_map = PartitionMaps::default(); + partition_map.decode(src,version)?; + *self = Self::Assigned(partition_map); + Ok(()) + } + + // Computed Replicas + 1 => { + let mut param = TopicReplicaParam::default(); + param.decode(src,version)?; + *self =TopicSpec::Computed(param); + Ok(()) + } + + // Unexpected type + _ => Err(Error::new( + ErrorKind::UnexpectedEof, + format!("unknown replica type {}", typ), + )), + } + } +} + + + + +// ----------------------------------- +// Encoder / Decoder +// ----------------------------------- +impl Encoder for TopicSpec { + + // compute size for fluvio replicas + fn write_size(&self, version: Version) -> usize { + let typ_size = (0 as u8).write_size(version); + match self { + TopicSpec::Assigned(partitions) => typ_size + partitions.write_size(version), + TopicSpec::Computed(param) => { + typ_size + param.write_size(version) + } + } + } + + // encode fluvio replicas + fn encode(&self, dest: &mut T,version: Version) -> Result<(), Error> + where + T: BufMut, + { + // ensure buffer is large enough + if dest.remaining_mut() < self.write_size(version) { + return Err(Error::new( + ErrorKind::UnexpectedEof, + format!( + "not enough capacity for replica len of {}", + self.write_size(version) + ), + )); + } + + match self { + // encode assign partitions + TopicSpec::Assigned(partitions) => { + + let typ: u8 = 0; + typ.encode(dest,version)?; + partitions.encode(dest,version)?; + + } + + // encode computed partitions + TopicSpec::Computed(param) => { + + let typ: u8 = 1; + typ.encode(dest,version)?; + param.encode(dest,version)?; + + } + } + + Ok(()) + } + +} + +impl From for K8TopicSpec { + fn from(spec: TopicSpec) -> Self { + + match spec { + TopicSpec::Computed(computed_param) => K8TopicSpec::new( + Some(computed_param.partitions), + 
+impl From<TopicSpec> for K8TopicSpec { + fn from(spec: TopicSpec) -> Self { + match spec { + TopicSpec::Computed(computed_param) => K8TopicSpec::new( + Some(computed_param.partitions), + Some(computed_param.replication_factor), + Some(computed_param.ignore_rack_assignment), + None + ), + TopicSpec::Assigned(assign_param) => K8TopicSpec::new( + None, + None, + None, + Some(replica_map_to_k8_partition(assign_param)) + ) + } + } +} + +/// Translate Fluvio Replica Map to K8 Partitions for KV store notification +fn replica_map_to_k8_partition(partition_maps: PartitionMaps) -> Vec<K8Partition> { + let mut k8_partitions: Vec<K8Partition> = vec![]; + for partition in partition_maps.maps() { + k8_partitions.push(K8Partition::new(partition.id, partition.replicas.clone())); + } + k8_partitions +} + +impl From<(PartitionCount, ReplicationFactor, IgnoreRackAssignment)> for TopicSpec { + fn from(spec: (PartitionCount, ReplicationFactor, IgnoreRackAssignment)) -> Self { + let (count, factor, rack) = spec; + Self::new_computed(count, factor, Some(rack)) + } +} + +/// convert from tuple with partition and replication with rack off +impl From<(PartitionCount, ReplicationFactor)> for TopicSpec { + fn from(spec: (PartitionCount, ReplicationFactor)) -> Self { + let (count, factor) = spec; + Self::new_computed(count, factor, Some(false)) + } +} + +#[derive(Decode, Encode, Default, Debug, Clone, PartialEq)] +pub struct PartitionMap { + pub id: PartitionId, + pub replicas: Vec<SpuId>, +} + +// ----------------------------------- +// Unit Tests +// ----------------------------------- + +#[cfg(test)] +pub mod test { + use super::*; + use std::io::Cursor; + + #[test] + fn test_is_computed_topic() { + let p1: PartitionMaps = vec![(1, vec![0]), (2, vec![2])].into(); + let t1 = TopicSpec::new_assigned(p1); + assert_eq!(t1.is_computed(), false); + + let t2 = TopicSpec::new_computed(0, 0, None); + assert_eq!(t2.is_computed(), true); + } + + #[test] + fn test_valid_computed_replica_params() { + // -1 indicates an unassigned partition + let t1_result = TopicSpec::valid_partition(&-1); + assert!(t1_result.is_err()); + assert_eq!( + format!("{}", t1_result.unwrap_err()), + "partition is mandatory for computed topics" + ); + + // 0 is not a valid partition + let t2_result = TopicSpec::valid_partition(&0); + assert!(t2_result.is_err()); + assert_eq!( + format!("{}", t2_result.unwrap_err()), + "partition must be greater than 0" + ); + + let t3_result = TopicSpec::valid_partition(&1); + assert!(t3_result.is_ok()); + + // -1 indicates an unassigned replication factor + let t4_result = TopicSpec::valid_replication_factor(&-1); + assert!(t4_result.is_err()); + assert_eq!( + format!("{}", t4_result.unwrap_err()), + "replication factor is mandatory for computed topics" + ); + + // 0 is not a valid replication factor + let t5_result = TopicSpec::valid_replication_factor(&0); + assert!(t5_result.is_err()); + assert_eq!( + format!("{}", t5_result.unwrap_err()), + "replication factor must be greater than 0" + ); + + // positive numbers are OK + let t6_result = TopicSpec::valid_replication_factor(&1); + assert!(t6_result.is_ok()); + } + + // Replica Map ids: + // - must start with 0 + // - must be in sequence, without gaps + #[test] + fn test_replica_map_ids() { + // id starts from 1 rather than 0 + let p1: PartitionMaps = vec![(1, vec![0]), (2, vec![2])].into(); + let p1_result = p1.valid_partition_map(); + assert!(p1_result.is_err()); + assert_eq!( + format!("{}", p1_result.unwrap_err()), + "assigned partitions must start with id 0" + ); + + // id has a gap + let p2: PartitionMaps = vec![(0, vec![0]), (2, vec![2])].into(); + let p2_result = p2.valid_partition_map(); + assert!(p2_result.is_err()); + assert_eq!( + format!("{}", p2_result.unwrap_err()),
"assigned partition ids must be in sequence and without gaps" + ); + + // ids are out of sequence + let p3: PartitionMaps = vec![(0, vec![0]), (2, vec![2]), (1, vec![1])].into(); + let p3_result = p3.valid_partition_map(); + assert!(p3_result.is_err()); + assert_eq!( + format!("{}", p3_result.unwrap_err()), + "assigned partition ids must be in sequence and without gaps" + ); + + // duplicate ids + let p4: PartitionMaps = vec![(0, vec![0]), (1, vec![1]), (1, vec![1])].into(); + let p4_result = p4.valid_partition_map(); + assert!(p4_result.is_err()); + assert_eq!( + format!("{}", p4_result.unwrap_err()), + "assigned partition ids must be in sequence and without gaps" + ); + + // ids are ok + let p5: PartitionMaps = vec![(0, vec![1]), (1, vec![1]), (2, vec![2])].into(); + let p5_result = p5.valid_partition_map(); + assert!(p5_result.is_ok()); + } + + // Replica Map replicas: + // - replicas must have at least one element + // - all replicas must have the same number of elements + // - all elements must be unique + // - all elements must be positive integers + #[test] + fn test_replica_map_spu_ids() { + // replicas must have at least one element + let p1: PartitionMaps = vec![(0, vec![]), (1, vec![1])].into(); + let p1_result = p1.valid_partition_map(); + assert!(p1_result.is_err()); + assert_eq!( + format!("{}", p1_result.unwrap_err()), + "assigned replicas must have at least one spu id" + ); + + // all replicas must have the same number of elements + let p2: PartitionMaps = vec![(0, vec![1, 2]), (1, vec![1])].into(); + let p2_result = p2.valid_partition_map(); + assert!(p2_result.is_err()); + assert_eq!( + format!("{}", p2_result.unwrap_err()), + "all assigned replicas must have the same number of spu ids: 2" + ); + + // all elements must be unique + let p3: PartitionMaps = vec![(0, vec![1, 2]), (1, vec![1, 1])].into(); + let p3_result = p3.valid_partition_map(); + assert!(p3_result.is_err()); + assert_eq!( + format!("{}", p3_result.unwrap_err()), + "duplicate spu ids found in assigned partition with id: 1" + ); + + // all elements must be unique + let p4: PartitionMaps = vec![(0, vec![3, 1, 2, 3])].into(); + let p4_result = p4.valid_partition_map(); + assert!(p4_result.is_err()); + assert_eq!( + format!("{}", p4_result.unwrap_err()), + "duplicate spu ids found in assigned partition with id: 0" + ); + + // all elements must be positive integers + let p5: PartitionMaps = vec![(0, vec![1, 2]), (1, vec![1, -2])].into(); + let p5_result = p5.valid_partition_map(); + assert!(p5_result.is_err()); + assert_eq!( + format!("{}", p5_result.unwrap_err()), + "invalid spu id: -2 in assigned partition with id: 1" + ); + } + + // Partitions repeatedly reference spu-ids. 
The purpose of + // this API is to return a list of all unique SPUs + #[test] + fn test_unique_spus_in_partition_map() { + // spu ids repeat across partitions; duplicates must be dropped + let p1: PartitionMaps = vec![ + (0, vec![0, 1, 3]), + (1, vec![0, 2, 3]), + (2, vec![1, 3, 4]), + ].into(); + + let p1_result = p1.unique_spus_in_partition_map(); + let expected_p1_result: Vec<i32> = vec![0, 1, 3, 2, 4]; + assert_eq!(p1_result, expected_p1_result); + } + + // print in hex format: + // - println!("{:02x?}", dest); + + #[test] + fn test_encode_decode_assigned_topic_spec() { + let partition_map: PartitionMaps = vec![PartitionMap { + id: 0, + replicas: vec![5001, 5002], + }].into(); + let topic_spec = TopicSpec::Assigned(partition_map.clone()); + let mut dest = vec![]; + + // test encode + let result = topic_spec.encode(&mut dest, 0); + assert!(result.is_ok()); + + let expected_dest = [ + 0x00, // type + 0x00, 0x00, 0x00, 0x01, // partition cnt + 0x00, 0x00, 0x00, 0x00, // partition id + 0x00, 0x00, 0x00, 0x02, // replica cnt + 0x00, 0x00, 0x13, 0x89, // spu id: 5001 + 0x00, 0x00, 0x13, 0x8a, // spu id: 5002 + ]; + assert_eq!(dest, expected_dest); + + // test decode + let mut topic_spec_decoded = TopicSpec::default(); + let result = topic_spec_decoded.decode(&mut Cursor::new(&expected_dest), 0); + assert!(result.is_ok()); + + match topic_spec_decoded { + TopicSpec::Assigned(partition_map) => { + assert_eq!( + partition_map, + vec![PartitionMap { + id: 0, + replicas: vec![5001, 5002], + }].into() + ); + } + _ => assert!( + false, + "expect assigned topic spec, found {:?}", + topic_spec_decoded + ), + } + } + + #[test] + fn test_encode_decode_computed_topic_spec() { + let topic_spec = TopicSpec::Computed((2, 3, true).into()); + let mut dest = vec![]; + + // test encode + let result = topic_spec.encode(&mut dest, 0); + assert!(result.is_ok()); + + let expected_dest = [ + 0x01, // type + 0x00, 0x00, 0x00, 0x02, // partition cnt + 0x00, 0x00, 0x00, 0x03, // replica cnt + 0x01, // ignore_rack_assignment + ]; + assert_eq!(dest, expected_dest); + + // test decode + let mut topic_spec_decoded = TopicSpec::default(); + let result = topic_spec_decoded.decode(&mut Cursor::new(&expected_dest), 0); + assert!(result.is_ok()); + + match topic_spec_decoded { + TopicSpec::Computed(param) => { + assert_eq!(param.partitions, 2); + assert_eq!(param.replication_factor, 3); + assert_eq!(param.ignore_rack_assignment, true); + } + _ => assert!( + false, + "expect computed topic spec, found {:?}", + topic_spec_decoded + ), + } + } + + #[test] + fn test_partition_map_str() { + // Test multiple + let p1: PartitionMaps = vec![ + (0, vec![0, 1, 3]), + (1, vec![0, 2, 3]), + (2, vec![1, 3, 4]), + ].into(); + let spec = TopicSpec::new_assigned(p1); + assert_eq!( + spec.partition_map_str(), + Some("0:[0, 1, 3], 1:[0, 2, 3], 2:[1, 3, 4]".to_string()) + ); + + // Test empty + let p2 = PartitionMaps::default(); + let spec2 = TopicSpec::new_assigned(p2); + assert_eq!(spec2.partition_map_str(), Some("".to_string())); + } +} \ No newline at end of file diff --git a/metadata/src/topic/status.rs b/metadata/src/topic/status.rs new file mode 100644 index 0000000000..8894a07824 --- /dev/null +++ b/metadata/src/topic/status.rs @@ -0,0 +1,281 @@ +//! +//! # Topic Status +//! +//! Topic Status metadata information cached locally. +//!
+use std::collections::BTreeMap; +use std::fmt; + +use kf_protocol::derive::{Decode, Encode}; + +use k8_metadata::topic::TopicStatus as K8TopicStatus; +use k8_metadata::topic::TopicStatusResolution as K8TopicStatusResolution; + +use types::{ReplicaMap, SpuId}; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +#[derive(Decode, Encode, Debug, Clone, PartialEq)] +pub struct TopicStatus { + pub resolution: TopicResolution, + pub replica_map: BTreeMap<i32, Vec<SpuId>>, + pub reason: String, +} + +impl fmt::Display for TopicStatus { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f,"{:#?}",self.resolution) + } +} + +#[derive(Decode, Encode, Debug, Clone, PartialEq)] +pub enum TopicResolution { + Init, // initializing; this is the starting state + Pending, // has valid config, ready for replica mapping assignment + InsufficientResources, // replica map cannot be created due to lack of capacity + InvalidConfig, // invalid configuration + Provisioned, // topics are allocated +} + +impl TopicResolution { + + pub fn resolution_label(&self) -> &'static str { + match self { + TopicResolution::Provisioned => "provisioned", + TopicResolution::Init => "initializing", + TopicResolution::Pending => "pending", + TopicResolution::InsufficientResources => "insufficient-resources", + TopicResolution::InvalidConfig => "invalid-config", + } + } + + pub fn is_invalid(&self) -> bool { + match self { + Self::InvalidConfig => true, + _ => false + } + } + + pub fn no_resource(&self) -> bool { + match self { + Self::InsufficientResources => true, + _ => false + } + } +} + +impl std::fmt::Display for TopicResolution { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "resolution::{}", self.resolution_label()) + } +} + +// ----------------------------------- +// Encode - from KV Topic Status +// ----------------------------------- + +impl From<K8TopicStatus> for TopicStatus { + fn from(kv_status: K8TopicStatus) -> Self { + let resolution = match kv_status.resolution { + K8TopicStatusResolution::Provisioned => TopicResolution::Provisioned, + K8TopicStatusResolution::Init => TopicResolution::Init, + K8TopicStatusResolution::Pending => TopicResolution::Pending, + K8TopicStatusResolution::InsufficientResources => TopicResolution::InsufficientResources, + K8TopicStatusResolution::InvalidConfig => TopicResolution::InvalidConfig, + }; + + TopicStatus { + resolution, + replica_map: kv_status.replica_map.clone(), + reason: kv_status.reason.clone(), + } + } +} + +impl From<TopicStatus> for K8TopicStatus { + fn from(status: TopicStatus) -> K8TopicStatus { + let resolution = match status.resolution { + TopicResolution::Provisioned => K8TopicStatusResolution::Provisioned, + TopicResolution::Init => K8TopicStatusResolution::Init, + TopicResolution::Pending => K8TopicStatusResolution::Pending, + TopicResolution::InsufficientResources => K8TopicStatusResolution::InsufficientResources, + TopicResolution::InvalidConfig => K8TopicStatusResolution::InvalidConfig, + }; + + K8TopicStatus { + resolution: resolution, + replica_map: status.replica_map.clone(), + reason: status.reason.clone(), + } + } }
+/* +// ----------------------------------- +// Encode/Decode - Internal API +// ----------------------------------- + +impl From<&TopicResolution> for u8 { + fn from(spec: &TopicResolution) -> Self { + match spec { + TopicResolution::Init => 0 as u8, + TopicResolution::Pending => 1 as u8, + TopicResolution::NoResourceForReplicaMap => 2 as u8, + TopicResolution::InvalidConfig => 3 as u8, + TopicResolution::Provisioned => 4 as u8, + } + } +} +*/ + +// ----------------------------------- +// Default +// ----------------------------------- + +impl ::std::default::Default for TopicStatus { + fn default() -> Self { + TopicStatus { + resolution: TopicResolution::Init, + replica_map: BTreeMap::new(), + reason: "".to_owned(), + } + } +} + +impl ::std::default::Default for TopicResolution { + fn default() -> Self { + TopicResolution::Init + } +} + +// ----------------------------------- +// Implementation +// ----------------------------------- + +fn create_replica_map(rows: Vec<Vec<SpuId>>) -> BTreeMap<i32, Vec<SpuId>> { + let mut map = BTreeMap::new(); + for (idx, row) in rows.iter().enumerate() { + map.insert(idx as i32, row.clone()); + } + map +} + +impl TopicStatus { + + pub fn new<S>( + resolution: TopicResolution, + replica_map: Vec<Vec<SpuId>>, + reason: S, + ) -> Self where S: Into<String> { + TopicStatus { + resolution: resolution, + replica_map: create_replica_map(replica_map), + reason: reason.into() + } + } + + pub fn resolution(&self) -> &TopicResolution { + &self.resolution + } + + pub fn replica_map_cnt(&self) -> i32 { + self.replica_map.len() as i32 + } + + pub fn set_replica_map(&mut self, replica_map: ReplicaMap) { + self.replica_map = replica_map; + } + + pub fn spus_in_replica(&self) -> Vec<SpuId> { + let mut spu_list: Vec<SpuId> = vec![]; + + for (_, replicas) in self.replica_map.iter() { + for spu in replicas { + if !spu_list.contains(spu) { + spu_list.push(*spu); + } + } + } + + spu_list + } + + pub fn replica_map_str(&self) -> String { + format!("{:?}", self.replica_map) + } + + pub fn replica_map_cnt_str(&self) -> String { + let map_rows = self.replica_map_cnt(); + if map_rows > 0 { + format!("{}", map_rows) + } else { + "-".to_owned() + } + } + + pub fn reason_str(&self) -> &String { + &self.reason + } + + // ----------------------------------- + // State Machine + // ----------------------------------- + + pub fn is_resolution_initializing(&self) -> bool { + self.resolution == TopicResolution::Init + } + + /// need to update the replica map + pub fn need_replica_map_recal(&self) -> bool { + self.resolution == TopicResolution::Pending || + self.resolution == TopicResolution::InsufficientResources + } + + pub fn is_resolution_pending(&self) -> bool { + self.resolution == TopicResolution::Pending + } + + pub fn is_resolution_transient(&self) -> bool { + self.resolution == TopicResolution::Init || + self.resolution == TopicResolution::Pending + } + + pub fn is_resolution_provisioned(&self) -> bool { + self.resolution == TopicResolution::Provisioned + } + + pub fn next_resolution_provisoned() -> (TopicResolution, String) { + (TopicResolution::Provisioned, "".to_owned()) + } + + /// set to pending mode, which means it is waiting for spu resources to be allocated + pub fn next_resolution_pending() -> (TopicResolution, String) { + (TopicResolution::Pending, super::PENDING_REASON.to_owned()) + } + + pub fn next_resolution_invalid_config<S>(reason: S) -> (TopicResolution, String) where S: Into<String> { + (TopicResolution::InvalidConfig, reason.into()) + } + + pub fn set_resolution_no_resource<S>(reason: S) -> (TopicResolution, String) where S: Into<String> { + (TopicResolution::InsufficientResources, reason.into()) + } + + pub fn set_next_resolution(&mut self, next: (TopicResolution, String)) { + let (resolution, reason) = next; + self.resolution = resolution; + self.reason = reason; + } +} diff --git a/rust-toolchain b/rust-toolchain new file mode 100644 index 0000000000..9ea1223ad3 --- /dev/null +++ b/rust-toolchain @@ -0,0 +1 @@
+nightly-2019-08-27 \ No newline at end of file diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 0000000000..ad07abc835 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1,2 @@ +reorder_modules = false +reorder_imports = false \ No newline at end of file diff --git a/sc-server/Cargo.toml b/sc-server/Cargo.toml new file mode 100644 index 0000000000..95a9f2cf64 --- /dev/null +++ b/sc-server/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "sc-server" +edition = "2018" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] +default-run = "sc-server" + + +[[bin]] +name = "sc-server" +path = "src/main.rs" +doc = false + + +[dependencies] +rand = "0.5" +log = "0.4.6" +toml = "0.5.0" +serde = { version ="1.0.82", features = ['derive'] } +futures-preview = { version = "0.3.0-alpha.17" } +pin-utils = "0.1.0-alpha.3" +chashmap = "2.2.0" +structopt = "0.2.14" +base64 = "0.10.1" +regex = "1.1.6" +types = { path = "../types"} +utils = { path = "../utils"} +future-helper = { path = "../future-helper" } +k8-client = { path = "../k8-client"} +kf-protocol = { path = "../kf-protocol"} +kf-socket = {path = "../kf-socket"} +kf-service = { path = "../kf-service"} +internal-api = { path = "../api/internal-api"} +k8-metadata = { path = "../k8-metadata"} +k8-config = { path = "../k8-config"} +metadata = { path = "../metadata"} +sc-api = { path = "../api/sc-api"} + +[dev-dependencies] +future-helper = { path = "../future-helper", features=["fixture"]} +future-aio = { path = "../future-aio"} +k8-fixtures = { path = "../k8-client/k8-fixtures" } +utils = { path = "../utils", features=["fixture"]} diff --git a/sc-server/README.md b/sc-server/README.md new file mode 100644 index 0000000000..31285cde11 --- /dev/null +++ b/sc-server/README.md @@ -0,0 +1,14 @@ +## Run SC in development mode +``` +./target/debug/sc-server +``` + +## Run SC with various debug flags + +To use different debug flags use: +``` +./dev-tools/log/debug-ctrl-all +./dev-tools/log/debug-ctrl-min +./dev-tools/log/debug-ctrl-client +./dev-tools/log/debug-ctrl-controller +``` diff --git a/sc-server/rust-toolchain b/sc-server/rust-toolchain new file mode 120000 index 0000000000..9327ba4034 --- /dev/null +++ b/sc-server/rust-toolchain @@ -0,0 +1 @@ +../rust-toolchain \ No newline at end of file diff --git a/sc-server/src/cli/cli.rs b/sc-server/src/cli/cli.rs new file mode 100644 index 0000000000..5b58fa38ca --- /dev/null +++ b/sc-server/src/cli/cli.rs @@ -0,0 +1,195 @@ +//! +//! # CLI for Streaming Controller +//! +//! Command line interface to provision SC id and bind-to server/port. +//! Parameters are overwritten in the following sequence: +//! 1) default values +//! 2) custom configuration if provided, or default configuration (if not) +//! 3) cli parameters +//! 
+use std::io::Error as IoError; +use std::io::ErrorKind; +use std::process; + +use log::info; +use structopt::StructOpt; + +use types::print_cli_err; +use types::socket_helpers::string_to_socket_addr; +use k8_config::K8Config; + +use crate::ScServerError; +use super::ScConfig; + +/// cli options +#[derive(Debug, StructOpt)] +#[structopt(name = "sc-server", author = "", about = "Streaming Controller")] +pub struct ScOpt { + #[structopt(short = "i", long = "id", value_name = "integer")] + /// Unique identifier of the server + pub id: Option<i32>, + + #[structopt(short = "b", long = "bind-public", value_name = "host:port")] + /// Address for external communication + pub bind_public: Option<String>, + + #[structopt(short = "f", long = "conf", value_name = "file")] + /// Configuration file + pub config_file: Option<String>, + + #[structopt(short = "n", long = "namespace", value_name = "namespace")] + pub namespace: Option<String> +} + +/// validate streaming controller cli inputs and generate ScConfig +pub fn get_sc_config() -> Result<(ScConfig, K8Config), ScServerError> { + sc_opt_to_sc_config(ScOpt::from_args()) +} + +/// convert cli options to sc_config +fn sc_opt_to_sc_config(opt: ScOpt) -> Result<(ScConfig, K8Config), ScServerError> { + + let mut sc_config = ScConfig::new(opt.config_file)?; + + let k8_config = K8Config::load().expect("no k8 config found"); + + sc_config.namespace = k8_config.namespace().to_owned(); + info!("using {} as namespace from kubernetes config", sc_config.namespace); + + // override id if set + if let Some(id) = opt.id { + if id < 0 { + return Err(IoError::new( + ErrorKind::InvalidInput, + "Id must be greater than or equal to 0", + ).into()); + } + sc_config.id = id; + } + + // override public endpoint if set + if let Some(bind_public) = opt.bind_public { + + let addr = string_to_socket_addr(&bind_public).map_err( + |_| IoError::new( + ErrorKind::InvalidInput, + format!("Expected <host:port>, found '{}'", bind_public), + ))?; + + sc_config.public_endpoint = addr.into(); + } + + // override namespace if set + if let Some(namespace) = opt.namespace { + sc_config.namespace = namespace; + } + + info!("sc config: {:#?}", sc_config); + + Ok((sc_config, k8_config)) +} + +/// return SC configuration or exit program.
+pub fn parse_cli_or_exit() -> (ScConfig, K8Config) { + match get_sc_config() { + Err(err) => { + print_cli_err!(err); + process::exit(0x0100); + } + Ok(config) => config, + } +} + +// --------------------------------------- +// Unit Tests +// --------------------------------------- + +#[cfg(test)] +pub mod test { + + use std::net::{IpAddr, Ipv4Addr}; + use std::net::SocketAddr; + + use types::socket_helpers::EndPoint; + + use super::ScOpt; + use super::sc_opt_to_sc_config; + use super::ScConfig; + + #[test] + fn test_get_sc_config_no_params() { + let sc_opt = ScOpt { + id: None, + bind_public: None, + config_file: None, + namespace: Some("test".to_owned()) + }; + + // test read & parse + let result = sc_opt_to_sc_config(sc_opt); + assert!(result.is_ok()); + + // compare with expected result + let expected = ScConfig { + id: 1, + public_endpoint: EndPoint::all_end_point(9003), + namespace: "test".to_owned(), + ..Default::default() + }; + + assert_eq!(result.unwrap().0, expected); + } + + #[test] + fn test_get_sc_config_from_config_file() { + let sc_opt = ScOpt { + id: None, + bind_public: None, + config_file: Some("./test-data/config/sc_server.toml".to_owned()), + namespace: Some("test".to_owned()) + }; + + // test read & parse + let result = sc_opt_to_sc_config(sc_opt); + assert!(result.is_ok()); + + // compare with expected result + let expected = ScConfig { + id: 500, + public_endpoint: EndPoint::local_end_point(9999), + namespace: "test".to_owned(), + ..Default::default() + }; + + assert_eq!(result.unwrap().0, expected); + } + + #[test] + fn test_get_sc_config_overwrite_config_file() { + let sc_opt = ScOpt { + id: Some(100), + bind_public: Some("1.1.1.1:8888".to_owned()), + config_file: Some("./test-data/config/sc_server.toml".to_owned()), + namespace: Some("test".to_owned()) + }; + + // test read & parse + let result = sc_opt_to_sc_config(sc_opt); + assert!(result.is_ok()); + + // compare with expected result + let expected = ScConfig { + id: 100, + public_endpoint: (SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1)), 8888)).into(), + namespace: "test".to_owned(), + ..Default::default() + }; + + assert_eq!(result.unwrap().0, expected); + } + +} diff --git a/sc-server/src/cli/mod.rs b/sc-server/src/cli/mod.rs new file mode 100644 index 0000000000..97f0254ca5 --- /dev/null +++ b/sc-server/src/cli/mod.rs @@ -0,0 +1,9 @@ +mod cli; +mod sc_config; +mod sc_config_file; + +pub use self::cli::parse_cli_or_exit; + +pub use self::sc_config::ScConfig; +pub use self::sc_config::ScConfigBuilder; +pub use self::sc_config_file::ScConfigFile; diff --git a/sc-server/src/cli/sc_config.rs b/sc-server/src/cli/sc_config.rs new file mode 100644 index 0000000000..c74e1e90d0 --- /dev/null +++ b/sc-server/src/cli/sc_config.rs @@ -0,0 +1,91 @@ +//! +//! # Streaming Controller Configurations +//! +//! Stores configuration parameters used by the Streaming Controller module. +//!
+use std::io::Error as IoError; +use std::path::Path; + +use types::defaults::SC_CONFIG_FILE; +use types::defaults::{SC_DEFAULT_ID, SC_PUBLIC_PORT}; +use types::defaults::SC_PRIVATE_PORT; +use utils::config_helper::build_server_config_file_path; +use types::socket_helpers::EndPoint; + +use super::ScConfigFile; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +/// streaming controller configuration file +#[derive(Debug, Clone, PartialEq)] +pub struct ScConfig { + pub id: i32, + pub public_endpoint: EndPoint, + pub private_endpoint: EndPoint, + pub run_k8_dispatchers: bool, + pub namespace: String +} + +// ----------------------------------- +// Traits +// ----------------------------------- + +pub trait ScConfigBuilder { + fn to_sc_config(&self) -> Result<ScConfig, IoError>; +} + +// ----------------------------------- +// Implementation +// ----------------------------------- + +/// initialize with default parameters +impl ::std::default::Default for ScConfig { + fn default() -> Self { + ScConfig { + id: SC_DEFAULT_ID, + public_endpoint: EndPoint::all_end_point(SC_PUBLIC_PORT), + private_endpoint: EndPoint::all_end_point(SC_PRIVATE_PORT), + run_k8_dispatchers: true, + namespace: "default".to_owned() + } + } +} + +impl ScConfig { + /// generate sc configuration based on the configuration file provided + pub fn new(config_file: Option<String>) -> Result<Self, IoError> { + let config = if let Some(file) = config_file { + ScConfig::read_sc_config_from_custom_file(file)? + } else { + ScConfig::read_sc_config_from_default_file()? + }; + Ok(config) + } + + /// look up the custom configuration and overwrite default configuration parameters + fn read_sc_config_from_custom_file(config_file: String) -> Result<ScConfig, IoError> { + let sc_config_file = ScConfigFile::from_file(config_file)?; + sc_config_file.to_sc_config() + } + + /// look up the default configuration; if it doesn't exist, return defaults + fn read_sc_config_from_default_file() -> Result<ScConfig, IoError> { + let sc_file_path = build_server_config_file_path(SC_CONFIG_FILE); + + if Path::new(&sc_file_path).exists() { + // if the file exists, it must be readable and correct + let sc_config_file = ScConfigFile::from_file(sc_file_path)?; + sc_config_file.to_sc_config() + } else { + // no config file, return default parameters + Ok(ScConfig::default()) + } + } +}
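Before the parser itself, a hedged sketch of the on-disk format it accepts, reconstructed from the `Deserialize` structs and test fixtures in the file below (the `demo_parse_config` wrapper is an assumption, and the sketch assumes it sits next to `ScConfigFile` in the same module):

```rust
// Hypothetical round-trip of the TOML layout ScConfigFile (defined below) expects.
fn demo_parse_config() -> Result<(), std::io::Error> {
    let toml_str = r#"
version = "1.0"

[sc]
id = 500

[bind_public]
host = "127.0.0.1"
port = 9999
"#;
    let parsed: ScConfigFile = toml::from_str(toml_str)
        .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, format!("{}", err)))?;
    assert_eq!(parsed.version, "1.0");
    Ok(())
}
```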
+ +use serde::Deserialize; +use std::fs::read_to_string; +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::net::SocketAddr; +use std::path::Path; + +use super::{ScConfig, ScConfigBuilder}; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +#[derive(Debug, PartialEq, Deserialize)] +pub struct ScConfigFile { + pub version: String, + sc: Option<ScGroup>, + bind_public: Option<BindGroup>, +} + +#[derive(Debug, PartialEq, Deserialize)] +struct ScGroup { + pub id: i32, +} + +#[derive(Debug, PartialEq, Deserialize)] +struct BindGroup { + pub host: String, + pub port: u16, +} + +// --------------------------------------- +// Implementation +// --------------------------------------- + +impl ScConfigBuilder for ScConfigFile { + fn to_sc_config(&self) -> Result<ScConfig, IoError> { + let mut sc_config = ScConfig::default(); + + // update id (if configured) + if let Some(ref sc) = &self.sc { + sc_config.id = sc.id + } + + // update bind_addr (if configured) + if let Some(ref bind_public) = &self.bind_public { + let host_port_str = format!("{}:{}", bind_public.host, bind_public.port); + + // parse address and error if failed + let bind_addr = host_port_str + .parse::<SocketAddr>() + .map_err(|err| IoError::new(ErrorKind::InvalidInput, format!("{}", err)))?; + sc_config.public_endpoint = bind_addr.into(); + } + + Ok(sc_config) + } +} + +impl ScConfigFile { + // read and parse the .toml file + pub fn from_file<T: AsRef<Path>>(path: T) -> Result<Self, IoError> { + let file_str = read_to_string(path)?; + toml::from_str(&file_str) + .map_err(|err| IoError::new(ErrorKind::InvalidData, format!("{}", err))) + } +} + +// --------------------------------------- +// Unit Tests +// --------------------------------------- + +#[cfg(test)] +pub mod test { + use super::*; + use std::path::PathBuf; + use types::defaults::{CONFIG_FILE_EXTENTION, SC_CONFIG_FILE}; + + #[test] + fn test_default_sc_config_ok() { + let mut sc_config_path = PathBuf::new(); + sc_config_path.push("./test-data/config"); + sc_config_path.push(SC_CONFIG_FILE); + sc_config_path.set_extension(CONFIG_FILE_EXTENTION); + + // test file generator + assert_eq!( + sc_config_path.clone().to_str().unwrap(), + "./test-data/config/sc_server.toml" + ); + + // test read & parse + let result = ScConfigFile::from_file(sc_config_path); + assert!(result.is_ok()); + + // compare with expected result + let expected = ScConfigFile { + version: "1.0".to_owned(), + sc: Some(ScGroup { id: 500 }), + bind_public: Some(BindGroup { + host: "127.0.0.1".to_owned(), + port: 9999, + }), + }; + assert_eq!(result.unwrap(), expected); + } + + #[test] + fn test_default_sc_config_not_found() { + let mut sc_config_path = PathBuf::new(); + sc_config_path.push("./test-data/config/unknown.toml"); + let result = ScConfigFile::from_file(sc_config_path); + + // expecting error + assert!(result.is_err()); + assert_eq!( + format!("{}", result.unwrap_err()), + "No such file or directory (os error 2)" + ); + } + + #[test] + fn test_invalid_sc_config_file() { + let mut sc_config_path = PathBuf::new(); + sc_config_path.push("./test-data/config/sc_invalid.toml"); + let result = ScConfigFile::from_file(sc_config_path); + + // expecting error + assert!(result.is_err()); + assert_eq!( + format!("{}", result.unwrap_err()), + "missing field `version`" + ); + } +} diff --git a/sc-server/src/conn_manager/actions.rs b/sc-server/src/conn_manager/actions.rs new file mode 100644 index 0000000000..ceb815d3b8 --- /dev/null +++ b/sc-server/src/conn_manager/actions.rs @@ -0,0 +1,65 @@ +//! +//! # Connection actions +//!
+//! Actions are received through the dispatcher channel and are forwarded to the +//! Connection manager for processing. +//! +use types::SpuId; +use metadata::spu::SpuSpec; +use metadata::partition::PartitionSpec; +use metadata::partition::ReplicaKey; + +/// Change in connection status +#[derive(Debug,PartialEq,Clone)] +pub enum SpuConnectionStatusChange{ + Off(SpuId), + On(SpuId) +} + +impl SpuConnectionStatusChange { + pub fn spu_id(&self) -> SpuId { + match self { + SpuConnectionStatusChange::Off(id) => *id, + SpuConnectionStatusChange::On(id) => *id + } + } +} + + +impl std::fmt::Display for SpuConnectionStatusChange { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + SpuConnectionStatusChange::Off(id) => + write!(f, "SPU {} Off",id), + SpuConnectionStatusChange::On(id) => + write!(f, "SPU {} On",id) + } + + } +} + +#[derive(Debug, PartialEq)] +pub enum SpuSpecChange { + Add(SpuSpec), // New Spec + Mod(SpuSpec,SpuSpec), // Update SPU spec (new,old) + Remove(SpuSpec), +} + +#[derive(Debug, PartialEq)] +pub enum PartitionSpecChange { + Add(ReplicaKey,PartitionSpec), + Mod(ReplicaKey,PartitionSpec,PartitionSpec), + Remove(ReplicaKey,PartitionSpec) +} + +/// Requests made to the Connection Manager +#[derive(Debug, PartialEq)] +pub enum ConnectionRequest { + Spu(SpuSpecChange), + Partition(PartitionSpecChange), + RefreshSpu(SpuId), // Refresh SPU with its metadata, including SPU and Replica +} + + + + diff --git a/sc-server/src/conn_manager/manager.rs b/sc-server/src/conn_manager/manager.rs new file mode 100644 index 0000000000..3701f8717c --- /dev/null +++ b/sc-server/src/conn_manager/manager.rs @@ -0,0 +1,474 @@ +//! +//! # Connection Manager (ConnManager) +//! +//! ConnManager keeps communication sockets between Streaming Coordinator (SC) and Streaming Processing +//! Units (SPUs) persistent. The manager keeps a map of SPU names with their associated socket handles. +//! +//! # ConnManager Actions +//! +//! SC notifies the ConnManager when a new SPU joins or leaves the system: +//! * ConnAction::AddSpu(SpuId, ServerAddress) - SPU joins the system +//! * ConnAction::UpdateSpu(SpuId, ServerAddress) - SPU parameters are changed +//! * ConnAction::RemoveSpu(SpuId) - SPU leaves the system +//! +//! Connections are handled lazily. They are looked-up when a connection is requested. When SPU +//! parameters change, the connection is marked as stale and a new connection is generated.
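+//! +//! A hedged usage sketch (the SPU id is illustrative): +//! +//! ```ignore +//! let conn_mgr = ConnManager::default(); +//! let mut requests: Actions<ConnectionRequest> = Actions::default(); +//! requests.push(ConnectionRequest::RefreshSpu(5001)); +//! conn_mgr.process_requests(requests).await; +//! ```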
+ +use std::sync::Arc; + + +use log::debug; +use log::trace; +use log::error; +use log::warn; +use chashmap::WriteGuard; + +use metadata::spu::SpuSpec; +use metadata::partition::PartitionSpec; +use metadata::partition::ReplicaKey; +use kf_socket::SinkPool; +use kf_socket::KfSink; +use types::SpuId; +use types::log_on_err; +use utils::actions::Actions; +use utils::counters::CounterTable; +use utils::SimpleConcurrentBTreeMap; +use internal_api::messages::SpuMsg; +use internal_api::messages::Replica; +use internal_api::messages::ReplicaMsg; +use internal_api::messages::ReplicaMsgs; +use internal_api::UpdateSpuRequest; +use internal_api::UpdateReplicaRequest; +use internal_api::UpdateAllRequest; +use kf_protocol::api::Request; +use kf_protocol::api::RequestMessage; + +use crate::core::spus::SharedSpuLocalStore; +use crate::core::spus::SpuLocalStore; +use crate::core::spus::SpuKV; +use crate::core::partitions::SharedPartitionStore; +use crate::core::partitions::PartitionLocalStore; +use crate::core::ShareLocalStores; +use crate::ScServerError; + +use super::ConnectionRequest; +use super::SpuSpecChange; +use super::PartitionSpecChange; + +// --------------------------------------- +// Counters +// --------------------------------------- + +#[derive(Debug, PartialEq, PartialOrd, Eq, Ord, Clone)] +enum ConnCntr { + ReqOk = 0, + ReqFailed = 1, + TryConnectOk = 2, + TryConnectFailed = 3, + SendMsgOk = 4, + SendMsgFailed = 5, + InternalErr = 6, +} + +const CONN_COUNTERS: [(ConnCntr, &'static str, bool); 7] = [ + (ConnCntr::ReqOk, "CONN-REQ-OK", false), + (ConnCntr::ReqFailed, "CONN-REQ-FAIL", false), + (ConnCntr::TryConnectOk, "TRY-CONN-OK", false), + (ConnCntr::TryConnectFailed, "TRY-CONN-FAIL", false), + (ConnCntr::SendMsgOk, "SEND-MSG-OK", false), + (ConnCntr::SendMsgFailed, "SEND-MSG-FAIL", false), + (ConnCntr::InternalErr, "INTERNAL-ERR", true), +]; + + +/// Discovered Connection Parameters such as source IP address and port +#[derive(Debug, Clone)] +pub struct ConnParams { +} + +impl ConnParams { + pub fn new() -> Self { + Self { } + } +} + + +pub type SharedConnManager = Arc<ConnManager>; + +/// Connection Manager handles the actual connections to SPUs. +/// It is responsible for keeping track of connection state, params, and sinks. +/// When it detects changes in connection status, it publishes them to senders. +/// Unlike a controller, it doesn't have its own independent task lifecycle (maybe it should?)
+#[derive(Debug)] +pub struct ConnManager { + spu_store: SharedSpuLocalStore, + partition_store: SharedPartitionStore, + conn_params: SimpleConcurrentBTreeMap<SpuId, ConnParams>, + sinks: SinkPool<SpuId>, + counter_tbl: CounterTable<SpuId, ConnCntr>, +} + +impl Default for ConnManager { + fn default() -> Self { + Self::new(SpuLocalStore::new_shared(),PartitionLocalStore::new_shared()) + } +} + + +impl ConnManager { + + + + pub fn new_with_local_stores(local_stores: ShareLocalStores) -> Self { + Self::new(local_stores.spus().clone(),local_stores.partitions().clone()) + } + + /// internal connection manager constructor + pub fn new(spu_store: SharedSpuLocalStore,partition_store: SharedPartitionStore) -> Self { + ConnManager { + spu_store, + partition_store, + conn_params: SimpleConcurrentBTreeMap::new(), + counter_tbl: CounterTable::default().with_columns(CONN_COUNTERS.to_vec()), + sinks: SinkPool::new(), + } + } + + + + /// add connection parameters & counters for this SPU + fn add_conn_param(&self, spu_id: SpuId, conn: ConnParams) { + // add parameters + self.conn_params.insert(spu_id, conn); + + self.counter_tbl.add_row(spu_id); + } + + + /// remove connection parameters & counters for this SPU + fn remove_conn_param(&self, spu_id: &SpuId) { + self.conn_params.write().remove(spu_id); + + self.counter_tbl.remove_row(spu_id); + } + + + + /// get connection parameters + #[allow(dead_code)] + fn conn_param(&self, spu_id: SpuId) -> Option<ConnParams> { + if let Some(params) = self.conn_params.read().get(&spu_id) { + Some(params.clone()) + } else { + None + } + } + + + /// SPU is valid if we have a registered SPU in the store and the spu is offline + pub fn validate_spu(&self, spu_id: &SpuId) -> bool { + + self.spu_store.validate_spu_for_registered(spu_id) + } + + /// Register a new sink for this SPU + pub async fn register_sink(&self, spu_id: SpuId, sink: KfSink, param: ConnParams) { + + self.sinks.insert_sink(spu_id.clone(), sink); + self.add_conn_param(spu_id.clone(),param); + } + + /// Unregister a sink. This happens when the connection goes down + pub async fn clear_sink(&self,spu_id: &SpuId) { + self.sinks.clear_sink(spu_id); + debug!("removing socket sink for spu: {}",spu_id); + } + + /// return current sinks + pub fn sinks(&self) -> &SinkPool<SpuId> { + &self.sinks + } + + + + /// Process connection requests. + /// Requests are usually sent as the result of actions by other controllers + pub async fn process_requests(&self, requests: Actions<ConnectionRequest>) { + + trace!("processing connection request: {:?}",requests); + + for request in requests.into_iter() { + + match request { + + ConnectionRequest::Spu(spec_changes) => { + + match spec_changes { + SpuSpecChange::Add(new_spu) => { + self.add_spu(new_spu).await; + }, + SpuSpecChange::Mod(new_spu,old_spu) => { + self.update_spu(new_spu,old_spu).await; + } + SpuSpecChange::Remove(spu) => { + self.remove_spu(spu).await; + }, + } + }, + ConnectionRequest::RefreshSpu(spu_id) => { + log_on_err!(self.refresh_spu(spu_id).await); + }, + ConnectionRequest::Partition(partition_req) => { + match partition_req { + PartitionSpecChange::Add(key,spec) => { + self.refresh_partition(key,spec).await; + }, + PartitionSpecChange::Mod(key,new_spec,_) => { + // for now, only send new + self.refresh_partition(key,new_spec).await; + + } + _ => {} + } + } + } + } + + } + + /// synchronize the spu spec with our connection; + /// if there is an existing spu connection, we need to drop it.
+ fn inner_add_spu(&self,spu: &SpuSpec) { + + debug!("adding spu: {}", spu.id); + + // there should not be existing entry, if so something is wrong + if let Some(conn) = self.sinks.get_sink(&spu.id) { + drop(conn); + self.sinks.clear_sink(&spu.id); + warn!("unexpected socket entry found for Spu({}). clearing ", spu.id); + self.counter_tbl.inc_counter(&spu.id, ConnCntr::InternalErr); + } + + } + + + /// add spu, + async fn add_spu(&self,spu: SpuSpec) { + + self.inner_add_spu(&spu); + + // send new SPU spec to all SPUS + let spu_msg = SpuMsg::update(spu.into()); + self.send_msg_to_all_live_spus(vec![spu_msg]).await; + + } + + + /// update spu connection, we do similar thing as add. + async fn update_spu(&self, new_spu: SpuSpec, old_spu: SpuSpec) { + debug!("updating new spu: {}, old spu: {}",new_spu.id,old_spu.id); + + self.inner_remove_spu(&old_spu); + self.inner_add_spu(&new_spu); + + let spu_msg = SpuMsg::delete(old_spu.into()); + self.send_msg_to_all_live_spus(vec![spu_msg]).await; + } + + /// remove spu connection parameters & socket. + async fn remove_spu(&self,old_spu: SpuSpec) { + debug!("remove Spu({}) from ConnMgr", old_spu.id); + + self.sinks.clear_sink(&old_spu.id); + self.remove_conn_param(&old_spu.id); + + let spu_msg = SpuMsg::delete(old_spu.into()); + self.send_msg_to_all_live_spus(vec![spu_msg]).await; + } + + /// remove spu connection parameters & socket. + fn inner_remove_spu(&self, old_spu: &SpuSpec) { + debug!("remove Spu({}) from ConnMgr", old_spu.id); + + self.sinks.clear_sink(&old_spu.id); + self.remove_conn_param(&old_spu.id); + } + + // ----------------------------------- + // Get Connection & Update status + // ----------------------------------- + + /// grab connection socket and increment counters + pub fn get_mut_connection(&self, spu_id: &SpuId) -> Option> { + + self.sinks.get_sink(spu_id) + } + + /// message sent successfully + pub fn inc_ok_counter(&self, spu_id: &SpuId) { + self.counter_tbl.inc_counter(spu_id, ConnCntr::SendMsgOk); + } + + /// could not send message clear connection so it gets established again. + pub fn inc_failed_counter(&self, spu_id: &SpuId) { + self.counter_tbl + .inc_counter(spu_id, ConnCntr::SendMsgFailed); + } + + /// Update Partition information to all SPUs in the spec + async fn refresh_partition(&self,key: ReplicaKey,spec: PartitionSpec) { + + // generate replica + let mut replica_msgs = ReplicaMsgs::default(); + replica_msgs.push(ReplicaMsg::update( + Replica::new( + key, + spec.leader, + spec.replicas.clone()))); + + let request = UpdateReplicaRequest::encode_request(replica_msgs); + let mut message = RequestMessage::new_request(request); + message + .get_mut_header() + .set_client_id("controller"); + + for spu in spec.replicas { + debug!("sending replica: {} to spu: {}",message.request.decode_request(),spu); + match self.send_msg(&spu, &message).await { + Ok(status) => { + if !status { + trace!("unable to send partition: {} to offline spu: {}",spec.leader,spu); + } + }, + Err(err) => warn!("error {} sending partition: {} to spu: {}",err,spec.leader,spu) + } + } + } + + + /// looks-up metadata and sends all SPUs and Replicas leaders associated with the SPU. 
+ async fn refresh_spu( + &self, + spu_id: i32, + ) -> Result<(), ScServerError> { + + debug!("Send SPU metadata({})", spu_id); + + if let Some(spu) = self.spu_store.get_by_id(&spu_id) { + self.send_update_all_to_spu( + &spu, + ).await?; + } else { + return Err(ScServerError::UnknownSpu(spu_id)); + } + + Ok(()) + } + + /// send all specs to the SPU + async fn send_update_all_to_spu<'a>( + &'a self, + spu: &'a SpuKV, + ) -> Result<(), ScServerError> { + let spu_specs = self.spu_store.all_values() + .into_iter() + .map(|spu_kv| spu_kv.spec) + .collect(); + let replicas = self.partition_store.replica_for_spu(spu.id()); + let request = UpdateAllRequest::new(spu_specs, replicas); + + debug!( + "SEND SPU Metadata: >> ({}): BulkUpdate({} spu-msgs, {} replica-msgs)", + spu.id(), + request.spus.len(), + request.replicas.len(), + ); + trace!("{:#?}", request); + + let mut message = RequestMessage::new_request(request); + message + .get_mut_header() + .set_client_id("controller"); + + self.send_msg(spu.id(),&message).await?; + + Ok(()) + } + + + /// send messages to all live SPUs + async fn send_msg_to_all_live_spus(&self, msgs: Vec<SpuMsg>) { + + let online_spus = self.spu_store.online_spus(); + debug!("trying to send SPU spec to active Spu: {}",online_spus.len()); + for live_spu in online_spus { + if let Err(err) = self.send_update_spu_msg_request(&live_spu,msgs.clone()).await { + error!("error sending msg {}", err); + } + + } + } + + /// Send Update SPU message Request to an Spu + async fn send_update_spu_msg_request<'a>( + &'a self, + spu: &'a SpuKV, + spu_msgs: Vec<SpuMsg>, + ) -> Result<(), ScServerError> { + + + trace!("{:#?}", spu_msgs); + + let request = UpdateSpuRequest::new(spu_msgs); + + let mut message = RequestMessage::new_request(request); + message + .get_mut_header() + .set_client_id("controller"); + + self.send_msg(spu.id(), &message).await?; + + Ok(()) + } + + + + /// send a request message to a specific spu; + /// this is a one-way send + async fn send_msg<'a, R>(&'a self, spu_id: &'a SpuId, req_msg: &'a RequestMessage<R>) -> Result<bool, ScServerError> + where R: Request + Send + Sync + 'static + { + + if let Some(mut spu_conn) = self.get_mut_connection(spu_id) { + // send message & evaluate response + + trace!("spu client: sending msg: {:#?}",req_msg); + + match spu_conn.send_request(&req_msg).await { + Ok(_) => { + trace!("spu client send successfully"); + // increment ok counter + self.inc_ok_counter(spu_id); + Ok(true) + }, + Err(err) => { + error!("spu client send failed"); + // mark socket as stale and update counter + //spu_conn.set_stale(); + self.inc_failed_counter(spu_id); + + Err(ScServerError::SpuCommuncationError(*spu_id, err)) + } + } + } else { + Ok(false) + } + } + + +} + + diff --git a/sc-server/src/conn_manager/mod.rs b/sc-server/src/conn_manager/mod.rs new file mode 100644 index 0000000000..5295662b7d --- /dev/null +++ b/sc-server/src/conn_manager/mod.rs @@ -0,0 +1,10 @@ +mod actions; +mod manager; + +pub use self::actions::ConnectionRequest; +pub use self::actions::SpuConnectionStatusChange; +pub use self::actions::SpuSpecChange; +pub use self::actions::PartitionSpecChange; +pub use self::manager::{ConnManager, SharedConnManager}; +pub use self::manager::ConnParams; + diff --git a/sc-server/src/core/common/actions.rs b/sc-server/src/core/common/actions.rs new file mode 100644 index 0000000000..eb37654432 --- /dev/null +++ b/sc-server/src/core/common/actions.rs @@ -0,0 +1,73 @@ +use std::fmt::Debug; +use std::fmt::Display; +use std::fmt; + +use crate::core::Spec; + +use super::KVObject; + +/// Represents changes in Local State +#[derive(Debug,PartialEq,Clone)] +pub enum LSChange<S> + where S: Spec, + S::Key: Debug, + S::Status: Debug + PartialEq +{ + + Add(KVObject<S>), + Mod(KVObject<S>,KVObject<S>), // new, old + Delete(KVObject<S>) +} + + +impl<S> fmt::Display for LSChange<S> + where S: Spec, + S::Key: Debug + Display, + S::Status: PartialEq + Debug +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::Add(add) => write!(f,"{} Add: {}",S::LABEL,add.key()), + Self::Mod(update,_) => write!(f,"{} Mod: {}",S::LABEL,update.key()), + Self::Delete(del) => write!(f,"{} Delete: {}",S::LABEL,del.key()) + } + } +} + + + +impl<S> LSChange<S> + where S: Spec, + S::Key: Debug, + S::Status: Debug + PartialEq +{ + + pub fn add<K>(value: K) -> Self + where K: Into<KVObject<S>> { + LSChange::Add(value.into()) + } + + pub fn update(new: KVObject<S>, old: KVObject<S>) -> Self { + LSChange::Mod(new,old) + } + + pub fn delete(value: KVObject<S>) -> Self { + LSChange::Delete(value) + } + +} + + +/// Actions to update World States +#[derive(Debug,PartialEq,Clone)] +pub enum WSAction<S> + where S: Spec, + S::Key: PartialEq, + S::Status: Debug + PartialEq + +{ + Add(KVObject<S>), + UpdateStatus(KVObject<S>), // only update the status + UpdateSpec(KVObject<S>), // only update the spec + Delete(S::Key) +} diff --git a/sc-server/src/core/common/channels.rs b/sc-server/src/core/common/channels.rs new file mode 100644 index 0000000000..481eaa366e --- /dev/null +++ b/sc-server/src/core/common/channels.rs @@ -0,0 +1,13 @@ +//! +//! # Channels +//! +//! Helper functions to create a channel +//! +use futures::channel::mpsc::channel; +use futures::channel::mpsc::Receiver; +use futures::channel::mpsc::Sender; + +/// Create a channel +pub fn new_channel<A>() -> (Sender<A>, Receiver<A>) { + channel(100) +} diff --git a/sc-server/src/core/common/dispatcher.rs b/sc-server/src/core/common/dispatcher.rs new file mode 100644 index 0000000000..a41b8c9ea9 --- /dev/null +++ b/sc-server/src/core/common/dispatcher.rs @@ -0,0 +1,84 @@ + +use std::fmt::Debug; + +use log::trace; + +use log::info; +use futures::channel::mpsc::Receiver; +use futures::stream::StreamExt; +use futures::select; + +use future_helper::spawn; + +use utils::actions::Actions; + + +use crate::core::Status; +use crate::core::Spec; +use crate::core::WSUpdateService; + + +pub trait ControllerAction{} + +pub trait Controller { + type Spec: Spec; + type Status: Status; + type Action: Debug; +} + +#[allow(dead_code)] +struct Dispatcher<C>(C); + +#[allow(dead_code)] +impl<C> Dispatcher<C> + where + C: Controller + Send + Sync + 'static, + C::Action: Send + Sync + 'static + { + /// start the controller with ctx and receiver + pub fn run<K>( + controller: C, + receiver: Receiver<Actions<C::Action>>, + kv_service: K) + + where K: WSUpdateService + Send + Sync + 'static, + { + spawn(Self::request_loop(receiver, kv_service, controller)); + } + + async fn request_loop<K>( + mut receiver: Receiver<Actions<C::Action>>, + _kv_service: K, + mut _controller: C) + + where K: WSUpdateService + Send + Sync + 'static + { + + loop { + select!
{ + + receiver_req = receiver.next() => { + + match receiver_req { + + None => { + info!("receiver has terminated"); + break; + }, + + Some(request) => { + trace!("received actions: {:#?}",request); + //controller.process_auth_token_request(&kv_service,request).await; + + } + + } + + } + } + + } + + + } + } \ No newline at end of file diff --git a/sc-server/src/core/common/kv_context.rs b/sc-server/src/core/common/kv_context.rs new file mode 100644 index 0000000000..5b673d643f --- /dev/null +++ b/sc-server/src/core/common/kv_context.rs @@ -0,0 +1,42 @@ +//! +//! # Key/Value Context +//! +//! Key/Value Contexts are required by the KV store for modifications and owner_references. +//! Controller treats these objects as opaque cookies which are converted to Metadata by +//! the KV client. +use k8_metadata::core::metadata::ObjectMeta; + +#[derive(Debug, PartialEq, Clone)] +pub struct KvContext { + pub item_ctx: Option<ObjectMeta>, + pub parent_ctx: Option<ObjectMeta>, +} + +impl KvContext { + pub fn with_ctx(mut self, ctx: ObjectMeta) -> Self { + self.item_ctx = Some(ctx); + self + } + + pub fn with_parent_ctx(mut self, ctx: ObjectMeta) -> Self { + self.parent_ctx = Some(ctx); + self + } + + pub fn make_parent_ctx(&self) -> KvContext { + if self.item_ctx.is_some() { + KvContext::default().with_parent_ctx(self.item_ctx.as_ref().unwrap().clone()) + } else { + KvContext::default() + } + } +} + +impl ::std::default::Default for KvContext { + fn default() -> Self { + KvContext { + item_ctx: None, + parent_ctx: None, + } + } +} \ No newline at end of file diff --git a/sc-server/src/core/common/kv_obj.rs b/sc-server/src/core/common/kv_obj.rs new file mode 100644 index 0000000000..ec0daf8e04 --- /dev/null +++ b/sc-server/src/core/common/kv_obj.rs @@ -0,0 +1,112 @@ +use std::fmt; +use std::fmt::Display; + +use crate::core::Spec; + +use super::KvContext; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +#[derive(Debug, Clone, PartialEq)] +pub struct KVObject<S> where S: Spec { + pub spec: S, + pub status: S::Status, + pub key: S::Key, + pub kv_ctx: KvContext, +} + +impl<S> KVObject<S> where S: Spec { + + + pub fn new<J>(key: J, spec: S, status: S::Status) -> Self where J: Into<S::Key> { + Self { + key: key.into(), + spec, + status, + kv_ctx: KvContext::default(), + } + } + + pub fn new_with_context<J>(key: J, spec: S, kv_ctx: KvContext) -> Self where J: Into<S::Key> { + Self { + key: key.into(), + spec, + status: S::Status::default(), + kv_ctx + } + } + + + pub fn with_spec<J>(key: J, spec: S) -> Self where J: Into<S::Key> { + Self::new(key.into(),spec,S::Status::default()) + } + + + pub fn with_kv_ctx(mut self, kv_ctx: KvContext) -> Self { + self.kv_ctx = kv_ctx; + self + } + + pub fn key(&self) -> &S::Key { + &self.key + } + + pub fn key_owned(&self) -> S::Key { + self.key.clone() + } + + pub fn my_key(self) -> S::Key { + self.key + } + + pub fn spec(&self) -> &S { + &self.spec + } + pub fn status(&self) -> &S::Status { + &self.status + } + + + pub fn kv_ctx(&self) -> &KvContext { + &self.kv_ctx + } + + + pub fn set_ctx(&mut self, new_ctx: &KvContext) { + self.kv_ctx = new_ctx.clone(); + } + + pub fn parts(self) -> (S::Key,S,KvContext) { + (self.key,self.spec,self.kv_ctx) + } + + pub fn is_owned(&self,uid: &str) -> bool { + match &self.kv_ctx.parent_ctx { + Some(parent) => parent.uid == uid, + None => false + } + } + + +} + + +impl<S> fmt::Display for KVObject<S> + where + S: Spec, + S::Key: Display +{ + + default fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f,"KV {} key: {}",S::LABEL,self.key()) + } +}
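+ +// A hedged sketch (commented out) of the intended usage; `PartitionSpec` stands in +// for any concrete `Spec` implementation and `key`/`spec` are illustrative values: +// +// let kv: KVObject<PartitionSpec> = KVObject::with_spec(key, spec); +// let store = LocalStore::<PartitionSpec>::new_shared(); +// store.insert(kv); +// assert_eq!(store.count(), 1);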
+ + +impl<S> Into<(S::Key,S,S::Status)> for KVObject<S> where S: Spec { + fn into(self) -> (S::Key,S,S::Status) { + (self.key,self.spec,self.status) + } +} \ No newline at end of file diff --git a/sc-server/src/core/common/mod.rs b/sc-server/src/core/common/mod.rs new file mode 100644 index 0000000000..6c97a8dedf --- /dev/null +++ b/sc-server/src/core/common/mod.rs @@ -0,0 +1,15 @@ +mod channels; +mod kv_context; +mod store; +mod kv_obj; +mod actions; + +#[cfg(test)] +pub mod test_fixtures; + +pub use self::kv_context::KvContext; +pub use self::channels::new_channel; +pub use self::store::LocalStore; +pub use self::kv_obj::KVObject; +pub use self::actions::LSChange; +pub use self::actions::WSAction; diff --git a/sc-server/src/core/common/store.rs b/sc-server/src/core/common/store.rs new file mode 100644 index 0000000000..18d803c8f8 --- /dev/null +++ b/sc-server/src/core/common/store.rs @@ -0,0 +1,189 @@ +use std::sync::Arc; +use std::fmt::Debug; +use std::fmt::Display; +use std::borrow::Borrow; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use utils::SimpleConcurrentBTreeMap; + +use crate::core::Spec; + +use super::KVObject; + +/// Local state in memory +#[derive(Debug)] +pub struct LocalStore<S>(SimpleConcurrentBTreeMap<S::Key, KVObject<S>>) +where + S: Spec, + <S as Spec>::Status: Debug; + +impl<S> Default for LocalStore<S> +where + S: Spec, + <S as Spec>::Status: Debug, +{ + fn default() -> Self { + LocalStore(SimpleConcurrentBTreeMap::new()) + } +} + +impl<S> ::std::cmp::PartialEq for LocalStore<S> +where + S: Spec + PartialEq, + S::Status: PartialEq + Debug, +{ + fn eq(&self, other: &LocalStore<S>) -> bool { + for (key, val) in self.0.read().iter() { + let other_list = other.0.read(); + let other_val = match other_list.get(key) { + Some(val) => val, + None => return false, + }; + if val != other_val { + return false; + } + } + true + } +} + +impl<S> LocalStore<S> +where + S: Spec, + S::Status: Debug, +{ + pub fn new_shared() -> Arc<Self> { + Arc::new(Self::default()) + } + + pub fn inner_store(&self) -> &SimpleConcurrentBTreeMap<S::Key, KVObject<S>> { + &self.0 + } + + pub fn insert(&self, value: KVObject<S>) -> Option<KVObject<S>> { + self.inner_store().write().insert(value.key_owned(), value) + } + + /// visit all values + pub fn visit_values<F>(&self, func: F) + where + F: FnMut(&'_ KVObject<S>), + { + self.inner_store().read().values().for_each(func); + } + + + +} + +impl<S> LocalStore<S> +where + S: Spec, + S::Key: Ord + Clone, + S::Status: Debug, +{ + /* + pub fn delete<S>(&self, name: S) where S: AsRef<str> { + self.inner_store().write().remove(name.as_ref()); + } + */ + + pub fn delete<K>(&self, key: &K) + where S::Key: Borrow<K>, + K: Ord + { + self.inner_store().write().remove(key); + } + + /// get copy of the value ref by key + pub fn value<K>(&self, key: &K) -> Option<KVObject<S>> + where S::Key: Borrow<K>, + K: Ord + { + match self.inner_store().read().get(key) { + Some(value) => Some(value.clone()), + None => None, + } + } + + pub fn find_and_do<K, F>(&self, key: &K, mut func: F) -> Option<()> + where + F: FnMut(&'_ KVObject<S>), + K: Ord, + S::Key: Borrow<K> + { + if let Some(value) = self.inner_store().read().get(key) { + func(value); + Some(()) + } else { + None + } + + } + + pub fn contains_key<K>(&self, key: &K) -> bool + where S::Key: Borrow<K>, + K: Ord + { + self.inner_store().read().contains_key(key) + } + + pub fn remove<K>(&self, key: &K) -> Option<KVObject<S>> + where S::Key: Borrow<K>, + K: Ord + { + self.inner_store().write().remove(key) + } + + pub fn count(&self) -> i32 { + self.inner_store().read().len() as i32 + } + + pub fn all_keys(&self) -> Vec<S::Key> { + self.inner_store().read().keys().cloned().collect() + } + + pub fn all_values(&self) -> Vec<KVObject<S>> { + self.inner_store().read().values().cloned().collect() + } + + pub fn all_specs(&self) -> Vec<S> { + self.inner_store().read().values().map(|kv| kv.spec.clone()).collect() + } + + /// update status + pub fn update_status<K>(&self, key: &K, status: S::Status) -> Result<(), IoError> + where S::Key: Borrow<K>, + K: Display + Ord + + { + + if let Some(old_kv) = self.inner_store().write().get_mut(key) { + old_kv.status = status; + Ok(()) + } else { + Err(IoError::new( + ErrorKind::InvalidData, + format!("{} '{}': not found, cannot update", S::LABEL,key) + )) + } + } +} + + +impl<S> Display for LocalStore<S> +where + S: Spec, + <S as Spec>::Status: Debug +{ + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + + write!(f, "{} Store count: {}",S::LABEL,self.inner_store().read().len()) + } + + +} + + + diff --git a/sc-server/src/core/common/test_fixtures.rs b/sc-server/src/core/common/test_fixtures.rs new file mode 100644 index 0000000000..503fc66285 --- /dev/null +++ b/sc-server/src/core/common/test_fixtures.rs @@ -0,0 +1,23 @@ + + +use internal_api::messages::MsgType; + + +// Test Actions - helps generate composite actions +pub enum TAction { + MOD, + DEL, + UPDATE, +} + +impl From<TAction> for MsgType { + fn from(value: TAction) -> Self { + match value { + TAction::UPDATE => MsgType::UPDATE, + TAction::DEL => MsgType::DELETE, + TAction::MOD => MsgType::UPDATE + } + } +} + + diff --git a/sc-server/src/core/context.rs b/sc-server/src/core/context.rs new file mode 100644 index 0000000000..b61526344a --- /dev/null +++ b/sc-server/src/core/context.rs @@ -0,0 +1,44 @@ +//! +//! # SC Context +//! +//! Streaming Controller Context stores entities that persist through system operation. +//! + +use std::sync::Arc; + + +//use super::send_channels::ScSendChannels; + +#[derive(Debug)] +pub struct ScContext<C> { + // send_channels: ScSendChannels, + conn_manager: Arc<C>, +} + +impl<C> Clone for ScContext<C> { + + fn clone(&self) -> Self { + ScContext { + // send_channels: self.send_channels.clone(), + conn_manager: self.conn_manager.clone() + } + } +} + +// ----------------------------------- +// Global Context - Implementation +// ----------------------------------- + +impl<C> ScContext<C> where C: SpuConnections { + + pub fn new(conn_manager: Arc<C>) -> Self { + Self { conn_manager } + } + + // reference to connection metadata + pub fn conn_manager(&self) -> Arc<C> { + self.conn_manager.clone() + } + + +} diff --git a/sc-server/src/core/dispatcher.rs b/sc-server/src/core/dispatcher.rs new file mode 100644 index 0000000000..4ffbef947f --- /dev/null +++ b/sc-server/src/core/dispatcher.rs @@ -0,0 +1,56 @@ +//! +//! # Streaming Coordinator (SC) Dispatcher +//! +//! Receives actions from the K8 dispatcher, identifies their action type and forwards them +//! to the corresponding processing engine. +//!
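+//! +//! A hedged wiring sketch (controller construction elided; `new_channel` comes from `core::common`): +//! +//! ```ignore +//! let (sender, receiver) = new_channel(); +//! run(receiver, sc_controller); +//! // elsewhere: sender.send(sc_request).await; +//! ```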
+ +use std::io::Error as IoError; + +use future_helper::spawn; +use futures::channel::mpsc::Receiver; +use futures::future::TryFutureExt; +use futures::select; +use futures::stream::StreamExt; +use log::{error, info}; +use log::trace; + +use crate::core::WSUpdateService; +use crate::conn_manager::SpuConnections; + +use super::ScController; +use super::ScRequest; + +/// Streaming Controller dispatcher entry point, spawns new thread +pub fn run<K, C>(receiver: Receiver<ScRequest>, sc_controller: ScController<K, C>) + where K: WSUpdateService + Clone + Send + Sync + 'static, + C: SpuConnections + Send + Sync + 'static +{ + info!("start SC[{}] dispatcher", sc_controller.id()); + + spawn(sc_request_loop(receiver, sc_controller)); +} + +/// SC dispatcher request loop, waits for a request and dispatches +/// it for processing. +async fn sc_request_loop<K, C>(mut receiver: Receiver<ScRequest>, mut sc_controller: ScController<K, C>) + where K: WSUpdateService + Clone, C: SpuConnections +{ + loop { + select! { + receiver_req = receiver.next() => { + match receiver_req { + None => { + info!("SC dispatcher receiver is removed. end"); + break; + }, + Some(request) => { + trace!("SC Controller receive msg: {:#?}",request); + sc_controller.process_sc_request(request).await; + }, + } + } + complete => {}, + } + } +} diff --git a/sc-server/src/core/metadata.rs b/sc-server/src/core/metadata.rs new file mode 100644 index 0000000000..b72d35c479 --- /dev/null +++ b/sc-server/src/core/metadata.rs @@ -0,0 +1,82 @@ +//! +//! # Streaming Coordinator Metadata +//! +//! Metadata stores a copy of the data from the KV store in local memory. +//! +use std::sync::Arc; + +use crate::cli::ScConfig; +use crate::core::partitions::PartitionLocalStore; +use crate::core::spus::SharedSpuLocalStore; +use crate::core::spus::SpuLocalStore; +use crate::core::topics::TopicLocalStore; + +pub type ShareLocalStores = Arc<LocalStores>; + +#[derive(Debug)] +pub struct LocalStores { + spus: SharedSpuLocalStore, + partitions: Arc<PartitionLocalStore>, + topics: Arc<TopicLocalStore>, + config: ScConfig, + +} + +// ----------------------------------- +// ScMetadata - Implementation +// ----------------------------------- + +impl LocalStores { + pub fn shared_metadata(config: ScConfig) -> Arc<Self> { + Arc::new(LocalStores::new(config)) + } + + /// private function to provision metadata + fn new(config: ScConfig) -> Self { + LocalStores { + spus: SpuLocalStore::new_shared(), + partitions: PartitionLocalStore::new_shared(), + topics: TopicLocalStore::new_shared(), + config, + } + } + + + /// reference to spus + pub fn spus(&self) -> &SharedSpuLocalStore { + &self.spus + } + + pub fn owned_spus(&self) -> SharedSpuLocalStore { + self.spus().clone() + } + + /// reference to partitions + pub fn partitions(&self) -> &Arc<PartitionLocalStore> { + &self.partitions + } + + /// reference to topics + pub fn topics(&self) -> &Arc<TopicLocalStore> { + &self.topics + } + + /// reference to config + pub fn config(&self) -> &ScConfig { + &self.config + } + + /// format metadata cache into a table string + #[allow(dead_code)] + pub fn table_fmt(&self) -> String { + let mut table = String::new(); + let newline = format!("\n"); + + table.push_str(&self.spus.table_fmt()); + table.push_str(&newline); + table.push_str(&self.topics.table_fmt()); + table.push_str(&newline); + table.push_str(&self.partitions.table_fmt()); + table + } +} diff --git a/sc-server/src/core/mod.rs b/sc-server/src/core/mod.rs new file mode 100644 index 0000000000..d1b6813cd3 --- /dev/null +++ b/sc-server/src/core/mod.rs @@ -0,0 +1,43 @@ + +mod metadata; +mod world_store; + +pub mod common; +pub mod partitions; +pub
mod spus; +pub mod topics; + + +pub use self::metadata::{LocalStores, ShareLocalStores}; +pub use self::world_store::WSUpdateService; +pub use self::world_store::WSChangeChannel; +pub use self::world_store::WSChangeDispatcher; + +use std::io::Error as IoError; + +use k8_metadata::core::Spec as K8Spec; +use k8_metadata::core::metadata::K8Obj; + +use crate::core::common::KVObject; + +pub trait Spec: Default + Clone { + + const LABEL: &'static str; + + type Status: Status; + type K8Spec: K8Spec; + type Owner: Spec; + + type Key: Ord + Clone + ToString; + + // convert kubernetes objects into KV value + fn convert_from_k8(k8_obj: K8Obj<Self::K8Spec, <Self::K8Spec as K8Spec>::Status>) -> + Result<KVObject<Self>, IoError>; + + +} + + +pub trait Status: Default + Clone {} + + diff --git a/sc-server/src/core/partitions/Test.MD b/sc-server/src/core/partitions/Test.MD new file mode 100644 index 0000000000..fe3bf934f8 --- /dev/null +++ b/sc-server/src/core/partitions/Test.MD @@ -0,0 +1,53 @@ +* Set up + + - Start SC Server + - Create 2 SPU CRD (5001,5002). + - SPU should not be running. + + +* Topic Creation + - No SPU should be running. + - Create Topic1 with Partition = 1, Replication = 2. + - Topic should have replica map with 2 replicas. + - Resolution should be Ok + - 1 Partition should be created. + - Should have Spec with leader and 2 replicas. + - Status + - should be Offline + - should have Leader with offset of -1 + - No replicas + +* Turn on leader SPU. For partition + - Status's resolution should be online + - Status's Leader should have offsets with 0 + - Status's Replica should have replica id with offsets of -1 + +* Turn on follower SPU. For partition + - Status's Replica should have offsets of 0 + + +* Follower failure + - Turn off follower SPU + - Status's Replica's status should stay Online + - Turn on follower SPU + +* Leader Election + - Turn off Leader SPU + - Spec should be changed to follower SPU + - Status's leader should be changed + - Status's Replica should have value of leader + +* Follower up: + - Partition with 2 replicas. + - Shutdown all SPU. This should set partition status to ElectionNoLeaderFounded + - Bring up follower SPU. + - It should do election (doesn't) + +# Issues +[ ] Follower doesn't catch up + - 2 SPU. + - Create a record. 2 SPU should have same replicas + - Shutdown follower. Create record. Leader's LEO and HW should be greater than follower's. + - Bring back follower. Follower should sync with Leader + + diff --git a/sc-server/src/core/partitions/actions.rs b/sc-server/src/core/partitions/actions.rs new file mode 100644 index 0000000000..a0b38a9d2b --- /dev/null +++ b/sc-server/src/core/partitions/actions.rs @@ -0,0 +1,65 @@ +//! +//! # Partition Actions +//! +//! Defines Partition action requests +//! +//! # Remarks +//! +//! Partitions are created in 2 phases (see the sketch below): +//! * Phase 1 - send spec to KV store (no status with replica map) +//! * Phase 2 - update status and send to KV store +//!
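+//! +//! A hedged sketch of wrapping a local-store change into a request (values are illustrative; the unit tests in `reducer.rs` use the same shape): +//! +//! ```ignore +//! let changes: Actions<PartitionLSChange> = vec![ +//!     PartitionLSChange::Add((("topic1", 0), vec![5001, 5002]).into()), +//! ].into(); +//! let request = PartitionChangeRequest::Partition(changes); +//! ```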
+use std::fmt; + + +use utils::actions::Actions; +use internal_api::UpdateLrsRequest; + +use crate::core::spus::SpuLSChange; +use crate::conn_manager::ConnectionRequest; + +use super::PartitionWSAction; +use super::PartitionLSChange; + +#[derive(Debug, PartialEq, Clone)] +pub enum PartitionChangeRequest { + Partition(Actions<PartitionLSChange>), + Spu(Actions<SpuLSChange>), + LrsUpdate(UpdateLrsRequest) +} + + +impl fmt::Display for PartitionChangeRequest { + + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + PartitionChangeRequest::Partition(req) => write!(f,"Partition LS: {}",req.count()), + PartitionChangeRequest::Spu(req) => write!(f,"SPU LS: {}",req.count()), + PartitionChangeRequest::LrsUpdate(lrs) => write!(f,"Lrs Rep: {}",lrs.id) + } + } +} + + + + +#[derive(Debug,Default)] +pub struct PartitionActions { + pub partitions: Actions<PartitionWSAction>, + pub conns: Actions<ConnectionRequest>, +} + + + + +impl fmt::Display for PartitionActions { + + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, + "PartitionActions partitions: {}, conns: {}", + self.partitions.count(), + self.conns.count() + + ) + } +} diff --git a/sc-server/src/core/partitions/controller.rs b/sc-server/src/core/partitions/controller.rs new file mode 100644 index 0000000000..be3e3d852b --- /dev/null +++ b/sc-server/src/core/partitions/controller.rs @@ -0,0 +1,163 @@ +//! +//! # Partition Controller +//! + +use log::trace; +use log::error; +use log::info; +use futures::select; +use futures::stream::StreamExt; +use futures::channel::mpsc::Receiver; +use futures::channel::mpsc::Sender; +use futures::channel::mpsc::channel; + +use types::log_on_err; +use metadata::partition::PartitionSpec; +use metadata::spu::SpuSpec; +use future_helper::spawn; +use internal_api::UpdateLrsRequest; + +use crate::core::WSUpdateService; +use crate::core::ShareLocalStores; +use crate::conn_manager::SharedConnManager; + +use crate::core::WSChangeChannel; + +use super::PartitionReducer; +use super::PartitionChangeRequest; + + + +#[derive(Debug)] +pub struct PartitionController<W> { + local_stores: ShareLocalStores, + conn_manager: SharedConnManager, + ws_service: W, + partition_receiver: WSChangeChannel<PartitionSpec>, + spu_receiver: WSChangeChannel<SpuSpec>, + lrs_receiver: Receiver<UpdateLrsRequest>, + lrs_sender: Sender<UpdateLrsRequest>, + reducer: PartitionReducer, +} + +impl<W> PartitionController<W> + where W: WSUpdateService + Send + 'static +{ + + pub fn new( + local_stores: ShareLocalStores, + conn_manager: SharedConnManager, + partition_receiver: WSChangeChannel<PartitionSpec>, + spu_receiver: WSChangeChannel<SpuSpec>, + ws_service: W) -> Self { + + let (lrs_sender,lrs_receiver) = channel(100); + + Self { + ws_service, + conn_manager, + local_stores: local_stores.clone(), + reducer: PartitionReducer::new( + local_stores.partitions().clone(), + local_stores.spus().clone() + ), + spu_receiver, + partition_receiver, + lrs_receiver, + lrs_sender + } + } + + pub fn lrs_sendr(&self) -> Sender<UpdateLrsRequest> { + self.lrs_sender.clone() + } + + pub fn run(self) { + + spawn(self.dispatch_loop()); + } + + async fn dispatch_loop(mut self) { + + loop { + select! { + partition_req = self.partition_receiver.next() => { + match partition_req { + None => { + error!("Partition LC dispatcher has been terminated. Ending Server loop"); + break; + }, + Some(request) => { + trace!("receive partition request {:#?}",request); + self.process_request(PartitionChangeRequest::Partition(request)).await; + }, + } + }, + spu_req = self.spu_receiver.next() => { + match spu_req { + None => { + error!("SPU LC dispatcher has been terminated.
Ending server loop"); + break; + }, + Some(request) => { + trace!("received SPU request {:#?}",request); + self.process_request(PartitionChangeRequest::Spu(request)).await; + } + } + }, + lrs_req = self.lrs_receiver.next() => { + match lrs_req { + None => { + error!("LRS channel has been terminated. Ending server loop"); + break; + }, + Some(req) => { + trace!("Lrs status request: {:#?}",req); + self.process_request(PartitionChangeRequest::LrsUpdate(req)).await; + } + } + } + complete => {}, + } + } + + info!("spu controller is terminated"); + + } + + async fn process_request(&mut self, requests: PartitionChangeRequest) { + + // process partition actions; generate Kvs actions and SPU msgs. + match self.reducer.process_requests(requests) { + Ok(actions) => { + + trace!( + "Partition actions: {}", + actions + ); + + if actions.partitions.count() > 0 { + for ws_action in actions.partitions.into_iter() { + log_on_err!( + self.ws_service.update_partition(ws_action).await + ); + } + } + + if actions.conns.count() > 0 { + self.conn_manager.process_requests(actions.conns).await; + } + + + }, + Err(err) => error!("error processing partition requests: {}",err) + } + + } + + +} + + + + diff --git a/sc-server/src/core/partitions/metadata.rs b/sc-server/src/core/partitions/metadata.rs new file mode 100644 index 0000000000..01939bfdbf --- /dev/null +++ b/sc-server/src/core/partitions/metadata.rs @@ -0,0 +1,230 @@ +//! +//! # Partition & Partitions Metadata +//! +//! Partition metadata information on cached in the local Controller. +//! +use std::io::Error as IoError; + +use log::debug; + +use internal_api::messages::Replica; +use metadata::partition::ReplicaKey; +use metadata::partition::{PartitionSpec, PartitionStatus}; +use metadata::topic::TopicSpec; +use k8_metadata::partition::PartitionSpec as K8PartitionSpec; +use k8_metadata::core::Spec as K8Spec; +use k8_metadata::core::metadata::K8Obj; +use types::SpuId; + + +use crate::core::common::LocalStore; +use crate::core::common::KVObject; +use crate::core::Spec; +use crate::core::Status; +use crate::k8::default_convert_from_k8; + + +impl Spec for PartitionSpec { + + const LABEL: &'static str = "Partition"; + type Key = ReplicaKey; + type Status = PartitionStatus; + type K8Spec = K8PartitionSpec; + type Owner = TopicSpec; + + fn convert_from_k8(k8_obj: K8Obj::Status>) -> + Result,IoError> { + + default_convert_from_k8(k8_obj) + } + +} + +impl Status for PartitionStatus{} + +// ----------------------------------- +// Data Structures +// ----------------------------------- +pub type PartitionKV = KVObject; + +// ----------------------------------- +// Partition - Implementation +// ----------------------------------- + +impl PartitionKV { + + + /// create new partiton with replica map. 
+ /// first element of replicas is leader + pub fn with_replicas(key: ReplicaKey,replicas: Vec) -> Self { + let spec: PartitionSpec = replicas.into(); + Self::new(key,spec, PartitionStatus::default()) + } + +} + +impl From<((S, i32),Vec)> for PartitionKV where S: Into { + + fn from(partition: ((S, i32),Vec)) -> Self { + let (replica_key, replicas) = partition; + Self::with_replicas(replica_key.into(),replicas) + } + +} + + +pub type PartitionLocalStore = LocalStore; + + +// ----------------------------------- +// Partitions - Implementation +// ----------------------------------- + +impl PartitionLocalStore { + + + + pub fn names(&self) -> Vec { + self.inner_store().read().keys().cloned().collect() + } + + pub fn topic_partitions(&self, topic: &str) -> Vec { + let mut res: Vec = Vec::default(); + for (name, partition) in self.inner_store().read().iter() { + if name.topic == topic { + res.push(partition.clone()); + } + } + res + } + + /// find all partitions that has spu in the replicas + pub fn partition_spec_for_spu(&self, target_spu: &i32) -> Vec<(ReplicaKey, PartitionSpec)> { + let mut res = vec![]; + for (name, partition) in self.inner_store().read().iter() { + if partition.spec.replicas.contains(target_spu) { + res.push((name.clone(), partition.spec.clone())); + } + } + res + } + + pub fn count_topic_partitions(&self, topic: &str) -> i32 { + let mut count: i32 = 0; + for (name, _) in self.inner_store().read().iter() { + if name.topic == topic { + count += 1; + } + } + count + } + + + // return partitions that belong to this topic + #[allow(dead_code)] + fn topic_partitions_list(&self, topic: &str) -> Vec { + self + .inner_store() + .read() + .keys() + .filter_map(|name| { + if &name.topic == topic { + Some(name.clone()) + } else { + None + } + }) + .collect() + } + + + /// replica msg for target spu + pub fn replica_for_spu(&self, target_spu: &SpuId) -> Vec { + + let msgs: Vec = self.partition_spec_for_spu(target_spu) + .into_iter() + .map( |(replica_key,partition_spec)| + Replica::new( + replica_key, + partition_spec.leader, + partition_spec.replicas + ) + ).collect(); + debug!("{} computing replic msg for spuy: {}, msg: {}",self,target_spu,msgs.len()); + msgs + } + + pub fn table_fmt(&self) -> String { + let mut table = String::new(); + + let partition_hdr = format!( + "{n:<18} {l:<6} {r}\n", + n = "PARTITION", + l = "LEADER", + r = "LIVE-REPLICAS", + ); + table.push_str(&partition_hdr); + + for (name, partition) in self.inner_store().read().iter() { + let mut leader = String::from("-"); + let mut _lrs = String::from("[]"); + + if partition.spec.leader >= 0 { + leader = format!("{}", partition.spec.leader); + // lrs = partition.live_replicas_str(); + } + let row = format!( + "{n:<18} {l:<6} \n", + n = name.to_string(), + l = leader, + + ); + table.push_str(&row); + } + + table + } + + + + pub fn bulk_add(&self,partitions: Vec<((S, i32),Vec)>) + where + S: Into, + { + for (replica_key,replicas) in partitions.into_iter() { + let partition: PartitionKV = (replica_key,replicas).into(); + self.insert(partition); + } + + } + + +} + +impl From)>> for PartitionLocalStore where S: Into{ + fn from(partitions: Vec<((S, i32),Vec)>) -> Self { + let store = Self::default(); + store.bulk_add(partitions); + store + } +} + + +#[cfg(test)] +pub mod test { + + use super::PartitionLocalStore; + + #[test] + fn test_partitions_to_replica_msgs() { + + let partitions = PartitionLocalStore::default(); + partitions.bulk_add(vec![(("topic1", 0), vec![10,11,12]),]); + + let replica_msg = 
partitions.replica_for_spu(&10); + assert_eq!(replica_msg.len(),1); + + + } + +} \ No newline at end of file diff --git a/sc-server/src/core/partitions/mod.rs b/sc-server/src/core/partitions/mod.rs new file mode 100644 index 0000000000..2f871686bc --- /dev/null +++ b/sc-server/src/core/partitions/mod.rs @@ -0,0 +1,23 @@ +mod actions; +mod metadata; +mod controller; +mod reducer; + + +pub use self::actions::PartitionActions; +pub use self::actions::PartitionChangeRequest; +pub use self::metadata::{PartitionKV, PartitionLocalStore}; +pub use self::controller::PartitionController; + +use std::sync::Arc; +use ::metadata::partition::PartitionSpec; +use reducer::PartitionReducer; +use crate::core::common::WSAction; +use crate::core::common::LSChange; +use crate::k8::K8ClusterStateDispatcher; + +pub type K8PartitionChangeDispatcher = K8ClusterStateDispatcher; +pub type PartitionWSAction = WSAction; +pub type SharedPartitionStore = Arc; +pub type PartitionLSChange = LSChange; + diff --git a/sc-server/src/core/partitions/reducer.rs b/sc-server/src/core/partitions/reducer.rs new file mode 100644 index 0000000000..9719ee1757 --- /dev/null +++ b/sc-server/src/core/partitions/reducer.rs @@ -0,0 +1,513 @@ +//! +//! # Partition & Partitions Metadata +//! +//! Partition metadata information on cached in the local Controller. +//! +use log::trace; +use log::debug; +use log::error; +use log::warn; + +use types::log_on_err; +use metadata::partition::PartitionSpec; +use metadata::partition::PartitionResolution; +use metadata::partition::PartitionStatus; +use metadata::partition::ReplicaStatus; +use metadata::partition::ElectionPolicy; +use metadata::partition::ElectionScoring; +use internal_api::UpdateLrsRequest; + +use crate::conn_manager::ConnectionRequest; +use crate::conn_manager::PartitionSpecChange; +use crate::core::common::LSChange; +use crate::core::common::WSAction; +use crate::core::spus::SharedSpuLocalStore; +use crate::core::spus::SpuLocalStore; +use crate::core::spus::SpuKV; +use crate::ScServerError; + +use super::PartitionChangeRequest; +use super::PartitionActions; +use super::PartitionLocalStore; +use super::PartitionKV; +use super::SharedPartitionStore; + +type PartitionWSAction = WSAction; + +/// Given This is a generated partition from TopicController, It will try to allocate assign replicas +/// to live SPU. +/// ```ignore +/// Spec +/// name: Topic0-0 +/// replication: 2 +/// Status +/// state: Init +/// +/// Assuming there are 3 SPU's [0,1,2]. It will try allocate SPU and assign leader. +/// Rules are: +/// SPU id must be unique. +/// SPU leader must be evently distributed from other Partition. +/// +/// So after assignment, then Partition will look liks this +/// Spec +/// name: Topic0-0 +/// replication: 2 +/// Status +/// status: ready +/// spu: [0,1] +/// leader: 0 +/// +/// The SPU 0 then may be have replica map +/// +/// Spec +/// id: 0 +/// +/// Status +/// replicas: [Topic0-0] +/// +///``` +/// If there are another topic1 with same number of partiition and replica then, they will +/// have different leader because Topic0-0 already is using spu 0. 
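+/// +/// A hedged usage sketch (mirrors the unit test at the bottom of this file): +/// +/// ```ignore +/// let reducer = PartitionReducer::default(); +/// let requests: Actions<PartitionLSChange> = vec![ +///     PartitionLSChange::Add((("topic1", 0), vec![1, 2, 3]).into()), +/// ].into(); +/// let actions = reducer +///     .process_requests(PartitionChangeRequest::Partition(requests)) +///     .expect("actions"); +/// assert!(actions.partitions.count() > 0); +/// ```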
+#[derive(Debug)] +pub struct PartitionReducer { + partition_store: SharedPartitionStore, + spu_store: SharedSpuLocalStore, +} + +impl Default for PartitionReducer { + fn default() -> Self { + Self { + partition_store: PartitionLocalStore::new_shared(), + spu_store: SpuLocalStore::new_shared(), + } + } +} + +impl PartitionReducer { + pub fn new(partition_store: A, spu_store: B) -> Self + where + A: Into, + B: Into, + { + Self { + partition_store: partition_store.into(), + spu_store: spu_store.into(), + } + } + + /// + /// Process Partition Actions - dispatch to ADD/MOD/DEL handlers + /// + pub fn process_requests( + &self, + requests: PartitionChangeRequest, + ) -> Result { + trace!("Processing requests: {}", requests); + let mut actions = PartitionActions::default(); + + match requests { + PartitionChangeRequest::Partition(partition_requests) => { + for partiton_request in partition_requests.into_iter() { + match partiton_request { + LSChange::Add(partition) => { + log_on_err!(self.add_partition_action_handler(partition, &mut actions)); + } + + LSChange::Mod(new_partition, old_partition) => { + log_on_err!(self.mod_partition_action_handler( + new_partition, + old_partition, + &mut actions, + )); + } + + LSChange::Delete(partition) => { + log_on_err!(self.del_partition_action_handler(partition, &mut actions)); + } + } + } + } + PartitionChangeRequest::Spu(spu_requests) => { + for spu_request in spu_requests.into_iter() { + debug!("SPU LS: {}", spu_request); + trace!("SPU LS: {:#?}", spu_request); + match spu_request { + LSChange::Mod(new_spu, old_spu) => { + if old_spu.status.is_online() && new_spu.status.is_offline() { + self.force_election_spu_off(new_spu, &mut actions); + } else { + if old_spu.status.is_offline() && new_spu.status.is_online() { + self.force_election_spu_on(new_spu, &mut actions); + } + } + } + _ => {} + } + } + } + PartitionChangeRequest::LrsUpdate(lrs_status) => { + self.process_lrs_update_from_spu(lrs_status, &mut actions); + } + } + + Ok(actions) + } + + fn add_partition_action_handler( + &self, + mut partition: PartitionKV, + actions: &mut PartitionActions, + ) -> Result<(), ScServerError> { + debug!( + "Handling Add Partition: {}, sending to SPU", + partition.key() + ); + trace!("Add Partition: {:#?}", partition); + + actions + .conns + .push(ConnectionRequest::Partition(PartitionSpecChange::Add( + partition.key.clone(), + partition.spec.clone(), + ))); + // we set status to offline for all new partition until we know their status + partition.status.resolution = PartitionResolution::Offline; + debug!( + "Partition: {} set to offline because it is new", + partition.key() + ); + actions + .partitions + .push(PartitionWSAction::UpdateStatus(partition)); + Ok(()) + } + + /// + /// Modify Partition Action handler + /// + /// # Remarks + /// Action handler performs the following operations: + /// * update partition on local cluster cache + /// * generate message for replica SPUs + /// + fn mod_partition_action_handler( + &self, + new_partition: PartitionKV, + old_partition: PartitionKV, + actions: &mut PartitionActions, + ) -> Result<(), ScServerError> { + trace!("mod partition {:#?}", new_partition); + + // send out to SPU only if spec changes + if new_partition.spec != old_partition.spec { + debug!( + "Partition: {} spec changed, updating SPU", + new_partition.key() + ); + actions + .conns + .push(ConnectionRequest::Partition(PartitionSpecChange::Mod( + new_partition.key, + new_partition.spec, + old_partition.spec, + ))); + } else { + debug!( + "Parttion: {} status 
change only, doing nothing", + new_partition.key() + ); + } + + Ok(()) + } + + /// + /// Delete Partition Action handler + /// + /// # Remarks + /// Action handler performs the following operations: + /// * remove partition from local cluster cache + /// * generate message for live replicas + /// + fn del_partition_action_handler( + &self, + partition: PartitionKV, + _actions: &mut PartitionActions, + ) -> Result<(), ScServerError> { + debug!("DelPartition({}) - remove from metadata", partition.key()); + trace!("delete partition {:#?}", partition); + + /* + // notify msg for live replicas + if partition.has_live_replicas() { + notify_msg_for_live_replicas( + &mut actions.spu_messages, + MsgType::DELETE, + "DELETE", + partition.key(), + &partition, + partition.live_replicas(), + ); + } + */ + + Ok(()) + } + + fn process_lrs_update_from_spu( + &self, + lrs_req: UpdateLrsRequest, + actions: &mut PartitionActions, + ) { + debug!("updating lrs for replica: {}", lrs_req.id); + if self + .partition_store + .find_and_do(&lrs_req.id, |part_kv| { + let mut part_status_kv = part_kv.clone(); + let status = PartitionStatus::new2( + lrs_req.leader.clone(), + lrs_req.replicas.clone(), + PartitionResolution::Online, + ); + part_status_kv.status.merge(status); + actions + .partitions + .push(PartitionWSAction::UpdateStatus(part_status_kv)); + }) + .is_none() + { + error!("update lrs faild, no replia: {}", lrs_req.id); + } + } + + /// perform election when spu goes offline + fn force_election_spu_off(&self, offline_spu: SpuKV, actions: &mut PartitionActions) { + debug!( + "start election when spu went offline: {}", + offline_spu.key() + ); + let offline_leader_spu_id = offline_spu.spec.id; + + let spu_status = self.spu_store.online_status(); + + let policy = SimplePolicy::new(); + + // go thru each partitions whose leader matches offline spu. 
+ self.partition_store.visit_values(|partition_kv| { + // find partition who's leader is same as offline spu + if partition_kv.spec.leader == offline_leader_spu_id { + // find suitable leader + if let Some(candidate_leader) = partition_kv.status.candidate_leader(&spu_status, &policy) + { + debug!( + "suitable leader has found: {} leader: {}", + partition_kv.key(), + candidate_leader + ); + let mut part_kv_change = partition_kv.clone(); + part_kv_change.spec.leader = candidate_leader; + actions + .partitions + .push(PartitionWSAction::UpdateSpec(part_kv_change)); + // change the + } else { + warn!("no suitable leader has found: {}", partition_kv.key()); + let mut part_kv_change = partition_kv.clone(); + part_kv_change.status.resolution = PartitionResolution::LeaderOffline; + actions + .partitions + .push(PartitionWSAction::UpdateStatus(part_kv_change)); + } + } + }); + } + + /// perform election when spu become online + fn force_election_spu_on(&self, online_spu: SpuKV, actions: &mut PartitionActions) { + debug!("start election spu went online: {}", online_spu.key()); + let online_leader_spu_id = online_spu.spec.id; + + let policy = SimplePolicy::new(); + // go thru each partitions which are not online and try to promote given online spu + + self.partition_store.visit_values(|partition_kv| { + if partition_kv.status.is_offline() { + // we only care about partition who is follower since, leader will set partition status when it start up + if partition_kv.spec.leader != online_leader_spu_id { + for replica_status in partition_kv.status.replica_iter() { + if replica_status.spu == online_leader_spu_id + && policy + .potential_leader_score( + &replica_status, + &partition_kv.status.leader, + ) + .is_suitable() + { + debug!( + "suitable leader has found: {} leader: {}", + partition_kv.key(), + online_leader_spu_id + ); + let mut part_kv_change = partition_kv.clone(); + part_kv_change.spec.leader = online_leader_spu_id; + actions + .partitions + .push(PartitionWSAction::UpdateSpec(part_kv_change)); + } + } + } + } + }); + } +} + +struct SimplePolicy {} + +impl SimplePolicy { + fn new() -> Self { + SimplePolicy {} + } +} + +impl ElectionPolicy for SimplePolicy { + fn potential_leader_score( + &self, + replica_status: &ReplicaStatus, + leader: &ReplicaStatus, + ) -> ElectionScoring { + let lag = leader.leo - replica_status.leo; + if lag < 4 { + ElectionScoring::Score(lag as u16) + } else { + ElectionScoring::NotSuitable + } + } +} + +// ----------------------------------- +// Unit Tests +// >> utils::init_logger(); +// >> RUST_LOG=sc_server=trace cargo test +// ----------------------------------- + +#[cfg(test)] +pub mod test { + use utils::actions::Actions; + + use super::PartitionReducer; + use super::PartitionChangeRequest; + use super::PartitionWSAction; + use super::super::PartitionLSChange; + + #[test] + fn test_process_partition_actions_without_partitions() { + // utils::init_logger(); + + let partition_reducer = PartitionReducer::default(); + + let partition_requests: Actions = vec![ + // action, (topic,replica), (leader,lrs) + PartitionLSChange::Add((("topic1", 0), vec![1, 2, 3]).into()), + PartitionLSChange::Add((("topic1", 1), vec![2, 3, 1]).into()), + ] + .into(); + + // Run Test + let _actions = partition_reducer + .process_requests(PartitionChangeRequest::Partition(partition_requests)) + .expect("actions"); + + // partitions + let _expected_partitions: Actions = vec![ + PartitionWSAction::UpdateStatus((("topic1", 0), vec![1, 2, 3]).into()), + PartitionWSAction::UpdateStatus((("topic1", 
1), vec![2, 3, 1]).into()), + ] + .into(); + + // assert_eq!(actions.partitions,expected_partitions); + + // leader message queue + /* + TODO: Fix this + let expected_msgs_for_select_spus: SpuNotifyById = SpuNotifyById::default(); + let mut leader_msgs = gen_leader_msg_vec(vec![ + //action, name, leader, live_replicas + (TAction::UPDATE, ("topic1", 0), 1, vec![1, 2, 3]), + (TAction::UPDATE, ("topic1", 1), 2, vec![2, 3, 1]), + ]); + expected_msgs_for_select_spus.push(&2, leader_msgs.pop().unwrap()); + expected_msgs_for_select_spus.push(&1, leader_msgs.pop().unwrap()); + + assert_eq!( + msgs_for_spus, + expected_msgs_for_select_spus + ); + */ + } + + /* + #[test] + fn test_process_partition_actions_with_partitions() { + // utils::init_logger(); + + let partitions = create_partitions(vec![ + // topic, idx, epoch, replicas + (("topic1", 0), 0, vec![0, 1, 2]), + (("topic1", 1), 0, vec![2, 3, 1]), + (("topic2", 0), 0, vec![1, 2, 0]), + ]); + let partition_actions = create_partition_actions(&vec![ + // action, topic, idx, (epoch lrs), Some(epoch, lrs) + (TAction::ADD, "topic1", 0, (5, vec![0, 1, 2]), None), + ( + TAction::MOD, + "topic1", + 1, + (1, vec![2, 3, 1]), + Some((0, vec![2, 3, 1])), + ), + (TAction::DEL, "topic2", 0, (0, vec![1, 2, 0]), None), + ]); + let mut ctx = PartitionContext::default().with_partition_actions(partition_actions); + + // Run Test + let res = process_partition_actions( + &partitions, + ctx.partition_actions(), + ctx.actions_for_kvs() + ); + + // Validate Result + assert_eq!(res.is_ok(), true); + + // partitions + let expected_partitions = create_partitions(vec![ + // topic, idx, epoch, replicas + (("topic1", 0), 5, vec![0, 1, 2]), + (("topic1", 1), 1, vec![2, 3, 1]), + ]); + assert_eq!(partitions, expected_partitions); + + // partition kvs actions + let expected_partition_actions: Actions = Actions::default(); + assert_eq!(ctx.takeover_actions_for_kvs(), expected_partition_actions); + + // leader messages + let expected_msgs_for_select_spus: SpuNotifyById = SpuNotifyById::default(); + let mut leader_msgs = gen_leader_msg_vec(vec![ + //action, name, epoch, leader, live_replicas + (TAction::UPDATE, ("topic1", 0), 5, 0, vec![0, 1, 2]), + (TAction::DEL, ("topic2", 0), 0, 1, vec![1, 2, 0]), + (TAction::UPDATE, ("topic1", 1), 1, 2, vec![2, 3, 1]), + ]); + + expected_msgs_for_select_spus.push(&2, leader_msgs.pop().unwrap()); + expected_msgs_for_select_spus.push(&1, leader_msgs.pop().unwrap()); + expected_msgs_for_select_spus.push(&0, leader_msgs.pop().unwrap()); + assert_eq!( + ctx.takeover_msgs_for_select_spus(), + expected_msgs_for_select_spus + ); + } + */ +} diff --git a/sc-server/src/core/partitions/replica_msgs_for_spus.rs b/sc-server/src/core/partitions/replica_msgs_for_spus.rs new file mode 100644 index 0000000000..443e183cf1 --- /dev/null +++ b/sc-server/src/core/partitions/replica_msgs_for_spus.rs @@ -0,0 +1,22 @@ +//! +//! # Send Update Replica Leader Request Handlers +//! +//! Establishes connetion with Spus and sends Update Replica Leader Request +//! 
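+//!
+//! # Example (illustrative sketch)
+//!
+//! The flow this module implements is: batch `ReplicaMsg`s into a `ReplicaMsgs`
+//! group per target SPU, then send the group to that SPU as a
+//! `RequestMessage<UpdateReplicaRequest>`. The constructor, helper, and
+//! conversion below are assumptions for illustration only; this diff does not
+//! show the concrete API:
+//!
+//! ```ignore
+//! let mut replica_msgs = ReplicaMsgs::default();  // assumed constructor
+//! replica_msgs.push(ReplicaMsg::update(replica)); // assumed helper
+//! let request: RequestMessage<UpdateReplicaRequest> = replica_msgs.into(); // assumed conversion
+//! ```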
+use std::sync::Arc; + +use kf_protocol::api::RequestMessage; + +use internal_api::messages::{ReplicaMsg, ReplicaMsgs}; +use internal_api::UpdateReplicaRequest; + +use error::ServerError; +use log::{debug, error, trace}; +use log::warn; +use utils::actions::Actions; + +use crate::core::common::spu_notify_by_id::SpuNotifyById; +use crate::core::spus::{Spu, Spus}; + use crate::conn_manager::SpuConnections; +use crate::hc_manager::HcAction; + diff --git a/sc-server/src/core/rflow/mod.rs b/sc-server/src/core/rflow/mod.rs new file mode 100644 index 0000000000..5aec37c54d --- /dev/null +++ b/sc-server/src/core/rflow/mod.rs @@ -0,0 +1,23 @@ +mod store; +mod rflow; + + +pub use store::MemStore; + +pub trait FlowSpec{ + + type Key; +} + +pub trait FlowStatus{} + + + + +/// interface to metadata backend +/// +pub trait KVService { + + + //fn watch(&self) -> ResponseFuture +} \ No newline at end of file diff --git a/sc-server/src/core/rflow/rflow.rs b/sc-server/src/core/rflow/rflow.rs new file mode 100644 index 0000000000..ad8b722f8c --- /dev/null +++ b/sc-server/src/core/rflow/rflow.rs @@ -0,0 +1,53 @@ +use std::sync::Arc; + +use futures::channel::mpsc::Receiver; + +use future_helper::spawn; + +use super::KVService; +use super::MemStore; +use super::FlowSpec; + + + +/// Based on Kubernetes Informer +/// Maintains Cache of object +/// it populates the cache from kv store +/// if it detects diff then it fireoff controller +pub struct FlowController where S: FlowSpec { + store: Arc>, + receiver: Receiver, + kv_service: KV +} + + +impl FlowController + where S: FlowSpec + Sync + Send + 'static, + ::Key: Sync + Send + 'static, + P: Sync + Send + 'static, + KV: KVService + Sync + Send + 'static + +{ + + /// start the controller with ctx and receiver + pub fn run(store: Arc>, kv_service: KV, receiver: Receiver) { + + let controller = Self { + store, + receiver, + kv_service + }; + + spawn(controller.inner_run()); + } + + async fn inner_run(mut self) -> Result<(), ()> { + + + // let mut auth_token_stream = self.kv_service + // .watch_stream::(); + Ok(()) + + } + +} \ No newline at end of file diff --git a/sc-server/src/core/rflow/store.rs b/sc-server/src/core/rflow/store.rs new file mode 100644 index 0000000000..d50677c3dd --- /dev/null +++ b/sc-server/src/core/rflow/store.rs @@ -0,0 +1,8 @@ +use utils::SimpleConcurrentBTreeMap; + +use crate::core::common::KVObject; + +use super::FlowSpec; + +#[derive(Debug)] +pub struct MemStore(SimpleConcurrentBTreeMap>) where S: FlowSpec ; diff --git a/sc-server/src/core/spus/actions.rs b/sc-server/src/core/spus/actions.rs new file mode 100644 index 0000000000..eecea7d845 --- /dev/null +++ b/sc-server/src/core/spus/actions.rs @@ -0,0 +1,55 @@ +//! +//! # SPU Actions +//! +//! SPU action definition and processing handlers +//! 
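+//!
+//! # Example (sketch)
+//!
+//! A change request wraps either a batch of local-store changes or a single
+//! connection status change, mirroring the `SpuChangeRequest` enum defined in
+//! this module:
+//!
+//! ```ignore
+//! let ls_changes: Actions<SpuLSChange> = Actions::default();
+//! let request = SpuChangeRequest::SpuLS(ls_changes);
+//! println!("{}", request); // Display prints the change count: "SPU LS: 0"
+//! ```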
+use std::fmt; + + +use metadata::spu::SpuSpec; + +use utils::actions::Actions; + + +use crate::core::common::WSAction; +use crate::conn_manager::SpuConnectionStatusChange; +use crate::conn_manager::ConnectionRequest; + +use super::SpuLSChange; + +#[derive(Debug, PartialEq, Clone)] +pub enum SpuChangeRequest { + SpuLS(Actions), + Conn(SpuConnectionStatusChange) +} + + +impl fmt::Display for SpuChangeRequest { + + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + SpuChangeRequest::SpuLS(req) => write!(f,"SPU LS: {}",req.count()), + SpuChangeRequest::Conn(req) => write!(f,"Conn LS: {}",req) + } + } +} + + + +#[derive(Debug,Default)] +pub struct SpuActions { + pub spus: Actions>, + pub conns: Actions, +} + +impl fmt::Display for SpuActions { + + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, + "SPU WS: {}, Conn Mgr:: {}, ", + self.spus.count(), + self.conns.count(), + ) + } +} + diff --git a/sc-server/src/core/spus/controller.rs b/sc-server/src/core/spus/controller.rs new file mode 100644 index 0000000000..e35bbd4e04 --- /dev/null +++ b/sc-server/src/core/spus/controller.rs @@ -0,0 +1,250 @@ +//! +//! # Spu Controller + + +use log::debug; +use log::info; +use log::trace; +use log::error; +use futures::channel::mpsc::Receiver; +use futures::channel::mpsc::Sender; +use futures::channel::mpsc::channel; +use futures::select; +use futures::stream::StreamExt; + +use future_helper::spawn; +use utils::actions::Actions; +use metadata::spu::SpuSpec; +use types::log_on_err; + + +use crate::core::WSUpdateService; +use crate::conn_manager::SharedConnManager; +use crate::conn_manager::SpuConnectionStatusChange; +use crate::core::ShareLocalStores; +use crate::core::common::LSChange; +use crate::core::WSChangeChannel; + +use super::SpuReducer; +use super::SpuChangeRequest; + + +#[derive(Debug)] +pub struct SpuController { + local_stores: ShareLocalStores, + conn_manager: SharedConnManager, + ws_service: W, + spu_reducer: SpuReducer, + lc_receiver: Receiver>>, + conn_receiver: Receiver, + conn_sender: Sender +} + +impl SpuController + + where + W: WSUpdateService + Send + 'static + +{ + + pub fn new( + local_stores: ShareLocalStores, + conn_manager: SharedConnManager, + lc_receiver: WSChangeChannel, + ws_service: W + ) -> Self { + + let (conn_sender,conn_receiver) = channel(100); + + Self { + spu_reducer: SpuReducer::new(local_stores.spus().clone()), + local_stores: local_stores, + ws_service: ws_service, + conn_manager, + lc_receiver, + conn_receiver, + conn_sender + + } + } + + pub fn conn_sender(&self) -> Sender { + self.conn_sender.clone() + } + + + pub fn run(self) { + + let ft = async move { + self.dispatch_loop().await; + }; + + spawn(ft); + } + + async fn dispatch_loop(mut self) { + + loop { + select! { + receiver_req = self.lc_receiver.next() => { + match receiver_req { + None => { + error!("Listener LC dispatcher has been terminated. Ending Server loop"); + break; + }, + Some(request) => { + trace!("received SPU request from ws dispatcher: {:#?}",request); + self.process_request(SpuChangeRequest::SpuLS(request)).await; + }, + } + }, + conn_req = self.conn_receiver.next() => { + match conn_req { + None => { + error!("Listener to Conn Mgr has been terminated. 
Ending Server loop"); + break; + }, + Some(request) => { + trace!("received request from conn manager: {:#?}",request); + self.process_request(SpuChangeRequest::Conn(request)).await; + } + } + } + complete => {}, + } + } + + info!("spu controller is terminated"); + + } + + + /// process requests related to SPU management + async fn process_request(&mut self, request: SpuChangeRequest) { + + // process SPU action; update context with new actions + match self.spu_reducer.process_requests(request) { + + Ok(actions) => { + + debug!("SPU Controller apply actions: {}",actions); + + // send actions to kv + if actions.spus.count() > 0 { + for ws_action in actions.spus.into_iter() { + log_on_err!( + self.ws_service.update_spu(ws_action).await + ); + } + } + + if actions.conns.count() > 0 { + self.conn_manager.process_requests(actions.conns).await; + } + + }, + Err(err) => error!("error generating spu actions from reducer: {}",err) + } + + } + + + +} + + + + +/* +#[cfg(test)] +mod tests { + + use log::debug; + use futures::channel::mpsc::channel; + use futures::channel::mpsc::Receiver; + + use future_helper::test_async; + use utils::actions::Actions; + use metadata::spu::SpuSpec; + + use crate::cli::ScConfig; + use crate::core::ScMetadata; + use crate::core::ScContext; + use crate::core::ScRequest; + use crate::core::auth_tokens::AuthTokenAction; + use crate::core::spus::SpuAction; + use crate::core::spus::SpuKV; + use crate::tests::fixture::MockConnectionManager; + use crate::tests::fixture::SharedKVStore; + use crate::core::send_channels::ScSendChannels; + use crate::core::common::new_channel; + use crate::hc_manager::HcAction; + + use super::ScController; + + fn sample_controller() -> ( + ScController, + (Receiver, Receiver>), + ) { + let default_config = ScConfig::default(); + let metadata = ScMetadata::shared_metadata(default_config); + let conn_manager = MockConnectionManager::shared_conn_manager(); + + let (sc_sender, sc_receiver) = channel::(100); + let (hc_sender, hc_receiver) = new_channel::>(); + let kv_store = SharedKVStore::new(sc_sender.clone()); + let sc_send_channels = ScSendChannels::new(hc_sender.clone()); + + ( + ScController::new( + ScContext::new(sc_send_channels, conn_manager.clone()), + metadata.clone(), + kv_store.clone(), + ), + (sc_receiver, hc_receiver), + ) + } + + /// test add new spu + #[test_async] + async fn test_controller_basic() -> Result<(), ()> { + let (mut controller, _other) = sample_controller(); + + let auth_token_actions: Actions = Actions::default(); + + let spu: SpuKV = SpuSpec::new(5000).into(); + let mut spu_actions: Actions = Actions::default(); + + spu_actions.push(SpuAction::AddSpu(spu.spec().name(), spu.clone())); + /* + let topic_actions: Actions = Actions::default(); + for (name,topic) in self.kv.topics.read().iter() { + topic_actions.push(TopicAction::AddTopic(name.clone(),topic.clone())); + } + + let partition_actions: Actions = Actions::default(); + for (key,partition) in self.kv.partitions.read().iter() { + partition_actions.push(PartitionAction::AddPartition(key.clone(),partition.clone())); + } + */ + controller + .process_sc_request(ScRequest::UpdateAll( + auth_token_actions, + spu_actions, + Actions::default(), + Actions::default(), + )) + .await; + + let metadata = controller.metadata(); + debug!("metadata: {:#?}", metadata); + + // metdata should container new spu + assert!(metadata.spus().spu("spu-5000").is_some()); + + Ok(()) + } + + +} +*/ \ No newline at end of file diff --git a/sc-server/src/core/spus/metadata.rs 
b/sc-server/src/core/spus/metadata.rs new file mode 100644 index 0000000000..87f4e295ad --- /dev/null +++ b/sc-server/src/core/spus/metadata.rs @@ -0,0 +1,711 @@ +//! +//! # SC Spu Metadata +//! +//! Spu metadata information cached locally. +//! +use std::collections::BTreeMap; +use std::collections::HashSet; +use std::iter::FromIterator; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use types::socket_helpers::ServerAddress; +use types::SpuId; +use metadata::spu::{Endpoint, SpuSpec, SpuStatus}; +use k8_metadata::core::Spec as K8Spec; +use k8_metadata::core::metadata::K8Obj; +use k8_metadata::spu::SpuSpec as K8SpuSpec; +use internal_api::messages::SpuMsg; + +use crate::core::common::LocalStore; +use crate::core::common::KVObject; +use crate::core::Spec; +use crate::core::Status; +use crate::k8::default_convert_from_k8; + +impl Spec for SpuSpec { + const LABEL: &'static str = "SPU"; + type Key = String; + type Status = SpuStatus; + type K8Spec = K8SpuSpec; + type Owner = SpuSpec; + + fn convert_from_k8( + k8_obj: K8Obj::Status>, + ) -> Result, IoError> { + default_convert_from_k8(k8_obj) + } +} + +impl Status for SpuStatus {} + +// ----------------------------------- +// Data Structures +// ----------------------------------- +pub type SpuKV = KVObject; + +// ----------------------------------- +// Spu - Implementation +// ----------------------------------- + +impl SpuKV { + // + // Accessors + // + pub fn id(&self) -> &i32 { + &self.spec.id + } + + pub fn name(&self) -> &String { + &self.key + } + + pub fn rack(&self) -> Option<&String> { + match &self.spec.rack { + Some(rack) => Some(rack), + None => None, + } + } + + pub fn rack_clone(&self) -> Option { + match &self.spec.rack { + Some(rack) => Some(rack.clone()), + None => None, + } + } + + pub fn public_endpoint(&self) -> &Endpoint { + &self.spec.public_endpoint + } + + pub fn private_endpoint(&self) -> &Endpoint { + &self.spec.private_endpoint + } + + pub fn resolution_label(&self) -> &'static str { + self.status.resolution_label() + } + + pub fn type_label(&self) -> String { + self.spec.type_label() + } + + pub fn is_custom(&self) -> bool { + self.spec.is_custom() + } + + pub fn is_managed(&self) -> bool { + !self.spec.is_custom() + } + + pub fn private_server_address(&self) -> ServerAddress { + let private_ep = self.private_endpoint(); + ServerAddress { + host: private_ep.host.clone(), + port: private_ep.port, + } + } + + pub fn set_rack(&mut self, rack: Option<&String>) { + match rack { + Some(r) => self.spec.rack = Some(r.clone()), + None => self.spec.rack = None, + } + } + + pub fn set_public_endpoint(&mut self, public_ep: &Endpoint) { + self.spec.public_endpoint = public_ep.clone(); + } + + pub fn set_private_endpoint(&mut self, private_ep: &Endpoint) { + self.spec.private_endpoint = private_ep.clone(); + } +} + +/// used in the bulk add scenario +impl From<(J, i32, bool, Option)> for SpuKV +where + J: Into, +{ + fn from(spu: (J, i32, bool, Option)) -> Self { + let mut spec = SpuSpec::default(); + spec.id = spu.1; + spec.rack = spu.3; + + let mut status = SpuStatus::default(); + if spu.2 { + status.set_online(); + } + + Self::new(spu.0.into(), spec, status) + } +} + +pub type SpuLocalStore = LocalStore; + +// ----------------------------------- +// Spus - Implementation +// ----------------------------------- + +impl SpuLocalStore { + /// update the spec + pub fn update_spec(&self, name: &str, other_spu: &SpuKV) -> Result<(), IoError> { + if let Some(spu) = (*self.inner_store().write()).get_mut(name) { + if 
spu.id() != other_spu.id() {
+                Err(IoError::new(
+                    ErrorKind::InvalidData,
+                    format!("spu '{}': id is immutable", name),
+                ))
+            } else {
+                if spu.rack() != other_spu.rack() {
+                    spu.set_rack(other_spu.rack());
+                }
+                if spu.public_endpoint() != other_spu.public_endpoint() {
+                    spu.set_public_endpoint(other_spu.public_endpoint());
+                }
+                if spu.private_endpoint() != other_spu.private_endpoint() {
+                    spu.set_private_endpoint(other_spu.private_endpoint());
+                }
+                spu.set_ctx(other_spu.kv_ctx());
+
+                Ok(())
+            }
+        } else {
+            Err(IoError::new(
+                ErrorKind::InvalidData,
+                format!("spu '{}': not found, cannot update", name)
+            ))
+        }
+    }
+
+    // do bulk add;
+    // assume each spu key starts with the spu id ("spu-{id}")
+    #[cfg(test)]
+    pub fn bulk_add(&self, spus: Vec<(i32, bool, Option<String>)>) {
+        for (spu_id, online, rack) in spus.into_iter() {
+            let spu_key = format!("spu-{}", spu_id);
+            let spu: SpuKV = (spu_key, spu_id, online, rack).into();
+            self.insert(spu);
+        }
+    }
+
+    // build a set of ids for online spus
+    pub fn online_status(&self) -> HashSet<SpuId> {
+        let mut status = HashSet::new();
+        for (_, spu) in self.inner_store().read().iter() {
+            if spu.status.is_online() {
+                status.insert(*spu.id());
+            }
+        }
+        status
+    }
+
+    /// count online SPUs
+    pub fn online_spu_count(&self) -> i32 {
+        self.inner_store()
+            .read()
+            .values()
+            .filter_map(|spu| {
+                if spu.status.is_online() {
+                    Some(1)
+                } else {
+                    None
+                }
+            })
+            .sum()
+    }
+
+    /// count spus that can be used for replica
+    pub fn spu_used_for_replica(&self) -> i32 {
+        self.count()
+    }
+
+    // retrieve ids of online SPUs
+    pub fn online_spu_ids(&self) -> Vec<i32> {
+        self.inner_store()
+            .read()
+            .values()
+            .filter_map(|spu| {
+                if spu.status.is_online() {
+                    Some(*spu.id())
+                } else {
+                    None
+                }
+            })
+            .collect()
+    }
+
+    // find spu ids that can be used for a replica
+    pub fn spu_ids_for_replica(&self) -> Vec<i32> {
+        self.inner_store()
+            .read()
+            .values()
+            .filter_map(|spu| Some(*spu.id()))
+            .collect()
+    }
+
+    pub fn online_spus(&self) -> Vec<SpuKV> {
+        self.inner_store()
+            .read()
+            .values()
+            .filter_map(|spu| {
+                if spu.status.is_online() {
+                    Some(spu.clone())
+                } else {
+                    None
+                }
+            })
+            .collect()
+    }
+
+    pub fn custom_spus(&self) -> Vec<SpuKV> {
+        self.inner_store()
+            .read()
+            .values()
+            .filter_map(|spu| {
+                if spu.is_custom() {
+                    Some(spu.clone())
+                } else {
+                    None
+                }
+            })
+            .collect()
+    }
+
+    pub fn spu(&self, name: &str) -> Option<SpuKV> {
+        match (*self.inner_store().read()).get(name) {
+            Some(spu) => Some(spu.clone()),
+            None => None,
+        }
+    }
+
+    pub fn get_by_id(&self, id: &i32) -> Option<SpuKV> {
+        for (_, spu) in (*self.inner_store().read()).iter() {
+            if spu.id() == id {
+                return Some(spu.clone());
+            }
+        }
+        None
+    }
+
+    // check if spu can be registered
+    pub fn validate_spu_for_registered(&self, id: &SpuId) -> bool {
+        for (_, spu) in (self.inner_store().read()).iter() {
+            if spu.id() == id && spu.status.is_offline() {
+                return true;
+            }
+        }
+        false
+    }
+
+    // check if the given id range conflicts with a spu not owned by owner_uid
+    pub fn is_conflict(&self, owner_uid: &str, start: i32, end_exclusive: i32) -> Option<i32> {
+        for (_, spu) in (self.inner_store().read()).iter() {
+            if !spu.is_owned(owner_uid) {
+                let id = *spu.id();
+                if id >= start && id < end_exclusive {
+                    return Some(id);
+                }
+            }
+        }
+        None
+    }
+
+    #[cfg(test)]
+    pub fn all_spu_count(&self) -> i32 {
+        self.inner_store().read().len() as i32
+    }
+
+    pub fn all_names(&self) -> Vec<String> {
+        self.inner_store().read().keys().cloned().collect()
+    }
+
+    pub fn table_fmt(&self) -> String {
+        let mut table = String::new();
+
+        let hdr = format!(
+            "{n:<18} {d:<6} {s:<10} {t:<8} {p:<16}
{i:<16} {r}\n", + n = "SPU", + d = "SPU-ID", + s = "STATUS", + t = "TYPE", + p = "PUBLIC", + i = "PRIVATE", + r = "RACK", + ); + table.push_str(&hdr); + + for (name, spu) in self.inner_store().read().iter() { + let rack = match spu.rack() { + Some(rack) => rack.clone(), + None => String::from(""), + }; + let row = format!( + "{n:<18} {d:^6} {s:<10} {t:<8} {p:<16} {i:<16} {r}\n", + n = name.clone(), + d = spu.id(), + s = spu.resolution_label(), + t = spu.type_label().clone(), + p = spu.public_endpoint(), + i = spu.private_endpoint(), + r = rack, + ); + table.push_str(&row); + } + + table + } + + /// number of spus in rack count + pub fn spus_in_rack_count(&self) -> i32 { + self.inner_store() + .read() + .values() + .filter_map(|spu| if spu.rack().is_some() { Some(1) } else { None }) + .sum() + } + + // Returns array of touples [("r1", [0,1,2]), ("r2", [3,4]), ("r3", [5])] + pub fn live_spu_rack_map_sorted(spus: &SpuLocalStore) -> Vec<(String, Vec)> { + let rack_map = SpuLocalStore::online_spu_rack_map(spus); + let mut racked_vector = Vec::from_iter(rack_map); + racked_vector.sort_by(|a, b| b.1.len().cmp(&a.1.len())); + racked_vector + } + + // Return a list of spu ids sorted by rack ["r1":[0,1,2], "r2":[3,4], "r3":[5]] + fn online_spu_rack_map(spus: &SpuLocalStore) -> BTreeMap> { + let mut rack_spus: BTreeMap> = BTreeMap::new(); + + for spu in spus.inner_store().read().values() { + if let Some(rack) = spu.rack() { + let mut ids: Vec; + let mut ids_in_map = rack_spus.remove(rack); + if ids_in_map.is_some() { + ids = ids_in_map.as_mut().unwrap().to_vec(); + ids.push(*spu.id()); + } else { + ids = vec![*spu.id()]; + } + ids.sort(); + rack_spus.insert(rack.clone(), ids); + } + } + + rack_spus + } + + // Returns a list of rack inter-leaved spus [0, 4, 5, 1, 3, 2] + pub fn online_spus_in_rack(rack_map: &Vec<(String, Vec)>) -> Vec<(i32)> { + let mut spus = vec![]; + let row_max = rack_map.len(); + let col_max = rack_map.iter().map(|(_, list)| list.len()).max().unwrap(); + let mut row_idx = 0; + let mut col_idx = 0; + + for idx in 0..(row_max * col_max) { + let row_list: &Vec = rack_map.get(row_idx).unwrap().1.as_ref(); + let spu_id = row_list[col_idx % row_list.len()]; + let duplicate = spus.iter().find(|&&id| id == spu_id).map(|_| true); + if duplicate.is_none() { + spus.push(spu_id); + } + row_idx = (row_idx + 1) % row_max; + col_idx = (((idx + 1) / row_max) + row_idx) % col_max; + } + + spus + } + + /// Encode all online SPUs to SPU Messages + pub fn all_spus_to_spu_msgs(&self) -> Vec { + self.all_specs() + .into_iter() + .map(|spu_spec| SpuMsg::update(spu_spec.into())) + .collect() + } +} + +#[cfg(test)] +impl From)>> for SpuLocalStore { + fn from(spus: Vec<(i32, bool, Option)>) -> Self { + let store = Self::default(); + store.bulk_add(spus); + store + } +} + +// ----------------------------------- +// Unit Tests +// ----------------------------------- + +#[cfg(test)] +pub mod test { + use metadata::spu::{SpuSpec, SpuStatus}; + + use super::{SpuKV, SpuLocalStore}; + + #[test] + fn test_spu_inquiry_online_offline_count() { + let online_spu: SpuKV = ("spu-0", 0, true, None).into(); + let offline_spu: SpuKV = ("spu-1", 1, false, None).into(); + let no_status_spu: SpuKV = ("spu-2", 5001, false, None).into(); + + assert_eq!(online_spu.status.is_online(), true); + assert_eq!(offline_spu.status.is_online(), false); + assert_eq!(no_status_spu.status.is_online(), false); + + let spus = SpuLocalStore::default(); + spus.insert(online_spu); + spus.insert(offline_spu); + spus.insert(no_status_spu); + + 
assert_eq!(spus.all_spu_count(), 3); + assert_eq!(spus.online_spu_count(), 1); + } + + #[test] + fn test_spu_status_updates_online_offline() { + let mut test_spu: SpuKV = ("spu", 10, false, None).into(); + assert_eq!(*test_spu.id(), 10); + + test_spu.status.set_online(); + assert_eq!(test_spu.status.is_online(), true); + + test_spu.status.set_offline(); + assert_eq!(test_spu.status.is_online(), false); + } + + #[test] + fn test_delete_spu_from_local_cache() { + let online_spu: SpuKV = ("spu-0", 0, true, None).into(); + let offline_spu: SpuKV = ("spu-1", 1, false, None).into(); + + let spus = SpuLocalStore::default(); + spus.insert(online_spu); + spus.insert(offline_spu); + + assert_eq!(spus.online_spu_count(), 1); + assert_eq!(spus.all_spu_count(), 2); + + spus.delete("spu-0"); + + assert_eq!(spus.online_spu_count(), 0); + assert_eq!(spus.all_spu_count(), 1); + } + + #[test] + fn test_update_spu_spec_in_local_cache() { + let spu_0 = ("spu-0", 0, false, None).into(); + let spu_1 = ("spu-1", 1, false, None).into(); + + let mut other_spec = SpuSpec::default(); + other_spec.id = 1; + other_spec.rack = Some("rack".to_string()); + let other_spu = SpuKV::new("spu-1", other_spec, SpuStatus::default()); + + let spus = SpuLocalStore::default(); + spus.insert(spu_0); + spus.insert(spu_1); + + // run test + let res = spus.update_spec("spu-1", &other_spu); + assert_eq!(res.is_ok(), true); + + // test result + let updated_spu = spus.spu("spu-1").unwrap(); + assert_eq!(updated_spu, other_spu); + } + + #[test] + fn test_update_spu_status_in_local_cache() { + let online: SpuKV = ("spu-0", 0, true, None).into(); + let offline: SpuKV = ("spu-1", 1, false, None).into(); + let offline2: SpuKV = ("spu-3", 2, false, None).into(); + + assert_eq!(online.status.is_online(), true); + assert_eq!(offline.status.is_online(), false); + + let spus = SpuLocalStore::default(); + spus.insert(online.clone()); + spus.insert(offline.clone()); + spus.insert(offline2); + assert_eq!(spus.all_spu_count(), 3); + assert_eq!(spus.online_spu_count(), 1); + + //test - not found + let res = spus.update_status("spu-9", offline.status.clone()); + assert_eq!( + res.unwrap_err().to_string(), + "SPU 'spu-9': not found, cannot update" + ); + + // [online] -> [offline] + let res = spus.update_status("spu-0", offline.status.clone()); + let spu = spus.spu("spu-0"); + assert_eq!(res.is_ok(), true); + assert_eq!(spus.all_spu_count(), 3); + assert_eq!(spus.online_spu_count(), 0); + assert_eq!(spu.unwrap().status.is_online(), false); + + // [offline] -> [online] + let res = spus.update_status("spu-3", online.status.clone()); + let spu = spus.spu("spu-3"); + assert_eq!(res.is_ok(), true); + assert_eq!(spus.all_spu_count(), 3); + assert_eq!(spus.online_spu_count(), 1); + assert_eq!(spu.unwrap().status.is_online(), true); + } + + #[test] + fn rack_map_test_racks_3_spus_6_unbalanced() { + let r1 = String::from("r1"); + let r2 = String::from("r2"); + let r3 = String::from("r3"); + + let spus = vec![ + (0, true, Some(r1.clone())), + (1, true, Some(r1.clone())), + (2, true, Some(r1.clone())), + (3, true, Some(r2.clone())), + (4, true, Some(r2.clone())), + (5, true, Some(r3.clone())), + ] + .into(); + + // run test + let rack_map = SpuLocalStore::live_spu_rack_map_sorted(&spus); + let spu_list = SpuLocalStore::online_spus_in_rack(&rack_map); + + // validate result + let expected_map: Vec<(String, Vec)> = vec![ + (r1.clone(), vec![0, 1, 2]), + (r2.clone(), vec![3, 4]), + (r3.clone(), vec![5]), + ]; + let expected_list = vec![0, 4, 5, 1, 3, 2]; + + 
assert_eq!(6, spus.all_spu_count()); + assert_eq!(6, spus.online_spu_count()); + assert_eq!(expected_map, rack_map); + assert_eq!(expected_list, spu_list); + } + + #[test] + fn rack_map_test_racks_5_spus_10_unbalanced() { + let r1 = String::from("r1"); + let r2 = String::from("r2"); + let r3 = String::from("r3"); + let r4 = String::from("r4"); + let r5 = String::from("r5"); + + let spus = vec![ + (0, true, Some(r1.clone())), + (1, true, Some(r1.clone())), + (2, true, Some(r1.clone())), + (3, true, Some(r1.clone())), + (4, true, Some(r2.clone())), + (5, true, Some(r2.clone())), + (6, true, Some(r3.clone())), + (7, true, Some(r3.clone())), + (8, true, Some(r4.clone())), + (9, true, Some(r5.clone())), + ] + .into(); + + // run test + let rack_map = SpuLocalStore::live_spu_rack_map_sorted(&spus); + let spu_list = SpuLocalStore::online_spus_in_rack(&rack_map); + + // validate result + let expected_map: Vec<(String, Vec)> = vec![ + (r1.clone(), vec![0, 1, 2, 3]), + (r2.clone(), vec![4, 5]), + (r3.clone(), vec![6, 7]), + (r4.clone(), vec![8]), + (r5.clone(), vec![9]), + ]; + let expected_list = vec![0, 5, 6, 8, 9, 1, 4, 7, 2, 3]; + + assert_eq!(rack_map, expected_map); + assert_eq!(spu_list, expected_list); + } + + #[test] + fn rack_map_test_racks_4_spus_10_unbalanced() { + let r1 = String::from("r1"); + let r2 = String::from("r2"); + let r3 = String::from("r3"); + let r4 = String::from("r4"); + + let spus = vec![ + (0, true, Some(r1.clone())), + (1, true, Some(r1.clone())), + (2, true, Some(r1.clone())), + (3, true, Some(r2.clone())), + (4, true, Some(r2.clone())), + (5, true, Some(r2.clone())), + (6, true, Some(r3.clone())), + (7, true, Some(r3.clone())), + (8, true, Some(r4.clone())), + (9, true, Some(r4.clone())), + ] + .into(); + + // run test + let rack_map = SpuLocalStore::live_spu_rack_map_sorted(&spus); + let spu_list = SpuLocalStore::online_spus_in_rack(&rack_map); + + // validate result + let expected_map: Vec<(String, Vec)> = vec![ + (String::from("r1"), vec![0, 1, 2]), + (String::from("r2"), vec![3, 4, 5]), + (String::from("r3"), vec![6, 7]), + (String::from("r4"), vec![8, 9]), + ]; + let expected_list = vec![0, 4, 6, 8, 1, 5, 9, 2, 3, 7]; + + assert_eq!(rack_map, expected_map); + assert_eq!(spu_list, expected_list); + } + + #[test] + fn rack_map_test_racks_4_spus_12_full() { + let r1 = String::from("r1"); + let r2 = String::from("r2"); + let r3 = String::from("r3"); + let r4 = String::from("r4"); + + let spus = vec![ + (0, true, Some(r1.clone())), + (1, true, Some(r1.clone())), + (2, true, Some(r1.clone())), + (3, true, Some(r2.clone())), + (4, true, Some(r2.clone())), + (5, true, Some(r2.clone())), + (6, true, Some(r3.clone())), + (7, true, Some(r3.clone())), + (8, true, Some(r3.clone())), + (9, true, Some(r4.clone())), + (10, true, Some(r4.clone())), + (11, true, Some(r4.clone())), + ] + .into(); + + // run test + let rack_map = SpuLocalStore::live_spu_rack_map_sorted(&spus); + let spu_list = SpuLocalStore::online_spus_in_rack(&rack_map); + + // validate result + let expected_map: Vec<(String, Vec)> = vec![ + (String::from("r1"), vec![0, 1, 2]), + (String::from("r2"), vec![3, 4, 5]), + (String::from("r3"), vec![6, 7, 8]), + (String::from("r4"), vec![9, 10, 11]), + ]; + let expected_list = vec![0, 4, 8, 9, 1, 5, 6, 10, 2, 3, 7, 11]; + + assert_eq!(rack_map, expected_map); + assert_eq!(spu_list, expected_list); + } +} diff --git a/sc-server/src/core/spus/mod.rs b/sc-server/src/core/spus/mod.rs new file mode 100644 index 0000000000..21ef5625e5 --- /dev/null +++ 
b/sc-server/src/core/spus/mod.rs @@ -0,0 +1,22 @@
+mod actions;
+mod metadata;
+mod reducer;
+mod controller;
+
+pub use self::actions::SpuActions;
+pub use self::actions::SpuChangeRequest;
+pub use self::metadata::{SpuKV, SpuLocalStore};
+pub use self::reducer::SpuReducer;
+pub use self::controller::SpuController;
+
+
+use std::sync::Arc;
+use ::metadata::spu::SpuSpec;
+use crate::core::common::LSChange;
+
+use crate::k8::K8ClusterStateDispatcher;
+
+
+pub type K8SpuChangeDispatcher = K8ClusterStateDispatcher<SpuSpec>;
+pub type SharedSpuLocalStore = Arc<SpuLocalStore>;
+pub type SpuLSChange = LSChange<SpuSpec>; \ No newline at end of file diff --git a/sc-server/src/core/spus/reducer.rs b/sc-server/src/core/spus/reducer.rs new file mode 100644 index 0000000000..c5f1242978 --- /dev/null +++ b/sc-server/src/core/spus/reducer.rs @@ -0,0 +1,215 @@
+//!
+//! # SC SPU Reducer
+//!
+//! Translates SPU change requests into store and connection actions.
+//!
+
+use log::{debug, trace};
+use types::log_on_err;
+
+
+use crate::core::common::LSChange;
+use crate::core::common::WSAction;
+use crate::conn_manager::ConnectionRequest;
+use crate::conn_manager::SpuConnectionStatusChange;
+use crate::conn_manager::SpuSpecChange;
+use crate::ScServerError;
+
+use super::SpuActions;
+use super::SharedSpuLocalStore;
+use super::SpuKV;
+use super::SpuChangeRequest;
+
+/// SpuReducer is responsible for updating state for SPU
+#[derive(Debug)]
+pub struct SpuReducer(SharedSpuLocalStore);
+
+
+
+impl SpuReducer {
+
+    pub fn new<A>(store: A) -> Self where A: Into<SharedSpuLocalStore> {
+        Self(store.into())
+    }
+
+
+    pub fn process_requests(
+        &self,
+        change_request: SpuChangeRequest,
+    ) -> Result<SpuActions, ScServerError> {
+
+        debug!("processing requests: {}",change_request);
+        let mut actions = SpuActions::default();
+
+        match change_request {
+
+            SpuChangeRequest::SpuLS(ls_requests) => {
+
+                for local_change in ls_requests.into_iter() {
+
+                    match local_change {
+
+                        LSChange::Add(spu) => {
+                            log_on_err!(self.add_spu_action_handler(
+                                spu,
+                                &mut actions));
+                        }
+
+                        LSChange::Mod(new_spu, local_spu) => {
+                            log_on_err!(self.mod_spu_action_handler(
+                                new_spu,
+                                local_spu,
+                                &mut actions));
+                        }
+
+                        LSChange::Delete(spu) => {
+                            log_on_err!(self.del_spu_action_handler(
+                                spu,
+                                &mut actions));
+                        }
+                    }
+
+                }
+
+            },
+
+            SpuChangeRequest::Conn(conn_request) => {
+                self.conn_status_update(conn_request,&mut actions);
+            }
+
+        }
+
+        Ok(actions)
+    }
+
+    ///
+    /// Handle new SPU events from World KV
+    ///
+    fn add_spu_action_handler(
+        &self,
+        mut spu: SpuKV,
+        actions: &mut SpuActions
+    ) -> Result<(), ScServerError> {
+
+        debug!("AddSpu({})", spu.key());
+        trace!("add spu action handler {:#?}", spu);
+        actions.conns.push(
+            ConnectionRequest::Spu(
+                SpuSpecChange::Add(spu.spec().clone())));
+
+        // always set status to offline initially
+        if !spu.status.is_offline() {
+            spu.status.set_offline();
+            actions.spus.push(WSAction::UpdateStatus(spu));
+        }
+
+
+        Ok(())
+    }
+
+    ///
+    /// Modify SPU Action handler
+    ///
+    /// # Remarks
+    /// Action handler performs the following operations:
+    /// * if spec changed
+    ///     * update spec in local cache
+    /// * if status changed,
+    ///     * update status in local cache
+    ///     * ask Topic to generate replica maps for topics waiting for additional SPUs
+    ///     * notify Healthcheck module
+    /// * if spu spec or status changed
+    ///     * update SPUs in cluster
+    ///
+    fn mod_spu_action_handler(
+        &self,
+        new_spu: SpuKV,
+        old_spu: SpuKV,
+        actions: &mut SpuActions,
+    ) -> Result<(), ScServerError> {
+
+
+        let spu_id = new_spu.spec.id;
+        debug!("Update SPU({})", new_spu.key());
+        trace!("Update SPU: new {:#?}
old: {:#?}", new_spu,old_spu); + + // spec changed + if new_spu.spec != old_spu.spec { + let _server_addr = new_spu.private_server_address(); + actions.conns.push(ConnectionRequest::Spu( + SpuSpecChange::Mod(new_spu.spec,old_spu.spec))); + } + + // status changed + if new_spu.status != old_spu.status { + + // if spu comes online + // * send SPU a full update + // * ask topic to generate replica map for pending topics + + if old_spu.status.is_offline() && new_spu.status.is_online() { + actions.conns.push(ConnectionRequest::RefreshSpu(spu_id)); + } + + } + + + Ok(()) + } + + /// + /// Delete Spu Action handler + /// + /// # Remarks + /// Action handler performs the following operations: + /// * delete SPU from local cache + /// * notify Healthcheck module + /// * update SPUs in cluster + /// + fn del_spu_action_handler( + &self, + spu: SpuKV, + actions: &mut SpuActions, + ) -> Result<(), ScServerError> { + let _spu_id = spu.id(); + + debug!("deleting spu: {}", spu.key()); + trace!("delete spu {:#?}", spu); + + actions.conns.push(ConnectionRequest::Spu( + SpuSpecChange::Remove(spu.spec))); + + Ok(()) + } + + /// notification from Connection Manager indicating connection status changed + /// this will generate KV action + fn conn_status_update( + &self, + status: SpuConnectionStatusChange, + actions: &mut SpuActions, + ) { + + debug!("processing conn request: {}",status); + let spu_id = status.spu_id(); + + if let Some(spu) = self.0.get_by_id(&spu_id) { + + let mut spu_kv = spu.clone(); + match status { + SpuConnectionStatusChange::Off(_) => spu_kv.status.set_offline(), + SpuConnectionStatusChange::On(_) => spu_kv.status.set_online() + } + + actions.spus.push(WSAction::UpdateStatus(spu_kv)); + } + + + } + + +} + + + + diff --git a/sc-server/src/core/topics/actions.rs b/sc-server/src/core/topics/actions.rs new file mode 100644 index 0000000000..29a2d0187c --- /dev/null +++ b/sc-server/src/core/topics/actions.rs @@ -0,0 +1,62 @@ +//! +//! # Topic Actions +//! +//! Topic actions define operations performed on the Topics. +//! +use std::fmt; + +use utils::actions::Actions; + +use crate::core::partitions::PartitionWSAction; +use crate::core::spus::SpuLSChange; + +use super::TopicWSAction; +use super::TopicLSChange; + +/// Change Request send to Topic +#[derive(Debug, PartialEq, Clone)] +pub enum TopicChangeRequest { + Topic(Actions), + Spu(Actions) +} + + +impl fmt::Display for TopicChangeRequest { + + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + TopicChangeRequest::Topic(req) => write!(f,"Topic LS: {}",req.count()), + TopicChangeRequest::Spu(req) => write!(f,"SPU LS: {}",req.count()) + } + } +} + + + +#[derive(Debug,Default)] +pub struct TopicActions{ + pub topics: Actions, + pub partitions: Actions +} + + + +impl fmt::Display for TopicActions { + + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.topics.count() == 0 && self.partitions.count() == 0 { + write!(f, + "Empty topic actions" + ) + } else { + write!(f, + "Topic Actions: {}, Partition Actions:: {}, ", + self.topics.count(), + self.partitions.count(), + ) + } + + } +} + + diff --git a/sc-server/src/core/topics/controller.rs b/sc-server/src/core/topics/controller.rs new file mode 100644 index 0000000000..2006b3ff81 --- /dev/null +++ b/sc-server/src/core/topics/controller.rs @@ -0,0 +1,243 @@ +//! +//! # SC Controller +//! +//! Streaming Coordinator Controller receives messages from other components and +//! dispatches them to internal components: SPUs, Topics, Partitions. 
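+//!
+//! # Wiring sketch
+//!
+//! The controller is constructed with the shared local stores, two change
+//! channels (SPU and topic) and a workspace update service, then detaches onto
+//! the executor; channel and store construction is elided here:
+//!
+//! ```ignore
+//! let controller = TopicController::new(local_stores, spu_receiver, topic_receiver, ws_service);
+//! controller.run(); // spawns dispatch_loop() and returns immediately
+//! ```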
+
+use log::trace;
+use log::error;
+use log::info;
+use futures::select;
+use futures::stream::StreamExt;
+
+use types::log_on_err;
+use metadata::topic::TopicSpec;
+use metadata::spu::SpuSpec;
+use future_helper::spawn;
+
+use crate::core::WSUpdateService;
+use crate::core::WSChangeChannel;
+use crate::core::ShareLocalStores;
+
+
+use super::TopicReducer;
+use super::TopicChangeRequest;
+
+
+#[derive(Debug)]
+pub struct TopicController<W> {
+    local_stores: ShareLocalStores,
+    ws_service: W,
+    topic_receiver: WSChangeChannel<TopicSpec>,
+    spu_receiver: WSChangeChannel<SpuSpec>,
+    reducer: TopicReducer,
+}
+
+impl<W> TopicController<W>
+
+    where
+        W: WSUpdateService + Send + 'static
+
+{
+
+    /// streaming coordinator controller constructor
+    pub fn new(
+        local_stores: ShareLocalStores,
+        spu_receiver: WSChangeChannel<SpuSpec>,
+        topic_receiver: WSChangeChannel<TopicSpec>,
+        ws_service: W) -> Self {
+
+
+        Self {
+            reducer: TopicReducer::new(local_stores.topics().clone(),
+                local_stores.spus().clone(),
+                local_stores.partitions().clone()
+            ),
+            local_stores,
+            spu_receiver,
+            topic_receiver,
+            ws_service,
+
+        }
+    }
+
+
+
+    pub fn run(self) {
+
+        spawn(self.dispatch_loop());
+    }
+
+
+
+    async fn dispatch_loop(mut self) {
+
+        loop {
+            select! {
+                topic_req = self.topic_receiver.next() => {
+                    match topic_req {
+                        None => {
+                            error!("Topic LC dispatcher has been terminated. Ending server loop");
+                            break;
+                        },
+                        Some(request) => {
+                            trace!("received topic request {:#?}",request);
+                            self.process_request(TopicChangeRequest::Topic(request)).await;
+                        },
+                    }
+                },
+                spu_req = self.spu_receiver.next() => {
+                    match spu_req {
+                        None => {
+                            error!("SPU LC dispatcher has been terminated. Ending server loop");
+                            break;
+                        },
+                        Some(request) => {
+                            trace!("received SPU request {:#?}",request);
+                            self.process_request(TopicChangeRequest::Spu(request)).await;
+                        }
+                    }
+                }
+                complete => {},
+            }
+        }
+
+        info!("topic controller is terminated");
+
+    }
+
+
+
+
+    /// process requests related to topic management
+    async fn process_request(&mut self, request: TopicChangeRequest) {
+
+        match self.reducer.process_requests(request) {
+            Ok(actions) => {
+
+                trace!(
+                    "Topic actions: {}",
+                    actions
+                );
+
+                if actions.topics.count() > 0 {
+                    for ws_action in actions.topics.into_iter() {
+                        log_on_err!(
+                            self.ws_service.update_topic(ws_action).await
+                        );
+                    }
+                }
+
+                if actions.partitions.count() > 0 {
+                    for ws_action in actions.partitions.into_iter() {
+                        log_on_err!(
+                            self.ws_service.update_partition(ws_action).await
+                        )
+                    }
+                }
+            },
+            Err(err) => error!("error processing topic change request: {}",err)
+        }
+
+
+    }
+
+
+}
+
+
+
+
+/*
+#[cfg(test)]
+mod tests {
+
+    use log::debug;
+    use futures::channel::mpsc::channel;
+    use futures::channel::mpsc::Receiver;
+
+    use future_helper::test_async;
+    use utils::actions::Actions;
+    use metadata::spu::SpuSpec;
+
+    use crate::cli::ScConfig;
+    use crate::core::ScMetadata;
+    use crate::core::ScContext;
+    use crate::core::ScRequest;
+    use crate::core::auth_tokens::AuthTokenAction;
+    use crate::core::spus::SpuAction;
+    use crate::core::spus::SpuKV;
+    use crate::tests::fixture::MockConnectionManager;
+    use crate::tests::fixture::SharedKVStore;
+    use crate::core::send_channels::ScSendChannels;
+    use crate::core::common::new_channel;
+    use crate::hc_manager::HcAction;
+
+    use super::ScController;
+
+    fn sample_controller() -> (
+        ScController,
+        (Receiver, Receiver>),
+    ) {
+        let default_config = ScConfig::default();
+        let metadata = ScMetadata::shared_metadata(default_config);
+        let conn_manager = MockConnectionManager::shared_conn_manager();
+
+        let
(sc_sender, sc_receiver) = channel::(100); + let (hc_sender, hc_receiver) = new_channel::>(); + let kv_store = SharedKVStore::new(sc_sender.clone()); + let sc_send_channels = ScSendChannels::new(hc_sender.clone()); + + ( + ScController::new( + ScContext::new(sc_send_channels, conn_manager.clone()), + metadata.clone(), + kv_store.clone(), + ), + (sc_receiver, hc_receiver), + ) + } + + /// test add new spu + #[test_async] + async fn test_controller_basic() -> Result<(), ()> { + let (mut controller, _other) = sample_controller(); + + let auth_token_actions: Actions = Actions::default(); + + let spu: SpuKV = SpuSpec::new(5000).into(); + let mut spu_actions: Actions = Actions::default(); + + spu_actions.push(SpuAction::AddSpu(spu.spec().name(), spu.clone())); + /* + let topic_actions: Actions = Actions::default(); + for (name,topic) in self.kv.topics.read().iter() { + topic_actions.push(TopicAction::AddTopic(name.clone(),topic.clone())); + } + + let partition_actions: Actions = Actions::default(); + for (key,partition) in self.kv.partitions.read().iter() { + partition_actions.push(PartitionAction::AddPartition(key.clone(),partition.clone())); + } + */ + controller + .process_sc_request(ScRequest::UpdateAll( + auth_token_actions, + spu_actions, + Actions::default(), + Actions::default(), + )) + .await; + + let metadata = controller.metadata(); + debug!("metadata: {:#?}", metadata); + + // metdata should container new spu + assert!(metadata.spus().spu("spu-5000").is_some()); + + Ok(()) + } + + +} +*/ \ No newline at end of file diff --git a/sc-server/src/core/topics/metadata.rs b/sc-server/src/core/topics/metadata.rs new file mode 100644 index 0000000000..b5d6f8c893 --- /dev/null +++ b/sc-server/src/core/topics/metadata.rs @@ -0,0 +1,1073 @@ +//! +//! # Topic & Topics Metadata +//! +//! Topic metadata information cached on SC. +//! +//! # Remarks +//! Topic Status uses TopicResolution to reflect the state of the replica map: +//! Ok, // replica map has been generated, topic is operational +//! Pending, // not enough SPUs to generate "replica map" +//! Inconsistent, // use change spec parameters, which is not supported +//! InvalidConfig, // invalid configuration parameters provided +//! 
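+//!
+//! # Replica map sketch
+//!
+//! `generate_replica_map_for_topic` (defined below) drives the transition from
+//! Pending to Ok; the sketch mirrors the unit tests at the end of this module,
+//! with `spu_store` standing in for a populated `SpuLocalStore`:
+//!
+//! ```ignore
+//! // 4 partitions, replication factor 1, honor rack assignment
+//! let param: TopicReplicaParam = (4, 1, false).into();
+//! let replica_map = generate_replica_map_for_topic(&spu_store, &param, None);
+//! ```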
+use std::collections::BTreeMap; +use std::fmt; +use std::io::Error as IoError; + +use log::trace; +use log::debug; +use log::warn; +use rand::prelude::*; + +use types::ReplicaMap; +use k8_metadata::core::metadata::K8Obj; +use metadata::topic::{TopicSpec, TopicStatus,PartitionMap,TopicResolution}; +use metadata::topic::TopicReplicaParam; +use metadata::topic::PartitionMaps; +use metadata::partition::ReplicaKey; +use k8_metadata::topic::TopicSpec as K8TopicSpec; +use k8_metadata::topic::TopicStatus as K8TopicStatus; + +use crate::core::partitions::PartitionKV; +use crate::core::partitions::PartitionLocalStore; + +use crate::core::common::LocalStore; +use crate::core::common::KVObject; +use crate::core::common::KvContext; +use crate::core::spus::SpuLocalStore; +use crate::core::Spec; +use crate::core::Status; + + +impl Spec for TopicSpec + +{ + const LABEL: &'static str = "Topic"; + type Key = String; + type Status = TopicStatus; + type K8Spec = K8TopicSpec; + type Owner = TopicSpec; + + +/// convert kubernetes objects into KV vbalue + fn convert_from_k8(k8_topic: K8Obj) -> + Result,IoError> + + { + + // metadata is mandatory + let topic_name = &k8_topic.metadata.name; + + // spec is mandatory + let topic_spec = create_computed_topic_spec_from_k8_spec(&k8_topic.spec); + + // topic status is optional + let topic_status = match &k8_topic.status { + Some(k8_status) => create_topic_status_from_k8_spec(k8_status), + None => TopicStatus::default(), + }; + + let ctx = KvContext::default().with_ctx(k8_topic.metadata.clone()); + Ok( + TopicKV::new( topic_name.to_owned(),topic_spec, topic_status).with_kv_ctx(ctx), + ) + } + +} + + +/// There are 2 types of topic configurations: +/// * Computed +/// - computed topics take partitions and replication factor +/// * Assigned +/// - assigned topics take custom replica assignment +/// - partitions & replica factor are derived +/// +/// If all parameters are provided, Assigned topics takes precedence. +/// * Values provided for partitions and replication factor are overwritten by the +/// values derived from custom replica assignment. 
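+/// For example, a K8 spec with `partitions: Some(3)`, `replication_factor: Some(2)`
+/// and no custom replica assignment becomes a computed topic with 3 partitions and
+/// replication factor 2; missing values fall back to `-1` so that later validation
+/// can flag them as invalid.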
+fn create_computed_topic_spec_from_k8_spec(k8_topic_spec: &K8TopicSpec) -> TopicSpec { + + if let Some(k8_replica_assign) = &k8_topic_spec.custom_replica_assignment { + // Assigned Topic + let mut partition_map: Vec = vec![]; + + for k8_partition in k8_replica_assign { + partition_map.push(PartitionMap { + id: k8_partition.id(), + replicas: k8_partition.replicas().clone(), + }); + } + + TopicSpec::new_assigned(partition_map) + + } else { + // Computed Topic + let partitions = match k8_topic_spec.partitions { + Some(partitions) => partitions, + None => -1, + }; + + let replication_factor = match k8_topic_spec.replication_factor { + Some(replication_factor) => replication_factor, + None => -1, + }; + + TopicSpec::new_computed( + partitions, + replication_factor, + k8_topic_spec.ignore_rack_assignment, + ) + } + +} + +/// converts K8 topic status into metadata topic status +fn create_topic_status_from_k8_spec(k8_topic_status: &K8TopicStatus) -> TopicStatus { + k8_topic_status.clone().into() +} + + + + +impl Status for TopicStatus{} + + +/// values for next state +#[derive(Default,Debug)] +pub struct TopicNextState { + pub resolution: TopicResolution, + pub reason: String, + pub replica_map: ReplicaMap, + pub partitions: Vec +} + + + +impl fmt::Display for TopicNextState { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f,"{:#?}",self.resolution) + } +} + + + + +impl From<(TopicResolution,String)> for TopicNextState { + fn from(val: (TopicResolution,String)) -> Self { + let (resolution,reason) = val; + Self { + resolution, + reason, + ..Default::default() + } + } +} + +impl From<((TopicResolution,String),ReplicaMap)> for TopicNextState { + fn from(val: ((TopicResolution,String),ReplicaMap)) -> Self { + let ((resolution,reason),replica_map) = val; + Self { + resolution, + reason, + replica_map, + ..Default::default() + } + } +} + +impl From<((TopicResolution,String),Vec)> for TopicNextState { + fn from(val: ((TopicResolution,String),Vec)) -> Self { + let ((resolution,reason),partitions) = val; + Self { + resolution, + reason, + partitions, + ..Default::default() + } + } +} + + +// ----------------------------------- +// Data Structures +// ----------------------------------- +pub type TopicKV = KVObject; + +// ----------------------------------- +// Topic - Traits +// ----------------------------------- + +impl std::fmt::Display for TopicKV { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match &self.spec { + TopicSpec::Assigned(partition_map) => { + write!(f, "assigned::{}", partition_map) + } + TopicSpec::Computed(param) => { + write!(f, "computed::({})", param) + } + } + } +} + +// ----------------------------------- +// Topic - Implementation +// ----------------------------------- + +impl TopicKV { + pub fn is_provisioned(&self) -> bool { + self.status.is_resolution_provisioned() + } + + pub fn replica_map(&self) -> &ReplicaMap { + &self.status.replica_map + } + + pub fn reason(&self) -> &String { + &self.status.reason + } + + pub fn same_next_state(&self) -> TopicNextState { + TopicNextState { + resolution: self.status.resolution.clone(), + ..Default::default() + } + } + + /// update our state with next state, return remaining partiton kv changes + pub fn apply_next_state(&mut self,next_state: TopicNextState) -> Vec { + self.status.resolution = next_state.resolution; + self.status.reason = next_state.reason; + if next_state.replica_map.len() > 0 { + self.status.set_replica_map(next_state.replica_map); + } + next_state.partitions + } + + /// based 
on our current state, compute what should be next state + pub fn compute_next_state(&self, + spu_store: &SpuLocalStore, + partition_store: &PartitionLocalStore + ) -> TopicNextState { + + match self.spec() { + // Computed Topic + TopicSpec::Computed(ref param) => { + match self.status.resolution { + TopicResolution::Init | TopicResolution::InvalidConfig => { + self.validate_computed_topic_parameters(param) + }, + TopicResolution::Pending | TopicResolution::InsufficientResources => { + let mut next_state = self.generate_replica_map(spu_store,param); + if next_state.resolution == TopicResolution::Provisioned { + debug!("Topic: {} replica generate successfull, status is provisioned",self.key()); + next_state.partitions = self.create_new_partitions(partition_store); + next_state + } else { + next_state + } + }, + _ => { + debug!("topic: {} resolution: {:#?} ignoring",self.key,self.status.resolution); + let mut next_state = self.same_next_state(); + if next_state.resolution == TopicResolution::Provisioned { + next_state.partitions = self.create_new_partitions(partition_store); + next_state + } else { + next_state + } + + } + } + + } + + // Assign Topic + TopicSpec::Assigned(ref partition_map) => { + match self.status.resolution { + TopicResolution::Init | TopicResolution::InvalidConfig => { + self.validate_assigned_topic_parameters(partition_map) + }, + TopicResolution::Pending | TopicResolution::InsufficientResources => { + let mut next_state = self.update_replica_map_for_assigned_topic(partition_map,spu_store); + if next_state.resolution == TopicResolution::Provisioned { + next_state.partitions = self.create_new_partitions(partition_store); + next_state + } else { + next_state + } + }, + _ => { + debug!("assigned topic: {} resolution: {:#?} ignoring",self.key,self.status.resolution); + let mut next_state = self.same_next_state(); + if next_state.resolution == TopicResolution::Provisioned { + next_state.partitions = self.create_new_partitions(partition_store); + next_state + } else { + next_state + } + + } + } + + + } + } + } + + /// + /// Validate computed topic spec parameters and update topic status + /// * error is passed to the topic reason. + /// + pub fn validate_computed_topic_parameters( + &self, + param: &TopicReplicaParam, + ) -> TopicNextState { + if let Err(err) = TopicSpec::valid_partition(¶m.partitions) { + warn!("topic: {} partition config is invalid",self.key()); + TopicStatus::next_resolution_invalid_config(&err.to_string()).into() + } else if let Err(err) = TopicSpec::valid_replication_factor(¶m.replication_factor) { + warn!("topic: {} replication config is invalid",self.key()); + TopicStatus::next_resolution_invalid_config(&err.to_string()).into() + } else { + debug!("topic: {} config is valid, transition to pending",self.key()); + TopicStatus::next_resolution_pending().into() + } + } + + + + /// + /// Validate assigned topic spec parameters and update topic status + /// * error is passed to the topic reason. 
+    ///
+    pub fn validate_assigned_topic_parameters(
+        &self,
+        partition_map: &PartitionMaps,
+    ) -> TopicNextState {
+        if let Err(err) = partition_map.valid_partition_map() {
+            TopicStatus::next_resolution_invalid_config(&err.to_string()).into()
+        } else {
+            TopicStatus::next_resolution_pending().into()
+        }
+    }
+
+
+    ///
+    /// Generate Replica Map if there are enough online spus
+    /// * returns a replica map or a reason for the failure
+    /// * fatal errors are configuration errors and are not recoverable
+    ///
+    pub fn generate_replica_map(
+        &self,
+        spus: &SpuLocalStore,
+        param: &TopicReplicaParam
+    ) -> TopicNextState {
+
+        let spu_count = spus.count();
+        if spu_count < param.replication_factor {
+
+            trace!(
+                "topic '{}' - R-MAP needs {:?} online spus, found {:?}",
+                self.key,
+                param.replication_factor,
+                spu_count
+            );
+
+            let reason = format!("need {} more SPU",param.replication_factor - spu_count);
+            TopicStatus::set_resolution_no_resource(reason).into()
+
+
+        } else {
+            let replica_map = generate_replica_map_for_topic(spus,param,None);
+            if replica_map.len() > 0 {
+                (TopicStatus::next_resolution_provisoned(),replica_map).into()
+            } else {
+                let reason = "empty replica map";
+                TopicStatus::set_resolution_no_resource(reason.to_owned()).into()
+            }
+
+        }
+    }
+
+
+
+    /// create partition children if they don't exist
+    pub fn create_new_partitions(
+        &self,
+        partition_store: &PartitionLocalStore,
+    ) -> Vec<PartitionKV> {
+
+        let parent_kv_ctx = self.kv_ctx.make_parent_ctx();
+
+        self.status.replica_map.iter()
+            .filter_map(| (idx,replicas) | {
+
+                let replica_key = ReplicaKey::new(self.key(),*idx);
+                debug!("Topic: {} creating partition: {}",self.key(),replica_key);
+                if partition_store.contains_key(&replica_key) {
+                    None
+                } else {
+                    Some(
+                        PartitionKV::with_spec(
+                            replica_key,
+                            replicas.clone().into()
+                        )
+                        .with_kv_ctx(parent_kv_ctx.clone())
+                    )
+                }
+            }).collect()
+
+    }
+
+
+
+    ///
+    /// Compare assigned SPUs versus local SPUs. If all assigned SPUs are live,
+    /// update topic status to ok; otherwise, mark as waiting for live SPUs
+    ///
+    pub fn update_replica_map_for_assigned_topic(
+        &self,
+        partition_maps: &PartitionMaps,
+        spu_store: &SpuLocalStore,
+    ) -> TopicNextState {
+
+        let partition_map_spus = partition_maps.unique_spus_in_partition_map();
+        let spus_id = spu_store.spu_ids_for_replica();
+
+        // ensure each assigned spu exists
+        for spu in &partition_map_spus {
+            if !spus_id.contains(spu) {
+                return TopicStatus::next_resolution_invalid_config(format!("invalid spu id: {}",spu)).into()
+            }
+        }
+
+        let replica_map = partition_maps.partition_map_to_replica_map();
+        if replica_map.len() == 0 {
+            TopicStatus::next_resolution_invalid_config("invalid replica map".to_owned()).into()
+        } else {
+            (TopicStatus::next_resolution_provisoned(),replica_map).into()
+        }
+
+    }
+
+
+}
+
+
+
+///
+/// Generate replica map for a specific topic
+///
+pub fn generate_replica_map_for_topic(
+    spus: &SpuLocalStore,
+    param: &TopicReplicaParam,
+    from_index: Option<i32>,
+) -> ReplicaMap {
+
+    let in_rack_count = spus.spus_in_rack_count();
+    let start_index = from_index.unwrap_or(-1);
+
+    // generate partition map (with or without rack assignment)
+    if param.ignore_rack_assignment || in_rack_count == 0 {
+        generate_partitions_without_rack(spus, param, start_index)
+    } else {
+        generate_partitions_with_rack_assignment(spus, param, start_index)
+    }
+}
+
+///
+/// Generate partitions on spus that have been assigned to racks
+///
+fn generate_partitions_with_rack_assignment(
+    spus: &SpuLocalStore,
+    param: &TopicReplicaParam,
+    start_index: i32,
+) -> ReplicaMap {
+    let mut partition_map = BTreeMap::new();
+    let rack_map = SpuLocalStore::live_spu_rack_map_sorted(&spus);
+    let spu_list = SpuLocalStore::online_spus_in_rack(&rack_map);
+    let spu_cnt = spus.online_spu_count();
+
+    let s_idx = if start_index >= 0 {
+        start_index
+    } else {
+        thread_rng().gen_range(0, spu_cnt)
+    };
+
+    for p_idx in 0..param.partitions {
+        let mut replicas: Vec<i32> = vec![];
+        for r_idx in 0..param.replication_factor {
+            let spu_idx = ((s_idx + p_idx + r_idx) % spu_cnt) as usize;
+            replicas.push(spu_list[spu_idx]);
+        }
+        partition_map.insert(p_idx, replicas);
+    }
+
+    partition_map
+}
+
+///
+/// Generate partitions without taking rack assignments into consideration
+///
+fn generate_partitions_without_rack(
+    spus: &SpuLocalStore,
+    param: &TopicReplicaParam,
+    start_index: i32,
+) -> ReplicaMap {
+    let mut partition_map = BTreeMap::new();
+    let spu_cnt = spus.spu_used_for_replica();
+    let spu_ids = spus.spu_ids_for_replica();
+
+    let s_idx = if start_index >= 0 {
+        start_index
+    } else {
+        thread_rng().gen_range(0, spu_cnt)
+    };
+
+    let gap_max = spu_cnt - param.replication_factor + 1;
+    for p_idx in 0..param.partitions {
+        let mut replicas: Vec<i32> = vec![];
+        let gap_cnt = ((s_idx + p_idx) / spu_cnt) % gap_max;
+        for r_idx in 0..param.replication_factor {
+            let gap = if r_idx != 0 { gap_cnt } else { 0 };
+            let spu_idx = ((s_idx + p_idx + r_idx + gap) % spu_cnt) as usize;
+            replicas.push(spu_ids[spu_idx]);
+        }
+        partition_map.insert(p_idx, replicas);
+    }
+
+    partition_map
+}
+
+
+pub type TopicLocalStore = LocalStore<TopicSpec>;
+
+
+// -----------------------------------
+// Topics - Implementation
+// -----------------------------------
+
+impl TopicLocalStore {
+
+
+    pub fn topic(&self, topic_name: &str) -> Option<TopicKV> {
+        match (*self.inner_store().read()).get(topic_name) {
+            Some(topic) => Some(topic.clone()),
+            None => None,
+        }
+    }
+
+
+
+    pub fn table_fmt(&self) -> String {
+        let mut table = String::new();
+
+        let topic_hdr = format!(
+            "{n:<18}
{t:<8} {p:<5} {s:<5} {g:<8} {l:<14} {m:<10} {r}\n", + n = "TOPIC", + t = "TYPE", + p = "PART", + s = "FACT", + g = "IGN-RACK", + l = "RESOLUTION", + m = "R-MAP-ROWS", + r = "REASON", + ); + table.push_str(&topic_hdr); + + for (name, topic) in self.inner_store().read().iter() { + let topic_row = format!( + "{n:<18} {t:^8} {p:^5} {s:^5} {g:<8} {l:^14} {m:^10} {r}\n", + n = name.clone(), + t = TopicSpec::type_label(&topic.spec.is_computed()), + p = TopicSpec::partitions_str(&topic.spec.partitions()), + s = TopicSpec::replication_factor_str(&topic.spec.replication_factor()), + g = TopicSpec::ignore_rack_assign_str(&topic.spec.ignore_rack_assignment()), + l = topic.status.resolution().resolution_label(), + m = topic.status.replica_map_cnt_str(), + r = topic.reason(), + ); + table.push_str(&topic_row); + } + + table + } + + +} + + + +// +// Unit Tests +// +#[cfg(test)] + +mod test { + use metadata::topic::{TopicResolution, TopicStatus}; + + use super::{TopicKV, TopicLocalStore}; + + + #[test] + fn test_topic_replica_map() { + // empty replica map + let topic1 = TopicKV::new( + "Topic-1", + (1, 1, false).into(), + TopicStatus::default(), + ); + assert_eq!(topic1.replica_map().len(), 0); + + // replica map with 2 partitions + let topic2 = TopicKV::new( + "Topic-2", + (1, 1, false).into(), + TopicStatus::new( + TopicResolution::Provisioned, + vec![vec![0, 1], vec![1, 2]], + "".to_owned(), + ), + ); + assert_eq!(topic2.replica_map().len(), 2); + } + + #[test] + fn test_update_topic_status_objects() { + // create topic 1 + let mut topic1 = TopicKV::new( + "Topic-1", + (2, 2, false).into(), + TopicStatus::default(), + ); + assert_eq!(topic1.status.resolution, TopicResolution::Init); + + // create topic 2 + let topic2 = TopicKV::new( + "Topic-1", + (2, 2, false).into(), + TopicStatus::new( + TopicResolution::Provisioned, + vec![vec![0, 1], vec![1, 2]], + "".to_owned(), + ), + ); + + // test update individual components + topic1.status.set_replica_map(topic2.replica_map().clone()); + topic1.status.reason = topic2.reason().clone(); + topic1.status.resolution = (&topic2.status.resolution).clone(); + + // topics should be identical + assert_eq!(topic1, topic2); + } + + #[test] + fn topic_list_insert() { + // create topics + let topic1 = TopicKV::new( + "Topic-1", + (1, 1, false).into(), + TopicStatus::default(), + ); + let topic2 = TopicKV::new( + "Topic-2", + (2, 2, false).into(), + TopicStatus::default(), + ); + + let topics = TopicLocalStore::default(); + topics.insert(topic1); + topics.insert(topic2); + + assert_eq!(topics.count(), 2); + } + + #[test] + fn test_topics_in_pending_state() { + let topics = TopicLocalStore::default(); + + // resolution: Init + let topic1 = TopicKV::new( + "Topic-1", + (1, 1, false).into(), + TopicStatus::default(), + ); + assert_eq!(topic1.status.is_resolution_initializing(), true); + + // resolution: Pending + let topic2 = TopicKV::new( + "Topic-2", + (1, 1, false).into(), + TopicStatus::new( + TopicResolution::Pending, + vec![], + "waiting for live spus".to_owned(), + ), + ); + assert_eq!(topic2.status.is_resolution_pending(), true); + + // resolution: Provisioned + let topic3 = TopicKV::new( + "Topic-3", + (2, 2, false).into(), + TopicStatus::new( + TopicResolution::Provisioned, + vec![vec![0, 1], vec![1, 2]], + "".to_owned(), + ), + ); + assert_eq!(topic3.status.is_resolution_provisioned(), true); + + // resolution: InsufficientResources + let topic4 = TopicKV::new( + "Topic-4", + (2, 2, false).into(), + TopicStatus::new( + TopicResolution::InsufficientResources, + vec![vec![0], 
vec![1]], + "".to_owned(), + ), + ); + + topics.insert(topic1); + topics.insert(topic2); + topics.insert(topic3); + topics.insert(topic4); + + let expected = vec![String::from("Topic-2"),String::from("Topic-4")]; + let mut pending_state_names: Vec = vec![]; + + topics + .visit_values(|topic| { + if topic.status.need_replica_map_recal() { + pending_state_names.push(topic.key_owned()); + } + }); + + + assert_eq!(pending_state_names, expected); + } + + #[test] + fn test_update_topic_status_with_other_error_topic_not_found() { + let topics = TopicLocalStore::default(); + + let topic1 = TopicKV::new( + "Topic-1", + (1, 1, false).into(), + TopicStatus::default(), + ); + topics.insert(topic1); + + let topic2 = TopicKV::new( + "Topic-2", + (2, 2, false).into(), + TopicStatus::new( + TopicResolution::Provisioned, + vec![vec![0, 1], vec![1, 2]], + "".to_owned(), + ), + ); + + // test: update_status (returns error) + let res = topics.update_status(topic2.key(),topic2.status.clone()); + assert_eq!( + format!("{}", res.unwrap_err()), + "Topic 'Topic-2': not found, cannot update" + ); + } + + #[test] + fn test_update_topic_status_successful() { + let topics = TopicLocalStore::default(); + let topic1 = TopicKV::new( + "Topic-1", + (2, 2, false).into(), + TopicStatus::default(), + ); + topics.insert(topic1); + + let updated_topic = TopicKV::new( + "Topic-1", + (2, 2, false).into(), + TopicStatus::new( + TopicResolution::Provisioned, + vec![vec![0, 1], vec![1, 2]], + "".to_owned(), + ), + ); + + // run test + let res = topics.update_status(updated_topic.key(),updated_topic.status.clone()); + assert!(res.is_ok()); + + let topic = topics.topic("Topic-1"); + assert_eq!(topic.is_some(), true); + + assert_eq!(topic.unwrap(), updated_topic); + } + +} + + + + +// +// Unit Tests +// +#[cfg(test)] +pub mod replica_map_test { + + use std::collections::BTreeMap; + + use super::SpuLocalStore; + use super::generate_replica_map_for_topic; + + + #[test] + fn generate_replica_map_for_topic_1x_replicas_no_rack() { + + let spus: SpuLocalStore = vec![ + (0, true, None), + (1, true, None), + (2, true, None), + (4, true, None), + (5000, true, None), + ].into(); + + assert_eq!(spus.online_spu_count(), 5); + + // test 4 partitions, 1 replicas - index 8 + let param = (4, 1, false).into(); + let map_1xi = generate_replica_map_for_topic(&spus,&param,Some(8)); + let mut map_1xi_expected = BTreeMap::new(); + map_1xi_expected.insert(0, vec![4]); + map_1xi_expected.insert(1, vec![5000]); + map_1xi_expected.insert(2, vec![0]); + map_1xi_expected.insert(3, vec![1]); + assert_eq!(map_1xi, map_1xi_expected); + } + + #[test] + fn generate_replica_map_for_topic_2x_replicas_no_rack() { + + let spus = vec![ + (0, true, None), + (1, true, None), + (2, true, None), + (3, true, None), + (4, true, None), + ].into(); + + // test 4 partitions, 2 replicas - index 3 + let param = (4, 2, false).into(); + let map_2xi = generate_replica_map_for_topic(&spus, &param, Some(3)); + let mut map_2xi_expected = BTreeMap::new(); + map_2xi_expected.insert(0, vec![3, 4]); + map_2xi_expected.insert(1, vec![4, 0]); + map_2xi_expected.insert(2, vec![0, 2]); + map_2xi_expected.insert(3, vec![1, 3]); + assert_eq!(map_2xi, map_2xi_expected); + } + + #[test] + fn generate_replica_map_for_topic_3x_replicas_no_rack() { + let spus = vec![ + (0, true, None), + (1, true, None), + (2, true, None), + (3, true, None), + (4, true, None), + ].into(); + + // test 21 partitions, 3 replicas - index 0 + let param = (21, 3, false).into(); + let map_3x = generate_replica_map_for_topic(&spus,&param, 
Some(0)); + let mut map_3x_expected = BTreeMap::new(); + map_3x_expected.insert(0, vec![0, 1, 2]); + map_3x_expected.insert(1, vec![1, 2, 3]); + map_3x_expected.insert(2, vec![2, 3, 4]); + map_3x_expected.insert(3, vec![3, 4, 0]); + map_3x_expected.insert(4, vec![4, 0, 1]); + map_3x_expected.insert(5, vec![0, 2, 3]); + map_3x_expected.insert(6, vec![1, 3, 4]); + map_3x_expected.insert(7, vec![2, 4, 0]); + map_3x_expected.insert(8, vec![3, 0, 1]); + map_3x_expected.insert(9, vec![4, 1, 2]); + map_3x_expected.insert(10, vec![0, 3, 4]); + map_3x_expected.insert(11, vec![1, 4, 0]); + map_3x_expected.insert(12, vec![2, 0, 1]); + map_3x_expected.insert(13, vec![3, 1, 2]); + map_3x_expected.insert(14, vec![4, 2, 3]); + map_3x_expected.insert(15, vec![0, 1, 2]); + map_3x_expected.insert(16, vec![1, 2, 3]); + map_3x_expected.insert(17, vec![2, 3, 4]); + map_3x_expected.insert(18, vec![3, 4, 0]); + map_3x_expected.insert(19, vec![4, 0, 1]); + map_3x_expected.insert(20, vec![0, 2, 3]); + assert_eq!(map_3x, map_3x_expected); + + // test 4 partitions, 3 replicas - index 12 + let param = (4, 3, false).into(); + let map_3xi = generate_replica_map_for_topic(&spus, &param, Some(12)); + let mut map_3xi_expected = BTreeMap::new(); + map_3xi_expected.insert(0, vec![2, 0, 1]); + map_3xi_expected.insert(1, vec![3, 1, 2]); + map_3xi_expected.insert(2, vec![4, 2, 3]); + map_3xi_expected.insert(3, vec![0, 1, 2]); + assert_eq!(map_3xi, map_3xi_expected); + } + + #[test] + fn generate_replica_map_for_topic_4x_replicas_no_rack() { + + let spus = vec![ + (0, true, None), + (1, true, None), + (2, true, None), + (3, true, None), + (4, true, None), + ].into(); + + // test 4 partitions, 4 replicas - index 10 + let param = (4, 4, false).into(); + let map_4xi = generate_replica_map_for_topic(&spus, &param, Some(10)); + let mut map_4xi_expected = BTreeMap::new(); + map_4xi_expected.insert(0, vec![0, 1, 2, 3]); + map_4xi_expected.insert(1, vec![1, 2, 3, 4]); + map_4xi_expected.insert(2, vec![2, 3, 4, 0]); + map_4xi_expected.insert(3, vec![3, 4, 0, 1]); + assert_eq!(map_4xi, map_4xi_expected); + } + + #[test] + fn generate_replica_map_for_topic_5x_replicas_no_rack() { + + let spus = vec![ + (0, true, None), + (1, true, None), + (3, true, None), + (4, true, None), + (5002, true, None), + ].into(); + + // test 4 partitions, 5 replicas - index 14 + let param = (4, 5, false).into(); + let map_5xi = generate_replica_map_for_topic(&spus, &param, Some(14)); + let mut map_5xi_expected = BTreeMap::new(); + map_5xi_expected.insert(0, vec![5002, 0, 1, 3, 4]); + map_5xi_expected.insert(1, vec![0, 1, 3, 4, 5002]); + map_5xi_expected.insert(2, vec![1, 3, 4, 5002, 0]); + map_5xi_expected.insert(3, vec![3, 4, 5002, 0, 1]); + assert_eq!(map_5xi, map_5xi_expected); + } + + #[test] + fn generate_replica_map_for_topic_6_part_3_rep_6_brk_3_rak() { + let r1 = String::from("r1"); + let r2 = String::from("r2"); + let r3 = String::from("r3"); + + let spus = vec![ + (0, true, Some(r1.clone())), + (1, true, Some(r2.clone())), + (2, true, Some(r2.clone())), + (3, true, Some(r3.clone())), + (4, true, Some(r3.clone())), + (5, true, Some(r3.clone())), + ].into(); + + // Compute & compare with result + let param = (6, 3, false).into(); + let computed = generate_replica_map_for_topic(&spus, &param, Some(0)); + let mut expected = BTreeMap::new(); + expected.insert(0, vec![3, 2, 0]); + expected.insert(1, vec![2, 0, 4]); + expected.insert(2, vec![0, 4, 1]); + expected.insert(3, vec![4, 1, 5]); + expected.insert(4, vec![1, 5, 3]); + expected.insert(5, vec![5, 3, 2]); + + 
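// Note (illustrative, derived from the expected map above): with rack assignment + // active, SPUs are first interleaved across racks before the sliding replica + // window is applied. For the racks above (r1: [0], r2: [1, 2], r3: [3, 4, 5]) + // the interleaved sequence works out to [3, 2, 0, 4, 1, 5], and each partition + // takes the next 3 SPUs from that ring, which is exactly the map asserted here. + 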
assert_eq!(computed, expected); + } + + #[test] + fn generate_replica_map_for_topic_12_part_4_rep_11_brk_4_rak() { + let r1 = String::from("r1"); + let r2 = String::from("r2"); + let r3 = String::from("r3"); + let r4 = String::from("r4"); + + let spus = vec![ + (0, true, Some(r1.clone())), + (1, true, Some(r1.clone())), + (2, true, Some(r1.clone())), + (3, true, Some(r2.clone())), + (4, true, Some(r2.clone())), + (5, true, Some(r2.clone())), + (6, true, Some(r3.clone())), + (7, true, Some(r3.clone())), + (8, true, Some(r3.clone())), + (9, true, Some(r4.clone())), + (10, true, Some(r4.clone())), + (11, true, Some(r4.clone())), + ].into(); + + // Compute & compare with result + let param = (12, 4, false).into(); + let computed = generate_replica_map_for_topic(&spus, &param, Some(0)); + let mut expected = BTreeMap::new(); + expected.insert(0, vec![0, 4, 8, 9]); + expected.insert(1, vec![4, 8, 9, 1]); + expected.insert(2, vec![8, 9, 1, 5]); + expected.insert(3, vec![9, 1, 5, 6]); + expected.insert(4, vec![1, 5, 6, 10]); + expected.insert(5, vec![5, 6, 10, 2]); + expected.insert(6, vec![6, 10, 2, 3]); + expected.insert(7, vec![10, 2, 3, 7]); + expected.insert(8, vec![2, 3, 7, 11]); + expected.insert(9, vec![3, 7, 11, 0]); + expected.insert(10, vec![7, 11, 0, 4]); + expected.insert(11, vec![11, 0, 4, 8]); + + assert_eq!(computed, expected); + } + + #[test] + fn generate_replica_map_for_topic_9_part_3_rep_9_brk_3_rak() { + let r1 = String::from("r1"); + let r2 = String::from("r2"); + let r3 = String::from("r3"); + + let spus = vec![ + (0, true, Some(r1.clone())), + (1, true, Some(r1.clone())), + (2, true, Some(r1.clone())), + (3, true, Some(r2.clone())), + (4, true, Some(r2.clone())), + (5, true, Some(r2.clone())), + (6, true, Some(r3.clone())), + (7, true, Some(r3.clone())), + (8, true, Some(r3.clone())), + ].into(); + + // test 9 partitions, 3 replicas - index 0 + let param = (9, 3, false).into(); + let computed = generate_replica_map_for_topic(&spus, &param, Some(0)); + let mut expected = BTreeMap::new(); + expected.insert(0, vec![0, 4, 8]); + expected.insert(1, vec![4, 8, 1]); + expected.insert(2, vec![8, 1, 5]); + expected.insert(3, vec![1, 5, 6]); + expected.insert(4, vec![5, 6, 2]); + expected.insert(5, vec![6, 2, 3]); + expected.insert(6, vec![2, 3, 7]); + expected.insert(7, vec![3, 7, 0]); + expected.insert(8, vec![7, 0, 4]); + + assert_eq!(computed, expected); + } +} diff --git a/sc-server/src/core/topics/mod.rs b/sc-server/src/core/topics/mod.rs new file mode 100644 index 0000000000..bfff55901a --- /dev/null +++ b/sc-server/src/core/topics/mod.rs @@ -0,0 +1,24 @@ +mod actions; +mod metadata; +mod reducer; +mod controller; + +pub use self::actions::TopicActions; +pub use self::metadata::{TopicKV, TopicLocalStore}; +pub use self::actions::TopicChangeRequest; +pub use self::reducer::TopicReducer; +pub use self::controller::TopicController; + +use ::metadata::topic::TopicSpec; +use crate::core::common::LSChange; +use crate::core::common::WSAction; + + +use crate::k8::K8ClusterStateDispatcher; + + +pub type K8TopicChangeDispatcher = K8ClusterStateDispatcher; +pub type TopicWSAction = WSAction; +pub type TopicLSChange = LSChange; + + diff --git a/sc-server/src/core/topics/reducer.rs b/sc-server/src/core/topics/reducer.rs new file mode 100644 index 0000000000..0daa5652de --- /dev/null +++ b/sc-server/src/core/topics/reducer.rs @@ -0,0 +1,378 @@ +//! +//! # Topic & Topics Metadata +//! +//! Topic metadata information cached on SC. +//! +//! # Remarks +//! 
Topic Status uses TopicResolution to reflect the state of the replica map: +//! Ok, // replica map has been generated, topic is operational +//! Pending, // not enough SPUs to generate "replica map" +//! Inconsistent, // user changed spec parameters, which is not supported +//! InvalidConfig, // invalid configuration parameters provided +//! +use std::sync::Arc; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use log::{debug, trace}; +use types::log_on_err; +use metadata::spu::SpuSpec; + +use crate::core::spus::SpuLocalStore; +use crate::core::partitions::PartitionWSAction; +use crate::core::partitions::PartitionLocalStore; +use crate::core::common::LSChange; +use crate::ScServerError; + +use super::TopicActions; +use super::TopicChangeRequest; +use super::TopicKV; +use super::TopicLocalStore; +use super::TopicWSAction; + +/// Generates Partition Spec from Topic Spec based on replication and partition factor. +/// For example, if we have a Topic with partitions = #1 and replication = #2, +/// it will generate a Partition named "Topic-0" with a replication of 2. +/// +/// Generated Partition looks like below. Initially, it is not assigned to any SPU +/// Spec +/// name: Topic-0 +/// replication: 2 +/// Status +/// state: Init +/// +/// +/// Actual replica assignment is done by the Partition controller. +#[derive(Debug)] +pub struct TopicReducer { + topic_store: Arc, + spu_store: Arc, + partition_store: Arc, +} + +impl Default for TopicReducer { + fn default() -> Self { + Self { + topic_store: TopicLocalStore::new_shared(), + spu_store: SpuLocalStore::new_shared(), + partition_store: PartitionLocalStore::new_shared(), + } + } +} + +impl TopicReducer { + pub fn new(topic_store: A, spu_store: B, partition_store: C) -> Self + where + A: Into>, + B: Into>, + C: Into>, + { + Self { + topic_store: topic_store.into(), + spu_store: spu_store.into(), + partition_store: partition_store.into(), + } + } + fn topic_store(&self) -> &TopicLocalStore { + &self.topic_store + } + + fn spu_store(&self) -> &SpuLocalStore { + &self.spu_store + } + + fn partition_store(&self) -> &PartitionLocalStore { + &self.partition_store + } + + pub fn process_requests( + &self, + requests: TopicChangeRequest, + ) -> Result { + + trace!("processing requests: {}", requests); + + let mut actions = TopicActions::default(); + + match requests { + TopicChangeRequest::Topic(topic_requests) => { + for topic_request in topic_requests.into_iter() { + match topic_request { + LSChange::Add(topic) => self.add_topic_action_handler(topic, &mut actions), + + LSChange::Mod(new_topic, local_topic) => { + log_on_err!(self.mod_topic_action_handler( + new_topic, + local_topic, + &mut actions, + )); + } + + LSChange::Delete(_) => { + // ignore for now + } + } + } + } + + TopicChangeRequest::Spu(spu_requests) => { + for request in spu_requests.into_iter() { + self.process_spu_kv(request, &mut actions); + } + } + } + + trace!("\n{}", self.topic_store().table_fmt()); + + Ok(actions) + } + + // ----------------------------------- + // Action Handlers + // ----------------------------------- + + /// + /// Triggered when new topic metadata is received. 
+ /// + /// At this point, we only need to ensure that a topic with init status can be moved + /// to pending or error state + /// + fn add_topic_action_handler(&self, topic: TopicKV, actions: &mut TopicActions) { + let name = topic.key(); + + debug!("AddTopic({}) - {}", name, topic); + + self.update_actions_next_state(&topic, actions); + } + + /// + /// Mod Topic Action handler + /// + /// # Remarks + /// Action handler performs the following operations: + /// * compare new topic with local topic + /// * update local cache (if changed) + /// * generate replica_map (if possible) and push partitions to KV store + /// + fn mod_topic_action_handler( + &self, + new_topic: TopicKV, + old_topic: TopicKV, + actions: &mut TopicActions, + ) -> Result<(), IoError> { + let name = new_topic.key(); + debug!("Handling ModTopic: {} ", name); + + // if spec changed - not a supported feature + if new_topic.spec != old_topic.spec { + return Err(IoError::new( + ErrorKind::InvalidData, + format!( + "topic '{}' - update spec... not implemented", + name) + )); + } + + // if topic changed, update status & notify partitions + if new_topic.status != old_topic.status { + self.update_actions_next_state(&new_topic, actions); + } + + Ok(()) + } + + /// process kv + fn process_spu_kv(&self, request: LSChange, actions: &mut TopicActions) { + match request { + LSChange::Add(new_spu) => { + debug!("processing SPU add: {}", new_spu); + + self.generate_replica_map_for_all_topics_handler(actions); + + } + + LSChange::Mod(_new_spu, _old_spu) => { + //debug!("processing SPU mod: {}", new_spu); + + /* + if new_spu.status != old_spu.status { + debug!("detected change in spu status"); + // if spu comes online + // * send SPU a full update + // * ask topic to generate replica map for pending topics + + if old_spu.status.is_offline() && new_spu.status.is_online() { + debug!("spu offline -> online"); + self.generate_replica_map_for_all_topics_handler(actions) + } + } + */ + } + _ => { + // do nothing, ignore for now + } + } + } + + /// + /// Compute next state for topic + /// if state is different, apply actions + /// + fn update_actions_next_state(&self, topic: &TopicKV, actions: &mut TopicActions) { + + let next_state = topic.compute_next_state(self.spu_store(), self.partition_store()); + + debug!("topic: {} next state: {}",topic.key(),next_state); + let mut updated_topic = topic.clone(); + trace!("next state detail: {:#?}", next_state); + + // apply changes in partitions + for partition_kv in updated_topic.apply_next_state(next_state).into_iter() { + actions + .partitions + .push(PartitionWSAction::Add(partition_kv)); + } + + // apply changes to topics + if updated_topic.status.resolution != topic.status.resolution || updated_topic.status.reason != topic.status.reason { + debug!("{} status change to {} from: {}",topic.key(),updated_topic.status,topic.status); + actions.topics.push(TopicWSAction::UpdateStatus(updated_topic)); + } + + } + + /// + /// Generate Replica Map for all Topics handler + /// + /// # Remarks + /// Generally triggered when a new live broker joins the cluster. 
+ /// The handler performs the following operations: + /// * loop through all topics and identify the ones waiting for replica map + /// * generate replica map + /// * push the result to the KV topic queue (if replica map generation succeeded) + /// + fn generate_replica_map_for_all_topics_handler(&self, actions: &mut TopicActions) { + debug!("updating replica maps for topics that are in pending state"); + + // loop through topics & generate replica map (if needed) + self.topic_store().visit_values(|topic| { + if topic.status.need_replica_map_recal() { + let name = topic.key(); + debug!("Generate R-MAP for: {:?}", name); + // topic status to collect modifications + self.update_actions_next_state(topic, actions); + } + }); + } +} + +#[cfg(test)] +mod test2 { + use metadata::topic::{TopicResolution, TopicStatus}; + use metadata::topic::PENDING_REASON; + use utils::actions::Actions; + + use super::TopicReducer; + use super::TopicChangeRequest; + use super::TopicWSAction; + use super::TopicKV; + use super::super::TopicLSChange; + + // if topics are just created, they should transition to pending state if their config is valid + #[test] + fn test_topic_reducer_init_to_pending() { + let topic_reducer = TopicReducer::default(); + let topic_requests: Actions = vec![ + TopicLSChange::add(TopicKV::with_spec("topic1", (1, 1).into())), + TopicLSChange::add(TopicKV::with_spec("topic2", (2, 2).into())), + ] + .into(); + + let actions = topic_reducer + .process_requests(TopicChangeRequest::Topic(topic_requests)) + .expect("actions"); + + // topic key/value store actions + let expected_actions: Actions = vec![ + TopicWSAction::UpdateStatus(TopicKV::new( + "topic1", + (1, 1).into(), + TopicStatus::new(TopicResolution::Pending, vec![], PENDING_REASON), + )), + TopicWSAction::UpdateStatus(TopicKV::new( + "topic2", + (2, 2).into(), + TopicStatus::new(TopicResolution::Pending, vec![], PENDING_REASON), + )), + ] + .into(); + assert_eq!(actions.topics, expected_actions); + } + + /* + #[test] + fn test_process_topics_actions_with_topics() { + + let topic_reducer = TopicReducer::default(); + let topic_store = topic_reducer.topic_store(); + let spu_store = topic_reducer.spu_store(); + + // initialize topics + let topic = TopicKV::new( + (1, 1, false).into(), + TopicStatus::new( + TopicResolution::Pending, + vec![], + "waiting for live spus".to_owned(), + ), + ); + + topic_store.insert("topic1", topic.clone()); + topic_store.insert("topic2", topic.clone()); + + + spu_store.bulk_add(vec![ + // spu_id, online, rack + (5000, true, None), + ]); + + + let mut spu_requests: Actions = Actions::default(); + // add LSChange to turn on SPU + spu_requests.push_once( + SpuLSChange::Mod( + "spu-5000".to_owned(), + (5000,false,None).into(), + (5000,true,None).into())); + + + // Run Test + let actions = topic_reducer.process_requests(TopicChangeRequest::Spu(spu_requests)).expect("requests"); + + // compare store + let expected_topics = TopicLocalStore::new_shared(); + expected_topics.insert("topic1", topic.clone()); + expected_topics.insert("topic2", topic.clone()); + assert_eq!(*topic_store,expected_topics); + + // partition actions + let expected_actions_for_partition: Actions = Actions::default(); + assert_eq!(actions.partitions, expected_actions_for_partition); + + // topic key/value store actions + let exepected_topic = TopicKV::new( + (1, 1, false).into(), + TopicStatus::new(TopicResolution::Ok, vec![vec![5000]], "".to_owned()), + ); + let mut expected_actions_for_kvs: Actions = Actions::default(); + 
expected_actions_for_kvs.push(TopicWSAction::UpdateStatus( + exepected_topic.clone(), + )); + expected_actions_for_kvs.push(TopicWSAction::UpdateStatus( + exepected_topic, + )); + assert_eq!(actions.topics, expected_actions_for_kvs); + } + */ +} diff --git a/sc-server/src/core/world_store.rs b/sc-server/src/core/world_store.rs new file mode 100644 index 0000000000..6972cf2fd7 --- /dev/null +++ b/sc-server/src/core/world_store.rs @@ -0,0 +1,36 @@ +use futures::Future; +use futures::channel::mpsc::Receiver; + +use utils::actions::Actions; +use metadata::spu::SpuSpec; +use metadata::topic::TopicSpec; +use metadata::partition::PartitionSpec; + +use crate::ScServerError; +use crate::core::common::WSAction; +use crate::core::common::LSChange; + + +/// Update the world state +pub trait WSUpdateService { + + type ResponseFuture: Send + Future> + 'static; + + fn update_spu(&self,ws_actions: WSAction) -> Self::ResponseFuture; + fn update_topic(&self,ws_actions: WSAction) -> Self::ResponseFuture; + fn update_partition(&self,ws_actions: WSAction) -> Self::ResponseFuture; + +} + + +pub type WSChangeChannel = Receiver>>; + + +pub trait WSChangeDispatcher { + + fn create_spu_channel(&mut self) -> WSChangeChannel; + + fn create_topic_channel(&mut self) -> WSChangeChannel; + + fn create_partition_channel(&mut self) -> WSChangeChannel; +} diff --git a/sc-server/src/error.rs b/sc-server/src/error.rs new file mode 100644 index 0000000000..b4a9989c09 --- /dev/null +++ b/sc-server/src/error.rs @@ -0,0 +1,76 @@ +// error.rs +// Server Error handling (union of errors used by server) +// + +use std::fmt; +use std::io::Error as StdIoError; +use futures::channel::mpsc::SendError; + +use kf_socket::KfSocketError; +use k8_config::ConfigError; +use k8_client::ClientError; +use types::PartitionError; +use types::SpuId; + +#[derive(Debug)] +pub enum ScServerError { + IoError(StdIoError), + SendError(SendError), + SocketError(KfSocketError), + K8ConfigError(ConfigError), + PartitionError(PartitionError), + K8ClientError(ClientError), + UnknownSpu(SpuId), + SpuCommuncationError(SpuId,KfSocketError), +} + +impl fmt::Display for ScServerError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::IoError(err) => write!(f, "{}", err), + Self::SendError(err) => write!(f,"{}",err), + Self::SocketError(err) => write!(f,"{}",err), + Self::K8ConfigError(err) => write!(f,"{}",err), + Self::PartitionError(err) => write!(f,"{}",err), + Self::K8ClientError(err) => write!(f,"{}",err), + Self::UnknownSpu(spu) => write!(f,"unknown spu: {}",spu), + Self::SpuCommuncationError(id,err) => write!(f,"spu comm error: {}, {}",id,err) + } + } +} + +impl From for ScServerError { + fn from(error: StdIoError) -> Self { + ScServerError::IoError(error) + } +} + +impl From for ScServerError { + fn from(error: KfSocketError) -> Self { + ScServerError::SocketError(error) + } +} + +impl From for ScServerError { + fn from(error: SendError) -> Self { + ScServerError::SendError(error) + } +} + +impl From for ScServerError { + fn from(error: ConfigError) -> Self { + ScServerError::K8ConfigError(error) + } +} + +impl From for ScServerError { + fn from(error: PartitionError) -> Self { + ScServerError::PartitionError(error) + } +} + +impl From for ScServerError { + fn from(error: ClientError) -> Self { + ScServerError::K8ClientError(error) + } +} diff --git a/sc-server/src/init.rs b/sc-server/src/init.rs new file mode 100644 index 0000000000..6c71c2cacb --- /dev/null +++ b/sc-server/src/init.rs @@ -0,0 +1,123 @@ +//! +//! 
# Initialization routines for Streaming Coordinator (SC) +//! +//! All processing engines are hooked-up here. Channels are created and split between senders +//! and receivers. +//! +use std::sync::Arc; +use log::info; + +use future_helper::run; + +use crate::conn_manager::ConnManager; + +use crate::core::LocalStores; +use crate::core::ShareLocalStores; +use crate::core::WSUpdateService; +use crate::core::WSChangeDispatcher; +use crate::core::spus::SpuController; +use crate::core::topics::TopicController; +use crate::core::partitions::PartitionController; +use crate::cli::parse_cli_or_exit; +use crate::services::create_public_server; +use crate::services::create_internal_server; +use crate::services::InternalApiServer; +use crate::services::PubliApiServer; +use crate::k8::K8WSUpdateService; +use crate::k8::new_shared; +use crate::k8::K8AllChangeDispatcher; +use crate::k8::operator::run_spg_operator; + +pub fn main_loop() { + // parse configuration (program exits on error) + let (sc_config,k8_config) = parse_cli_or_exit(); + + + run( async move { + + // init k8 service + let k8_client = new_shared(k8_config); + + let namespace = sc_config.namespace.clone(); + let local_stores = LocalStores::shared_metadata(sc_config); + + let k8_ws_service = K8WSUpdateService::new(k8_client.clone()); + let mut k8_dispatcher = K8AllChangeDispatcher::new(k8_client.clone(),namespace.clone(),local_stores.clone()); + let (metadata,internal_server) = create_core_services(local_stores,k8_ws_service.clone(),&mut k8_dispatcher); + let public_server = create_k8_services(metadata,k8_ws_service,namespace); + + k8_dispatcher.run(); + let _public_shutdown = public_server.run(); + let _private_shutdown = internal_server.run(); + + println!("Streaming Coordinator started successfully"); + info!("SC started successfully") + }); + +} + +/// essential services which are needed +pub fn create_core_services(local_stores: ShareLocalStores,ws_service: W,ws_dispatcher: &mut D) -> (ShareLocalStores,InternalApiServer) + where W: WSUpdateService + Clone + Sync + Send + 'static, + D: WSChangeDispatcher +{ + + // connect conn manager and controllers + let conn_manager = ConnManager::new_with_local_stores(local_stores.clone()); + let spu_lc_channel = ws_dispatcher.create_spu_channel(); + let topic_spu_channel = ws_dispatcher.create_spu_channel(); + let topic_topic_channel = ws_dispatcher.create_topic_channel(); + let partition_channel = ws_dispatcher.create_partition_channel(); + let partition_spu_channel = ws_dispatcher.create_spu_channel(); + + let shared_conn_manager = Arc::new(conn_manager); + + // start controller + let spu_controller = SpuController::new( + local_stores.clone(), + shared_conn_manager.clone(), + spu_lc_channel, + ws_service.clone()); + + let partiton_controller = PartitionController::new( + local_stores.clone(), + shared_conn_manager.clone(), + partition_channel, + partition_spu_channel, + ws_service.clone() + ); + + let private_server = create_internal_server( + local_stores.clone(), + shared_conn_manager, + spu_controller.conn_sender(), + partiton_controller.lrs_sendr() + ); + + spu_controller.run(); + + let topic_controller = TopicController::new( + local_stores.clone(), + topic_spu_channel, + topic_topic_channel, + ws_service.clone() + ); + topic_controller.run(); + + + partiton_controller.run(); + + + (local_stores,private_server) + +} + +/// k8 specific services +fn create_k8_services(metadata: ShareLocalStores, k8_ws: K8WSUpdateService,namespace: String) -> PubliApiServer { + + // k8 operators + 
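// The SPG operator (see the operator module below) watches SpuGroup objects + // and materializes each group as a StatefulSet plus a headless Service, so it + // only needs the K8 client, the namespace, and the owned-SPU store. + 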
run_spg_operator(k8_ws.own_client(),namespace.clone(),metadata.owned_spus()); + + create_public_server(metadata.clone(),k8_ws,namespace) + +} diff --git a/sc-server/src/k8/k8_dispatcher.rs b/sc-server/src/k8/k8_dispatcher.rs new file mode 100644 index 0000000000..7ba275e7bd --- /dev/null +++ b/sc-server/src/k8/k8_dispatcher.rs @@ -0,0 +1,190 @@ +//! +//! # Kubernetes Dispatcher +//! +//! Dispatcher is the Event Loop, listens to messages from etcd KV store, translates them +//! to actions, and sends them to Streaming Coordinator Workflow for processing. +//! +use std::time::Duration; +use std::sync::Arc; +use std::fmt::Debug; +use std::fmt::Display; + +use futures::future::FutureExt; +use futures::channel::mpsc::Sender; +use futures::select; +use futures::stream::StreamExt; +use futures::sink::SinkExt; +use log::debug; +use log::error; +use log::info; +use log::trace; +use serde::de::DeserializeOwned; + +use utils::actions::Actions; +use types::defaults::SC_RECONCILIATION_INTERVAL_SEC; +use future_helper::spawn; +use future_helper::sleep; +use k8_metadata::core::metadata::K8List; +use k8_metadata::core::metadata::K8Watch; +use k8_metadata::core::Spec as K8Spec; + +use crate::core::common::new_channel; +use crate::core::common::LocalStore; +use crate::core::common::LSChange; +use crate::core::Spec; +use crate::core::WSChangeChannel; +use crate::ScServerError; + +use crate::k8::SharedK8Client; +use crate::k8::k8_events_to_actions::k8_events_to_metadata_actions; +use crate::k8::k8_events_to_actions::k8_event_stream_to_metadata_actions; + + + +/// Sends out Local State Changes by comparing against Cluster state stored in local K8 where SC is running +/// Similar to Kubernetes Shared Informer +pub struct K8ClusterStateDispatcher where S: Spec, S::Status: Debug + PartialEq , S::Key: Debug { + client: SharedK8Client, + metadata: Arc>, + senders: Vec>>>, + namespace: String +} + + +impl K8ClusterStateDispatcher + + where + S: Spec + PartialEq + Debug + Sync + Send + 'static, + S::Status: PartialEq + Debug + Sync + Send + 'static, + S::Key: Display + Debug + Clone + Sync + Send + 'static, + K8Watch::K8Spec as K8Spec>::Status>: DeserializeOwned, + K8List::K8Spec as K8Spec>::Status>: DeserializeOwned, + S::K8Spec: Debug + Sync + Send + 'static , + <::K8Spec as K8Spec>::Status: Debug + Sync + Send + 'static + +{ + pub fn new(namespace: String,client: SharedK8Client,metadata: Arc>) -> Self { + Self { + namespace, + client, + metadata, + senders: vec![], + } + } + + pub fn create_channel(&mut self) -> WSChangeChannel { + let (sender,receiver) = new_channel(); + self.senders.push(sender); + receiver + } + + + pub fn run(self) { + spawn(self.outer_loop()); + } + + async fn outer_loop(mut self) { + info!("starting {} kv dispatcher loop",S::LABEL); + loop { + self.inner_loop().await; + } + } + + /// + /// Kubernetes Dispatcher Event Loop + /// + async fn inner_loop(&mut self) { + let mut resume_stream: Option = None; + + // retrieve all items from K8 store first + match self.retrieve_all_k8_items().await { + Ok(items) => { + resume_stream = Some(items); + } + Err(err) => error!("cannot retrieve K8 store objects: {}", err), + }; + + // create watch streams + let mut k8_stream = self + .client + .watch_stream_since::(&self.namespace, resume_stream) + .fuse(); + + trace!("starting watch stream for: {}",S::LABEL); + loop { + select! 
{ + + _ = (sleep(Duration::from_secs(SC_RECONCILIATION_INTERVAL_SEC))).fuse() => { + debug!("timer fired - kicking off SC reconciliation"); + break; + }, + + k8_result = k8_stream.next() => { + + if let Some(result) = k8_result { + match result { + Ok(auth_token_msgs) => { + let actions = k8_event_stream_to_metadata_actions( + Ok(auth_token_msgs), + &self.metadata, + ); + self.send_actions(actions).await; + } + Err(err) => error!("{}", err), + } + + } else { + debug!("SPU stream terminated during update auth-token processing... reconnecting"); + break; + } + }, + + } + } + } + + /// + /// Retrieve all items from the Kubernetes (K8) store and forward them to the processing engine + /// + async fn retrieve_all_k8_items(&mut self) -> Result { + let k8_objects = self + .client + .retrieve_items::(&self.namespace) + .await?; + + self.process_retrieved_items(k8_objects) + .await + } + + /// + /// Convert items into actions and send to Controller dispatcher for processing + /// + async fn process_retrieved_items( + &mut self, + k8_items: K8List::K8Spec as K8Spec>::Status>, + ) -> Result { + + let version = k8_items.metadata.resource_version.clone(); + + debug!("UpdateAll {}",S::LABEL); + + // wait to receive all items before sending to channel + let actions = k8_events_to_metadata_actions(k8_items, &self.metadata)?; + + self.send_actions(actions).await; + + // return version to the caller + Ok(version) + } + + async fn send_actions(&mut self,actions: Actions>) { + + // for now do serially + trace!("sending {} LS Changes: {} to {} senders",S::LABEL,actions.count(),self.senders.len()); + for sender in &mut self.senders { + if let Err(err) = sender.send(actions.clone()).await { + error!("error sending actions: {:#?}",err); + } + } + } +} diff --git a/sc-server/src/k8/k8_events_to_actions.rs b/sc-server/src/k8/k8_events_to_actions.rs new file mode 100644 index 0000000000..308987fd87 --- /dev/null +++ b/sc-server/src/k8/k8_events_to_actions.rs @@ -0,0 +1,389 @@ +//! +//! # Auth Token Actions +//! +//! Converts Kubernetes Auth-Token events into Auth-Token actions +//! 
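+//! The conversion below is essentially a three-way diff between the fetched K8 +//! list and the local store: new keys become Add, changed values become Mod, and +//! keys that only exist locally become Delete. A minimal sketch of that diff over +//! plain maps (illustrative only; the key/value types are stand-ins, and it assumes +//! `use std::collections::BTreeMap;`): +//! +//! fn diff(remote: &BTreeMap<String, i32>, local: &BTreeMap<String, i32>) -> Vec<String> { +//! let mut actions = vec![]; +//! for (key, value) in remote { +//! match local.get(key) { +//! None => actions.push(format!("Add({})", key)), +//! Some(old) if old != value => actions.push(format!("Mod({})", key)), +//! _ => {} // unchanged, skip +//! } +//! } +//! for key in local.keys().filter(|k| !remote.contains_key(*k)) { +//! actions.push(format!("Delete({})", key)); +//! } +//! actions +//! } +//! 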
+use std::fmt::Debug; +use std::fmt::Display; + +use log::{error, trace}; +use log::warn; +use log::debug; +use utils::actions::Actions; +use k8_metadata::core::metadata::K8List; +use k8_metadata::core::metadata::K8Obj; +use k8_metadata::core::metadata::K8Watch; +use k8_metadata::core::Spec as K8Spec; +use k8_client::TokenStreamResult; + + +use crate::core::common::KVObject; +use crate::core::common::LSChange; +use crate::core::common::LocalStore; +use crate::core::Spec; +use crate::core::Status; +use crate::ScServerError; + + +/// +/// Translate incoming k8 items into KVInputAction against MemStore which contains local state. +/// It only generates a KVInputAction if the incoming k8 object differs from the memstore. +/// +/// +pub fn k8_events_to_metadata_actions( + k8_tokens: K8List::Status>, + local_store: &LocalStore, +) -> Result>, ScServerError> + where + S: Spec + PartialEq, + S::Status: Status + PartialEq + Debug , + S::K8Spec: Debug, + S::Key: Clone + Ord + Debug + Display +{ + let (mut add_cnt, mut mod_cnt, mut del_cnt, mut skip_cnt) = (0, 0, 0, 0); + let mut local_names = local_store.all_keys(); + let all = local_store.count(); + let mut actions: Actions> = Actions::default(); + + // loop through items and generate add/mod actions + for k8_obj in k8_tokens.items { + + match k8_obj_to_kv_obj(k8_obj) { + + Ok(new_kv_value) => { + let key = new_kv_value.key_owned(); + if let Some(old_value) = local_store.value(&key) { + // object exists + if old_value == new_kv_value { + skip_cnt += 1; //nothing changed + } else { + // diff + mod_cnt += 1; + debug!("adding {}:{} to local store",S::LABEL,new_kv_value.key()); + local_store.insert(new_kv_value.clone()); + actions.push(LSChange::update(new_kv_value, old_value)); + } + + local_names.retain(|n| *n != key); + } else { + // object doesn't exist + add_cnt += 1; + local_store.insert(new_kv_value.clone()); + actions.push(LSChange::add(new_kv_value)); + } + }, + Err(err) => { + error!("{}", err); + skip_cnt += 1; + } + } + + } + + // loop through the remaining names and generate delete actions + for name in local_names.into_iter() { + if local_store.contains_key(&name) { + + if let Some(old_value) = local_store.remove(&name) { + del_cnt += 1; + actions.push(LSChange::delete(old_value)); + } else { + skip_cnt += 1; + error!("delete should never fail since key exists: {:#?}",name); + } + + } else { + skip_cnt += 1; + error!("kv unexpectedly removed... skipped {:#?}", name); + } + } + + // log counters + trace!( + "KV {} events => local: {} [add:{}, mod:{}, del:{}, skip:{}]", + S::LABEL, + all, + add_cnt, + mod_cnt, + del_cnt, + skip_cnt + ); + + Ok(actions) +} + +/// +/// Translates K8 events into metadata actions. 
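+/// ADDED and MODIFIED events both upsert into the local store; a mismatch between +/// the event kind and the store contents (an ADDED key that already exists, or a +/// MODIFIED key that does not) is tolerated with a warning and mapped to the action +/// that reflects reality. DELETED events remove the entry and emit a delete action. 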
+/// +pub fn k8_event_stream_to_metadata_actions( + stream: TokenStreamResult::Status>, + local_store: &LocalStore +) -> Actions> + where + S: Spec + Debug + PartialEq, + S::Key: Debug + Display + Clone, + S::Status: Debug + PartialEq +{ + + let (mut add_cnt, mut mod_cnt, mut del_cnt, mut skip_cnt) = (0, 0, 0, 0); + let mut actions: Actions> = Actions::default(); + + // loop through items and generate add/mod actions + for token in stream.unwrap() { + match token { + Ok(watch_obj) => match watch_obj { + K8Watch::ADDED(k8_obj) => { + let converted: Result,ScServerError> = k8_obj_to_kv_obj(k8_obj); // help out compiler + match converted { + Ok(new_kv_value) => { + trace!("KV ({}): push ADD action", new_kv_value.key()); + if let Some(old_value) = local_store.insert(new_kv_value.clone()) { + // some old value, check if same as new one, if they are same, no need for action + warn!("detected existing value: {:#?} which should not exist",old_value); + if old_value == new_kv_value { + trace!("same value as old value, ignoring"); + } else { + trace!("generating update action: {:#?}",new_kv_value.key()); + actions.push(LSChange::update(new_kv_value,old_value)); + mod_cnt += 1; + } + } else { + // no existing value, which is expected + debug!("adding {}:{} to local store",S::LABEL,new_kv_value.key()); + actions.push(LSChange::add(new_kv_value)); + + add_cnt += 1; + } + }, + Err(err) => { + error!("{}", err); + skip_cnt += 1; + } + } + }, + K8Watch::MODIFIED(k8_obj) => { + let converted: Result,ScServerError> = k8_obj_to_kv_obj(k8_obj); // help out compiler + match converted { + Ok(new_kv_value) => { + + if let Some(old_value) = local_store.insert(new_kv_value.clone()) { + + if old_value == new_kv_value { + // this is unexpected, + warn!("old and new value is same: {:#?}, ignoring",new_kv_value); + } else { + // normal + actions.push(LSChange::update(new_kv_value, old_value)); + mod_cnt += 1; + } + } else { + // doesn't exist, so treat as new + warn!("KV ({}) - not found, generating add", new_kv_value.key()); + actions.push(LSChange::add(new_kv_value)); + } + }, + Err(err) => { + error!("{}", err); + skip_cnt += 1; + } + } + + } + K8Watch::DELETED(k8_obj) => { + match k8_obj_to_kv_obj(k8_obj) { + Ok(kv_value) => { + trace!("KV ({}): push DEL action", kv_value.key()); + + // try to delete it + if let Some(_old_value) = local_store.remove(kv_value.key()) { + del_cnt += 1; + actions.push(LSChange::delete(kv_value)); + } else { + skip_cnt += 1; + warn!("delete should never fail since key exists: {}",kv_value.key()); + } + }, + Err(err) => { + error!("{}", err); + skip_cnt += 1; + } + } + } + }, + Err(err) => error!("invalid AuthToken stream token: {} ... 
(exiting)", err), + } + + } + + // log counters + let all = add_cnt + mod_cnt + del_cnt + skip_cnt; + trace!("K8 Streams {} [all:{}, add:{},mod:{},del:{},ski: {}", + S::LABEL, + all, + add_cnt, + mod_cnt, + del_cnt, + skip_cnt); + + actions +} + +/// +/// Translates K8 object into Sc AuthToken metadata +/// +fn k8_obj_to_kv_obj(k8_obj: K8Obj::Status>) -> Result,ScServerError> + where + S: Spec +{ + S::convert_from_k8(k8_obj).map_err(|err| err.into()) +} + + + +#[cfg(test)] +pub mod test { + + use k8_metadata::topic::TopicSpec as K8TopicSpec; + use k8_metadata::topic::TopicStatus as K8TopicStatus; + use k8_metadata::topic::TopicStatusResolution as K8topicStatusResolution; + use k8_metadata::core::metadata::K8List; + use k8_metadata::core::metadata::K8Obj; + use k8_metadata::core::metadata::K8Watch; + use k8_client::as_token_stream_result; + + //use k8_metadata::core::metadata::K8Watch; + //use k8_metadata::core::Spec as K8Spec; + use crate::core::common::LSChange; + use crate::core::topics::TopicLocalStore; + + use super::k8_events_to_metadata_actions; + use super::k8_event_stream_to_metadata_actions; + use super::k8_obj_to_kv_obj; + + + type TopicList = K8List; + type K8Topic = K8Obj; + type K8TopicWatch = K8Watch; + + #[test] + fn test_check_items_aganst_empty() { + + let mut topics = TopicList::new(); + topics.items.push(K8Topic::new("topic1",K8TopicSpec::default())); + + let topic_store = TopicLocalStore::default(); + + let kv_actions = k8_events_to_metadata_actions(topics,&topic_store).expect("conversion"); + + assert_eq!(kv_actions.count(),1); + let action = kv_actions.iter().next().expect("first"); + match action { + LSChange::Add(new_value) => { + assert_eq!(new_value.key(),"topic1"); + } + _ => assert!(false), + } + topic_store.value(&"topic1".to_owned()).expect("topic1 shoudl exists"); + + } + + #[test] + fn test_check_items_aganst_same() { + + let mut topics = TopicList::new(); + topics.items.push(K8Topic::new("topic1",K8TopicSpec::default())); + + let topic_store = TopicLocalStore::default(); + let topic_kv = k8_obj_to_kv_obj(K8Topic::new("topic1",K8TopicSpec::default())).expect("work"); + topic_store.insert(topic_kv); + + let kv_actions = k8_events_to_metadata_actions(topics,&topic_store).expect("conversion"); + + assert_eq!(kv_actions.count(),0); + } + + #[test] + fn test_items_generate_modify() { + + let mut status = K8TopicStatus::default(); + status.resolution = K8topicStatusResolution::Provisioned; + let new_topic = K8Topic::new("topic1",K8TopicSpec::default()) + .set_status(status); + let old_topic = K8Topic::new("topic1",K8TopicSpec::default()); + + let mut topics = TopicList::new(); + topics.items.push(new_topic.clone()); + + let topic_store = TopicLocalStore::default(); + let old_kv = k8_obj_to_kv_obj(old_topic).expect("conversion"); + topic_store.insert(old_kv.clone()); + + let kv_actions = k8_events_to_metadata_actions(topics,&topic_store).expect("conversion"); + + assert_eq!(kv_actions.count(),1); + let action = kv_actions.iter().next().expect("first"); + match action { + LSChange::Mod(new,old) => { + let new_kv = k8_obj_to_kv_obj(new_topic).expect("conversion"); + assert_eq!(new.key(),new_kv.key()); + assert_eq!(new,&new_kv); + assert_eq!(old,&old_kv); + } + _ => assert!(false), + } + } + + #[test] + fn test_items_delete() { + + let topics = TopicList::new(); + + let topic_store = TopicLocalStore::default(); + let topic_kv = k8_obj_to_kv_obj(K8Topic::new("topic1",K8TopicSpec::default())).expect("work"); + topic_store.insert(topic_kv); + + let kv_actions = 
k8_events_to_metadata_actions(topics,&topic_store).expect("conversion"); + + assert_eq!(kv_actions.count(),1); + let action = kv_actions.iter().next().expect("first"); + match action { + LSChange::Delete(old_value) => { + assert_eq!(old_value.key(),"topic1"); + } + _ => assert!(false), + } + } + + + + + #[test] + fn test_watch_add_actions() { + + let new_topic = K8Topic::new("topic1",K8TopicSpec::default()) + .set_status(K8TopicStatus::default()); + + + let mut watches = vec![]; + watches.push(K8TopicWatch::ADDED(new_topic.clone())); + + + let topic_store = TopicLocalStore::default(); + + let kv_actions = k8_event_stream_to_metadata_actions(as_token_stream_result(watches),&topic_store); + + assert_eq!(kv_actions.count(),1); + let action = kv_actions.iter().next().expect("first"); + match action { + LSChange::Add(new_value) => { + assert_eq!(new_value.key(),"topic1"); + } + _ => assert!(false), + } + topic_store.value(&"topic1".to_owned()).expect("topic1 should exist"); + + } + + +} + \ No newline at end of file diff --git a/sc-server/src/k8/k8_operations/config_map_ops.rs b/sc-server/src/k8/k8_operations/config_map_ops.rs new file mode 100644 index 0000000000..ad77eb7d5c --- /dev/null +++ b/sc-server/src/k8/k8_operations/config_map_ops.rs @@ -0,0 +1,53 @@ +//! +//! # ConfigMap Key/Value Store Actions +//! +//! Actions for ConfigMap communication with Key Value store. +//! +use std::collections::BTreeMap; + +use log::{debug, trace}; + +use k8_metadata::core::metadata::InputK8Obj; +use k8_metadata::core::metadata::InputObjectMeta; +use k8_metadata::core::Spec; +use k8_client::ConfigMapSpec; + + +use crate::k8::SharedK8Client; +use crate::ScServerError; + +#[allow(dead_code)] +/// Establish connection to K8 and create a new config_map +pub async fn add_config_map( + client: SharedK8Client, + config_map_name: String, + data: BTreeMap, +) -> Result<(), ScServerError> { + debug!( + "apply config_map '{}' with {} entries", + config_map_name, + data.len() + ); + + + let new_map: InputK8Obj = InputK8Obj { + api_version: ConfigMapSpec::api_version(), + kind: ConfigMapSpec::kind(), + metadata: InputObjectMeta { + name: config_map_name, + namespace: "default".to_string(), + ..Default::default() + }, + data, + ..Default::default() + }; + + trace!("send create config_map to K8 {:#?}", &new_map); + + client + .apply::(new_map) + .await?; + + + Ok(()) +} diff --git a/sc-server/src/k8/k8_operations/mod.rs b/sc-server/src/k8/k8_operations/mod.rs new file mode 100644 index 0000000000..08cc36e4fb --- /dev/null +++ b/sc-server/src/k8/k8_operations/mod.rs @@ -0,0 +1,5 @@ + +mod config_map_ops; +// mod secret_ops; + +pub use self::config_map_ops::add_config_map; diff --git a/sc-server/src/k8/k8_ws_service.rs b/sc-server/src/k8/k8_ws_service.rs new file mode 100644 index 0000000000..f15dc3fac8 --- /dev/null +++ b/sc-server/src/k8/k8_ws_service.rs @@ -0,0 +1,247 @@ +//! +//! # Update KV Store with SPU status (online/offline) +//! 
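+//! Each incoming WSAction is mapped onto a K8 client call below: Add applies a +//! new object (deriving K8 metadata from either the item's own context or its +//! parent's), UpdateStatus patches only the status portion, UpdateSpec re-applies +//! the spec, and Delete is not implemented yet. +//! 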
+use std::fmt::Debug; +use std::fmt::Display; +use std::convert::Into; +use std::io::Error as IoError; +use std::io::ErrorKind; + + +use futures::future::BoxFuture; +use futures::future::FutureExt; +use log::trace; +use log::warn; +use log::debug; +use serde::de::DeserializeOwned; +use serde::Serialize; + +use metadata::topic::TopicSpec; +use metadata::partition::PartitionSpec; +use metadata::spu::SpuSpec; +use k8_metadata::core::metadata::InputK8Obj; + +use types::log_on_err; + +use k8_metadata::core::Spec as K8Spec; +use k8_metadata::core::metadata::UpdateK8ObjStatus; +use k8_client::K8Client; + +use crate::ScServerError; +use crate::core::Spec; +use crate::core::common::KVObject; +use crate::core::WSUpdateService; +use crate::core::common::WSAction; +use super::SharedK8Client; + +#[derive(Clone)] +pub struct K8WSUpdateService(SharedK8Client); + + + +impl K8WSUpdateService { + + pub fn new(client: SharedK8Client) -> Self { + Self(client) + } + + + pub fn client(&self) -> &K8Client { + &self.0 + } + + pub fn own_client(&self) -> SharedK8Client { + self.0.clone() + } + + + pub async fn add( + &self, + value: KVObject, + ) -> Result<(), ScServerError> + where S: Spec + Debug, + S::Status: Debug + PartialEq, + S::Key: Display + Debug , + ::K8Spec: Debug + From + Default + DeserializeOwned + Serialize + Clone , + <::K8Spec as K8Spec>::Status: Default + Debug + DeserializeOwned + Serialize + Clone + + { + + debug!("Adding: {}:{}",S::LABEL,value.key()); + trace!("adding KV {:#?} to k8 kv", value); + + let (key, spec,kv_ctx) = value.parts(); + let k8_spec: S::K8Spec = spec.into(); + + if let Some(item_ctx) = kv_ctx.item_ctx { + + let new_k8 = InputK8Obj::new(k8_spec,item_ctx.into()); + + self.0 + .apply(new_k8) + .await.map(|_| ()).map_err(|err|err.into()) + + } else if let Some(ref parent_metadata) = kv_ctx.parent_ctx { + + let item_name = key.to_string(); + + let new_k8 = InputK8Obj::new(k8_spec,parent_metadata.make_child_input_metadata::<<::Owner as Spec>::K8Spec>(item_name)); + + self.0 + .apply(new_k8) + .await.map(|_| ()).map_err(|err| err.into()) + } else { + Err(IoError::new( + ErrorKind::Other, + format!("{} add failed - no item or context {}",S::LABEL,key) + ).into()) + } + + + } + + /// only update the status + async fn update_status( + &self, + value: KVObject, + ) -> Result<(), ScServerError> + where S: Spec + Debug, + S::Key: Debug + Display, + S::Status: Debug + Display + Into< <::K8Spec as K8Spec>::Status>, + ::K8Spec: Debug + Default + Serialize + DeserializeOwned, + <::K8Spec as K8Spec>::Status: Default + Debug + Serialize + DeserializeOwned + + { + + debug!("K8 Update Status: {} key: {} value: {}",S::LABEL,value.key(),value.status); + trace!("status update: {:#?}",value.status); + + let k8_status: <::K8Spec as K8Spec>::Status = value.status().clone().into(); + + if let Some(ref kv_ctx) = value.kv_ctx().item_ctx { + + let k8_input: UpdateK8ObjStatus::K8Spec as K8Spec>::Status> = UpdateK8ObjStatus { + api_version: S::K8Spec::api_version(), + kind: S::K8Spec::kind(), + metadata: kv_ctx.clone().into(), + status: k8_status, + ..Default::default() + }; + + + self.0 + .update_status(&k8_input) + .await + .map(|_| ()) + .map_err(|err| err.into()) + } else { + Err(IoError::new( + ErrorKind::Other, + "KVS update failed - missing KV ctx".to_owned(), + ).into()) + } + + + } + + /// update both spec and status + async fn update_spec( + &self, + value: KVObject, + ) -> Result<(), ScServerError> + where + S: Spec + Debug + Into<::K8Spec>, + S::Key: Debug + Display , + S::Status: Debug + 
Into< <::K8Spec as K8Spec>::Status>, + ::K8Spec: Debug + Default + Serialize + DeserializeOwned + Clone, + <::K8Spec as K8Spec>::Status: Default + Debug + Serialize + DeserializeOwned + Clone + + { + + debug!("K8 Update Spec: {} key: {}",S::LABEL,value.key()); + trace!("K8 Update Spec: {:#?}",value); + let k8_spec: ::K8Spec = value.spec().clone().into(); + + if let Some(ref kv_ctx) = value.kv_ctx().item_ctx { + + trace!("updating spec: {:#?}",k8_spec); + + let k8_input: InputK8Obj = InputK8Obj { + api_version: S::K8Spec::api_version(), + kind: S::K8Spec::kind(), + metadata: kv_ctx.clone().into(), + spec: k8_spec, + ..Default::default() + }; + + + self.0 + .apply(k8_input) + .await + .map(|_| ()) + .map_err(|err| err.into()) + } else { + Err(IoError::new( + ErrorKind::Other, + "KVS update failed - missing KV ctx".to_owned(), + ).into()) + } + + + } + + + async fn inner_process(&self,action: WSAction) -> Result<(), ScServerError> + + where + S: Spec + Debug, + S::Key: Display + Debug , + S::Status: Debug + PartialEq + Display, + ::K8Spec: From + Clone + Debug + Default + Serialize + DeserializeOwned , + <::K8Spec as K8Spec>::Status: From + Clone + Default + Debug + Serialize + DeserializeOwned + + { + + match action { + WSAction::Add(value) => log_on_err!(self.add(value).await), + WSAction::UpdateStatus(value) => log_on_err!(self.update_status(value).await), + WSAction::UpdateSpec(value) => log_on_err!(self.update_spec(value).await), + WSAction::Delete(_key) => warn!("delete not yet implemented") + } + + Ok(()) + } +} + +impl WSUpdateService for K8WSUpdateService { + + type ResponseFuture = BoxFuture<'static, Result<(), ScServerError>>; + + fn update_spu(&self,ws_actions: WSAction) -> Self::ResponseFuture { + + let service = self.clone(); + async move { + service.inner_process(ws_actions).await?; + Ok(()) + }.boxed() + } + + fn update_topic(&self,ws_actions: WSAction) -> Self::ResponseFuture { + + let service = self.clone(); + async move { + service.inner_process(ws_actions).await?; + Ok(()) + }.boxed() + } + + fn update_partition(&self,ws_actions: WSAction) -> Self::ResponseFuture { + + let service = self.clone(); + async move { + service.inner_process(ws_actions).await?; + Ok(()) + }.boxed() + } + +} \ No newline at end of file diff --git a/sc-server/src/k8/mod.rs b/sc-server/src/k8/mod.rs new file mode 100644 index 0000000000..7020faf30d --- /dev/null +++ b/sc-server/src/k8/mod.rs @@ -0,0 +1,80 @@ + +pub mod operator; +pub mod k8_operations; +pub mod k8_events_to_actions; +mod k8_dispatcher; +mod k8_ws_service; + +use std::sync::Arc; +use std::convert::TryInto; +use std::convert::TryFrom; +use std::fmt::Display; +use std::fmt::Debug; +use std::io::Error as IoError; +use std::io::ErrorKind; + +use k8_client::K8Client; +use k8_metadata::core::Spec as K8Spec; +use k8_metadata::core::metadata::K8Obj; +use k8_config::K8Config; + +use crate::core::common::KVObject; +use crate::core::common::KvContext; +use crate::core::Spec; + +pub use k8_ws_service::K8WSUpdateService; +pub use k8_dispatcher::K8ClusterStateDispatcher; +pub use operator::K8AllChangeDispatcher; + +pub type SharedK8Client = Arc; + + +pub fn new_shared(config: K8Config) -> SharedK8Client { + let client= K8Client::new(config).expect("Error: K8 client failed to initialize!"); + Arc::new(client) +} + + + + pub fn default_convert_from_k8(k8_obj: K8Obj::Status>) -> + Result,IoError> + + where + S: Spec, + ::Status : Into, + S::K8Spec: Into, + S::Key: TryFrom + Display, + <::Key as TryFrom>::Error: Debug + { + let k8_name = 
k8_obj.metadata.name.clone(); + let result: Result = k8_name.try_into(); + match result { + Ok(key) => { + + // convert K8 Spec/Status into Metadata Spec/Status + let local_spec = k8_obj.spec.into(); + let local_status = if let Some(status) = k8_obj.status { + status.into() + } else { + ::default() + }; + + // grab KV ctx and create the KV object + let ctx = KvContext::default().with_ctx(k8_obj.metadata); + let local_kv = KVObject::new(key,local_spec, local_status).with_kv_ctx(ctx); + + Ok(local_kv) + + }, + Err(err) => { + Err(IoError::new( + ErrorKind::InvalidData, + format!( + "error converting key: {:#?}", + err) + )) + } + } + +} + diff --git a/sc-server/src/k8/operator/conversion.rs b/sc-server/src/k8/operator/conversion.rs new file mode 100644 index 0000000000..7bdafcc364 --- /dev/null +++ b/sc-server/src/k8/operator/conversion.rs @@ -0,0 +1,168 @@ +/// convert cluster spec to statefulset input +use std::collections::HashMap; + +use k8_metadata::core::metadata::InputK8Obj; +use k8_metadata::core::metadata::InputObjectMeta; +use k8_metadata::core::metadata::Env; +use k8_metadata::core::metadata::ObjectMeta; +use k8_metadata::core::metadata::TemplateMeta; +use k8_metadata::core::metadata::LabelSelector; +use k8_metadata::core::metadata::TemplateSpec; +use k8_metadata::core::metadata::LabelProvider; +use k8_client::StatefulSetSpec; +use k8_client::VolumeAccessMode; +use k8_client::ContainerSpec; +use k8_client::ContainerPortSpec; +use k8_client::PodSpec; +use k8_client::VolumeMount; +use k8_client::ResourceRequirements; +use k8_client::VolumeRequest; +use k8_client::PersistentVolumeClaim; +use k8_client::ServiceSpec; +use k8_client::ServicePort; +use k8_metadata::spg::SpuGroupSpec; +use k8_metadata::core::Spec; +use types::defaults::SPU_DEFAULT_NAME; +use types::defaults::SPU_PUBLIC_PORT; +use types::defaults::SPU_PRIVATE_PORT; +use types::defaults::PRODUCT_NAME; +use types::defaults::IMAGE_NAME; +use types::defaults::FLV_LOG_BASE_DIR; +use types::defaults::FLV_LOG_SIZE; + +/// convert SpuGroup to Statefulset +pub fn convert_cluster_to_statefulset( + group_spec: &SpuGroupSpec, + metadata: &ObjectMeta, + group_name: &str, + namespace: &str) + -> InputK8Obj +{ + + let statefulset_name = group_name.to_owned(); + let spec = generate_stateful(group_spec, group_name,namespace); + let owner_ref = metadata.make_owner_reference::(); + + InputK8Obj { + api_version: StatefulSetSpec::api_version(), + kind: StatefulSetSpec::kind(), + metadata: InputObjectMeta { + name: statefulset_name.clone(), + namespace: metadata.namespace().to_string(), + owner_references: vec![owner_ref], + ..Default::default() + }, + spec, + ..Default::default() + } + +} + +/// generate statefulset spec from cluster spec +fn generate_stateful(spg_spec: &SpuGroupSpec,name: &str,namespace: &str) -> StatefulSetSpec { + + let replicas = spg_spec.replicas; + let spg_template = &spg_spec.template.spec; + let mut public_port = ContainerPortSpec { + container_port: spg_template.public_endpoint.as_ref().map(|t| t.port).unwrap_or(SPU_PUBLIC_PORT), + ..Default::default() + }; + public_port.name = Some("public".to_owned()); + + let mut private_port = ContainerPortSpec { + container_port: spg_template.private_endpoint.as_ref().map(|t|t.port).unwrap_or(SPU_PRIVATE_PORT), + ..Default::default() + }; + private_port.name = Some("private".to_owned()); + + // storage is special because defaults are explicit. 
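+ // Both the env block and the volume claim template below consume these storage + // defaults: the log dir and size are exported to the SPU via FLV_LOG_BASE_DIR / + // FLV_LOG_SIZE, and the same size is requested in the PersistentVolumeClaim. 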
+ let storage = spg_spec.template.spec.storage.clone().unwrap_or_default(); + let size = storage.size(); + let mut env = vec![ + Env::key_field_ref("SPU_INDEX", "metadata.name"), + Env::key_value("FLV_SC_PRIVATE_HOST",&format!("flv-sc-internal.{}.svc.cluster.local",namespace)), + Env::key_value("SPU_MIN", &format!("{}",spg_spec.min_id())), + Env::key_value(FLV_LOG_BASE_DIR,&storage.log_dir()), + Env::key_value(FLV_LOG_SIZE, &size) + ]; + + env.append(&mut spg_template.env.clone()); + + let template = TemplateSpec { + metadata: Some(TemplateMeta::default().set_labels( + vec![ + ("app", SPU_DEFAULT_NAME), + ("group",name) + ])), + spec: PodSpec { + termination_grace_period_seconds: Some(10), + containers: vec![ContainerSpec { + name: SPU_DEFAULT_NAME.to_owned(), + image: Some(format!("{}:0.1-alpha", IMAGE_NAME)), + ports: vec![public_port, private_port], + volume_mounts: vec![VolumeMount { + name: "data".to_owned(), + mount_path: format!("/var/lib/{}/data", PRODUCT_NAME), + ..Default::default() + }], + env: Some(env), + ..Default::default() + }], + ..Default::default() + }, + }; + let claim = PersistentVolumeClaim { + access_modes: vec![VolumeAccessMode::ReadWriteOnce], + storage_class_name: format!("{}-{}", PRODUCT_NAME, SPU_DEFAULT_NAME), + resources: ResourceRequirements { + requests: VolumeRequest { + storage: size + }, + }, + }; + + StatefulSetSpec { + replicas: Some(replicas), + service_name: name.to_owned(), + selector: LabelSelector::new_labels(vec![ + ("app", SPU_DEFAULT_NAME), + ("group",name) + ]), + template, + volume_claim_templates: vec![TemplateSpec { + spec: claim, + metadata: Some(TemplateMeta::named("data")), + }], + ..Default::default() + } +} + + +pub fn generate_service(spg: &SpuGroupSpec,name: &str) -> ServiceSpec { + + let spg_template = &spg.template.spec; + let mut public_port = ServicePort { + port: spg_template.public_endpoint.as_ref().map(|t|t.port).unwrap_or(SPU_PUBLIC_PORT), + ..Default::default() + }; + + public_port.name = Some("public".to_owned()); + let mut private_port = ServicePort { + port: spg_template.private_endpoint.as_ref().map(|t|t.port).unwrap_or(SPU_PRIVATE_PORT), + ..Default::default() + }; + private_port.name = Some("private".to_owned()); + + + let mut selector = HashMap::new(); + selector.insert("app".to_owned(), SPU_DEFAULT_NAME.to_owned()); + selector.insert("group".to_owned(),name.to_owned()); + + ServiceSpec { + cluster_ip: "None".to_owned(), + ports: vec![public_port, private_port], + selector: Some(selector), + ..Default::default() + } + +} \ No newline at end of file diff --git a/sc-server/src/k8/operator/mod.rs b/sc-server/src/k8/operator/mod.rs new file mode 100644 index 0000000000..7d8652f62c --- /dev/null +++ b/sc-server/src/k8/operator/mod.rs @@ -0,0 +1,75 @@ +mod spg_operator; +mod conversion; +mod spg_group; + +use metadata::spu::SpuSpec; +use metadata::topic::TopicSpec; +use metadata::partition::PartitionSpec; + +use crate::core::WSChangeDispatcher; +use crate::core::WSChangeChannel; +use crate::k8::SharedK8Client; +use crate::core::ShareLocalStores; +use crate::core::spus::K8SpuChangeDispatcher; +use crate::core::spus::SharedSpuLocalStore; +use crate::core::partitions::K8PartitionChangeDispatcher; +use crate::core::topics::K8TopicChangeDispatcher; +use spg_operator::SpgOperator; + +use self::conversion::convert_cluster_to_statefulset; +use self::conversion::generate_service; +use self::spg_group::SpuGroupObj; +use self::spg_group::SpuValidation; + +pub struct K8AllChangeDispatcher { + spu: K8SpuChangeDispatcher, + topic: 
K8TopicChangeDispatcher, + partition: K8PartitionChangeDispatcher +} + +impl K8AllChangeDispatcher { + + pub fn new(client: SharedK8Client,namespace: String,local_stores: ShareLocalStores) -> Self { + + Self { + spu: K8SpuChangeDispatcher::new(namespace.clone(),client.clone(),local_stores.spus().clone()), + topic: K8TopicChangeDispatcher::new(namespace.clone(),client.clone(),local_stores.topics().clone()), + partition: K8PartitionChangeDispatcher::new(namespace.clone(),client.clone(),local_stores.partitions().clone()) + } + } + + pub fn run(self) { + self.spu.run(); + self.topic.run(); + self.partition.run(); + } + + +} + + +impl WSChangeDispatcher for K8AllChangeDispatcher { + + fn create_spu_channel(&mut self) -> WSChangeChannel<SpuSpec> { + self.spu.create_channel() + } + + fn create_topic_channel(&mut self) -> WSChangeChannel<TopicSpec> { + self.topic.create_channel() + } + + fn create_partition_channel(&mut self) -> WSChangeChannel<PartitionSpec> { + self.partition.create_channel() + } +} + + + +pub fn run_spg_operator( + client: SharedK8Client, + namespace: String, + spu_store: SharedSpuLocalStore +) { + SpgOperator::new(client,namespace,spu_store).run(); +} + diff --git a/sc-server/src/k8/operator/spg_group.rs b/sc-server/src/k8/operator/spg_group.rs new file mode 100644 index 0000000000..846b3a9265 --- /dev/null +++ b/sc-server/src/k8/operator/spg_group.rs @@ -0,0 +1,38 @@ +use k8_metadata::core::metadata::K8Obj; +use k8_metadata::spg::SpuGroupSpec; +use k8_metadata::spg::SpuGroupStatus; +use types::SpuId; + +use crate::core::spus::SpuLocalStore; + +pub type SpuGroupObj = K8Obj<SpuGroupSpec, SpuGroupStatus>; + +/// needed for adding SPG extensions +pub trait SpuValidation { + fn is_already_valid(&self) -> bool; + fn is_conflict_with(&self, spu_store: &SpuLocalStore) -> Option<SpuId>; +} + +impl SpuValidation for SpuGroupObj { + + /// check if I have already been validated + fn is_already_valid(&self) -> bool { + self.status.as_ref().map(|status| status.is_already_valid()).unwrap_or(false) + } + + /// check if my group's id conflicts with the spu local store + fn is_conflict_with(&self, spu_store: &SpuLocalStore) -> Option<SpuId> { + + if self.is_already_valid() { + return None; + } + + let min_id = self.spec.min_id() as SpuId; + + spu_store.is_conflict( + &self.metadata.uid, + min_id, + min_id + self.spec.replicas as SpuId, + ) + } +} diff --git a/sc-server/src/k8/operator/spg_operator.rs b/sc-server/src/k8/operator/spg_operator.rs new file mode 100644 index 0000000000..c98d6bf12b --- /dev/null +++ b/sc-server/src/k8/operator/spg_operator.rs @@ -0,0 +1,390 @@ + +use futures::stream::StreamExt; +use log::debug; +use log::error; +use log::info; +use log::trace; +use log::warn; +use std::collections::HashMap; + +use future_helper::spawn; +use k8_client::ClientError; +use k8_metadata::core::metadata::InputK8Obj; +use k8_metadata::core::metadata::InputObjectMeta; +use k8_metadata::core::metadata::K8Watch; +use k8_client::ApplyResult; +use k8_client::ServiceSpec; +use k8_client::ExternalTrafficPolicy; +use k8_client::LoadBalancerType; +use k8_client::ServicePort; +use k8_client::ServiceStatus; +use k8_metadata::spg::SpuGroupSpec; +use k8_metadata::spg::SpuGroupStatus; +use k8_metadata::spg::SpuEndpointTemplate; +use k8_metadata::core::Spec; +use k8_metadata::spu::SpuSpec as K8SpuSpec; +use k8_metadata::spu::SpuType as K8SpuType; +use k8_metadata::spu::Endpoint as K8Endpoint; +use types::defaults::SPU_PUBLIC_PORT; +use types::defaults::SPU_DEFAULT_NAME; +use types::SpuId; + +use crate::k8::SharedK8Client; +use crate::core::spus::SharedSpuLocalStore; + +use super::convert_cluster_to_statefulset; +use super::generate_service; +use super::SpuGroupObj; +use super::SpuValidation; + +pub struct SpgOperator { + client: SharedK8Client, + spu_store: SharedSpuLocalStore, + namespace: String +} + +impl SpgOperator { + + pub fn new(client: SharedK8Client,namespace: String,spu_store: SharedSpuLocalStore) -> Self { + Self { + client, + namespace, + spu_store + } + } + + pub fn run(self) { + spawn(self.inner_run()) + } + + async fn inner_run(self) { + + let mut spg_stream = self.client.watch_stream_since::<SpuGroupSpec>(&self.namespace, None); + + info!("start cluster operation with namespace: {}",self.namespace); + while let Some(result) = spg_stream.next().await { + match result { + Ok(events) => { + self.dispatch_events(events).await; + } + Err(err) => error!("error occurred during watch: {}", err), + } + } + + debug!("cluster dispatch finished"); + } + + + async fn dispatch_events(&self,events: Vec<Result<K8Watch<SpuGroupSpec, SpuGroupStatus>, ClientError>>) { + for event_r in events { + match event_r { + Ok(watch_event) => { + let result = self.process_event(watch_event).await; + match result { + Err(err) => error!("error processing event: {}", err), + _ => {} + } + } + Err(err) => error!("error in watch item: {}", err), + } + } + } + + async fn process_event(&self,event: K8Watch<SpuGroupSpec, SpuGroupStatus>) -> Result<(), ClientError> { + trace!("watch event: {:#?}", event); + match event { + K8Watch::ADDED(obj) => { + debug!("watch: ADD event -> apply"); + self.apply_spg_changes(obj).await + } + K8Watch::MODIFIED(obj) => { + debug!("watch: MOD event -> apply"); + self.apply_spg_changes(obj).await + } + K8Watch::DELETED(_) => { + debug!("RCVD watch item DEL event -> deleted"); + Ok(()) + } + } + } + + async fn apply_spg_changes(&self, + spu_group: SpuGroupObj, + ) -> Result<(), ClientError> { + + let spg_name = spu_group.metadata.name.as_ref(); + + let spg_spec = &spu_group.spec; + + if let Some(conflict_id) = spu_group.is_conflict_with(&self.spu_store) { + + warn!("spu group: {} conflicts with existing id: {}",spg_name,conflict_id); + let status = SpuGroupStatus::invalid(format!("conflict with: {}",conflict_id)); + + let k8_status_change = spu_group.as_status_update(status); + if let Err(err) = self.client + .update_status(&k8_status_change) + .await { + error!("error: {} updating status: {:#?}",err,k8_status_change) + } + + } else { + + // if we pass this stage, then we have reserved the id.
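+ // first mark the group itself as reserved, then reconcile the derived objects
+ // (statefulset service, statefulset, spus); each step only logs its error,
+ // so a single failure does not block the remaining objects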
+ if !spu_group.is_already_valid() { + let status_change = spu_group.as_status_update(SpuGroupStatus::reserved()); + if let Err(err) = self.client + .update_status(&status_change) + .await { + error!("error: {} updating status: {:#?}",err,status_change) + } + } + + + // ensure we have service for statefulset + if let Err(err) = self.apply_statefulset_service(&spu_group, spg_spec, &spg_name).await { + error!("cluster '{}': error applying services: {}", spg_name, err); + } + + if let Err(err) = self.apply_stateful_set(&spu_group, spg_spec, &spg_name).await { + error!("cluster '{}': error applying stateful sets: {}", spg_name, err); + } + if let Err(err) = self.apply_spus(&spu_group, spg_spec, &spg_name).await { + error!("cluster '{}': error applying spus: {}", spg_name, err); + } + } + + + Ok(()) + } + + + + /// Generate and apply a stateful set for this cluster + async fn apply_stateful_set( + &self, + spu_group: &SpuGroupObj, + spg_spec: &SpuGroupSpec, + spg_name: &str, + ) -> Result<(), ClientError> { + + let input_stateful = convert_cluster_to_statefulset(spg_spec,&spu_group.metadata,spg_name,&self.namespace); + + debug!( + "cluster '{}': apply statefulset '{}' changes", + spg_name, + input_stateful.metadata.name, + ); + + self.client.apply(input_stateful).await?; + + Ok(()) + } + + + /// create SPU crd objects from cluster spec + async fn apply_spus( + &self, + spg_obj: &SpuGroupObj, + spg_spec: &SpuGroupSpec, + spg_name: &str, + ) -> Result<(), ClientError> { + + let replicas = spg_spec.replicas; + + // for each spu, we generate the SPU and its external services + for i in 0..replicas { + let spu_id = match self.compute_spu_id(spg_spec.min_id(), i) { + Ok(id) => id, + Err(err) => { + error!("{}", err); + continue; + } + }; + + let spu_name = format!("{}-{}", spg_name,i); + debug!("generating spu with name: {}",spu_name); + + if let Err(err) = self.apply_spu_load_balancers( + spg_obj, + spg_spec, + &spu_name).await { + error!("error trying to create load balancer for spu: {}",err); + } + + self.apply_spu(spg_obj, spg_spec, spg_name, &spu_name, i, spu_id).await; + + + } + + Ok(()) + } + + + + + + /// create SPU crd objects from cluster spec + async fn apply_spu( + &self, + k8_group: &SpuGroupObj, + group_spec: &SpuGroupSpec, + group_name: &str, + spu_name: &str, + _replica_index: u16, + spu_id: SpuId, + ) { + let k8_metadata = &k8_group.metadata; + let k8_namespace = k8_metadata.namespace(); + let spu_template = &group_spec.template.spec; + + let spu_private_ep = if let Some(ref ep) = &spu_template.private_endpoint { + ep.clone() + } else { + SpuEndpointTemplate::default_private() + }; + let spu_public_ep = if let Some(ref ep) = &spu_template.public_endpoint { + ep.clone() + } else { + SpuEndpointTemplate::default_public() + }; + + let spu_spec = K8SpuSpec { + spu_id, + spu_type: Some(K8SpuType::Managed), + public_endpoint: K8Endpoint { + host: format!("{}.{}.svc.cluster.local", spu_name, k8_namespace), + port: spu_public_ep.port, + encryption: spu_public_ep.encryption, + }, + private_endpoint: K8Endpoint { + host: format!("{}.{}",spu_name,group_name), + port: spu_private_ep.port, + encryption: spu_private_ep.encryption, + }, + rack: None, + }; + + let owner_ref = k8_metadata.make_owner_reference::<SpuGroupSpec>(); + let input_spu: InputK8Obj<K8SpuSpec> = InputK8Obj { + api_version: K8SpuSpec::api_version(), + kind: K8SpuSpec::kind(), + metadata: InputObjectMeta { + name: spu_name.to_string(), + namespace: k8_namespace.to_owned(), + owner_references: vec![owner_ref], + ..Default::default() + }, + spec: spu_spec,
..Default::default() + }; + + debug!("spu '{}': apply changes", spu_name); + + if let Err(err) = self.client.apply(input_spu).await { + error!("spu '{}': {}", spu_name, err); + } + } + + + + /// create external load balancer for each SPU + async fn apply_spu_load_balancers( + &self, + spg_obj: &SpuGroupObj, + spg_spec: &SpuGroupSpec, + spg_name: &str, + ) -> Result<ApplyResult<ServiceSpec, ServiceStatus>, ClientError> { + let metadata = &spg_obj.metadata; + + let spu_template = &spg_spec.template.spec; + let mut public_port = ServicePort { + port: spu_template.public_endpoint.as_ref().map(|t| t.port).unwrap_or(SPU_PUBLIC_PORT), + ..Default::default() + }; + public_port.target_port = Some(public_port.port); + + let mut selector = HashMap::new(); + selector.insert("statefulset.kubernetes.io/pod-name".to_owned(), spg_name.to_owned()); + + let service_spec = ServiceSpec { + r#type: Some(LoadBalancerType::LoadBalancer), + external_traffic_policy: Some(ExternalTrafficPolicy::Local), + selector: Some(selector), + ports: vec![public_port.clone()], + ..Default::default() + }; + + let owner_ref = metadata.make_owner_reference::<SpuGroupSpec>(); + + let input_service: InputK8Obj<ServiceSpec> = InputK8Obj { + api_version: ServiceSpec::api_version(), + kind: ServiceSpec::kind(), + metadata: InputObjectMeta { + name: spg_name.to_owned(), + namespace: metadata.namespace().to_string(), + owner_references: vec![owner_ref], + ..Default::default() + }, + spec: service_spec, + ..Default::default() + }; + + debug!("spu '{}': enable external services", spg_name); + + self.client.apply(input_service).await + } + + + + async fn apply_statefulset_service( + &self, + spg_obj: &SpuGroupObj, + spg_spec: &SpuGroupSpec, + spg_name: &str, + ) -> Result<(), ClientError> { + + let service_name = spg_name.to_owned(); + let service_spec = generate_service(spg_spec,spg_name); + let metadata = &spg_obj.metadata; + let owner_ref = metadata.make_owner_reference::<SpuGroupSpec>(); + + let mut labels = HashMap::new(); + labels.insert("app".to_owned(), SPU_DEFAULT_NAME.to_owned()); + labels.insert("group".to_owned(),spg_name.to_owned()); + + + let input_service: InputK8Obj<ServiceSpec> = InputK8Obj { + api_version: ServiceSpec::api_version(), + kind: ServiceSpec::kind(), + metadata: InputObjectMeta { + name: service_name.clone(), + namespace: metadata.namespace().to_string(), + labels, + owner_references: vec![owner_ref], + ..Default::default() + }, + spec: service_spec, + ..Default::default() + }; + + debug!( + "spg '{}': apply service '{}' changes", + spg_name, + service_name, + ); + + self.client.apply(input_service).await?; + + Ok(()) + } + + /// compute spu id with min_id as base + fn compute_spu_id( + &self, + min_id: i32, + replica_index: u16, + ) -> Result<SpuId, ClientError> { + Ok(replica_index as i32 + min_id) + } +} \ No newline at end of file diff --git a/sc-server/src/lib.rs b/sc-server/src/lib.rs new file mode 100644 index 0000000000..3525f14cfc --- /dev/null +++ b/sc-server/src/lib.rs @@ -0,0 +1,22 @@ +#![feature(trace_macros, generators,specialization)] +#![recursion_limit = "512"] + +mod cli; +mod conn_manager; +mod core; +//mod hc_manager; +mod init; +mod services; +mod k8; +mod error; + +//#[cfg(test)] +//mod tests; +pub use init::create_core_services; +pub use self::error::ScServerError; + +// start controller services +pub fn start_main() { + utils::init_logger(); + init::main_loop(); +} diff --git a/sc-server/src/main.rs b/sc-server/src/main.rs new file mode 100644 index 0000000000..eaf1f174bc --- /dev/null +++ b/sc-server/src/main.rs @@ -0,0 +1,5 @@ +use sc_server::start_main; + +fn main() { + start_main(); +} diff
--git a/sc-server/src/services/mod.rs b/sc-server/src/services/mod.rs new file mode 100644 index 0000000000..c6fdd52383 --- /dev/null +++ b/sc-server/src/services/mod.rs @@ -0,0 +1,9 @@ +// pub mod send_channels; + +mod public_api; +mod private_api; + +pub use public_api::create_public_server; +pub use public_api::PubliApiServer; +pub use private_api::create_internal_server; +pub use private_api::InternalApiServer; diff --git a/sc-server/src/services/private_api/internal_context.rs b/sc-server/src/services/private_api/internal_context.rs new file mode 100644 index 0000000000..dd5211ae9f --- /dev/null +++ b/sc-server/src/services/private_api/internal_context.rs @@ -0,0 +1,79 @@ + +use log::error; +use log::debug; +use futures::channel::mpsc::Sender; +use futures::sink::SinkExt; + + +use types::SpuId; +use kf_socket::KfSink; +use internal_api::UpdateLrsRequest; + +use crate::core::ShareLocalStores; +use crate::conn_manager::SharedConnManager; +use crate::conn_manager::ConnParams; +use crate::conn_manager::SpuConnectionStatusChange; + +/// Context used by Private API Server +pub struct InternalContext +{ + pub local_stores: ShareLocalStores, + conn_mgr: SharedConnManager, + conn_status_sender: Sender<SpuConnectionStatusChange>, + lrs_sender: Sender<UpdateLrsRequest> +} + + +impl InternalContext { + + pub fn new( + local_stores: ShareLocalStores, + conn_mgr: SharedConnManager, + conn_status_sender: Sender<SpuConnectionStatusChange>, + lrs_sender: Sender<UpdateLrsRequest> + ) -> Self { + Self { + local_stores, + conn_mgr, + conn_status_sender, + lrs_sender + } + } + + /// send connection status to all receivers + async fn send_state_to_sender(&self,state: SpuConnectionStatusChange) { + + let mut sender = self.conn_status_sender.clone(); + if let Err(err) = sender.send(state).await { + error!("error sending connection state to sender: {:#?}",err); + } + } + + pub async fn send_lrs_to_sender(&self, lrs: UpdateLrsRequest) { + let mut sender = self.lrs_sender.clone(); + if let Err(err) = sender.send(lrs).await { + error!("error sending lrs state to sender: {:#?}",err); + } + } + + /// Register new sink and notify receivers that the SPU is online + pub async fn register_sink(&self, spu_id: SpuId, sink: KfSink, param: ConnParams) { + + self.conn_mgr.register_sink(spu_id,sink,param).await; + debug!("Successfully registered SPU {}",spu_id); + self.send_state_to_sender(SpuConnectionStatusChange::On(spu_id)).await; + } + + /// Unregister sink. This happens when the connection goes down + pub async fn clear_sink(&self,spu_id: &SpuId) { + self.conn_mgr.clear_sink(spu_id).await; + debug!("removing socket sink for spu: {}",spu_id); + self.send_state_to_sender(SpuConnectionStatusChange::Off(*spu_id)).await; + } + + pub fn validate_spu(&self, spu_id: &SpuId) -> bool { + self.conn_mgr.validate_spu(spu_id) + } + +} \ No newline at end of file diff --git a/sc-server/src/services/private_api/mod.rs b/sc-server/src/services/private_api/mod.rs new file mode 100644 index 0000000000..b09d2bdede --- /dev/null +++ b/sc-server/src/services/private_api/mod.rs @@ -0,0 +1,43 @@ +mod private_server; +mod internal_context; + +pub use internal_context::InternalContext; + +use std::sync::Arc; + +use log::info; +use futures::channel::mpsc::Sender; + +use private_server::ScInternalService; +use internal_api::InternalScKey; +use internal_api::InternalScRequest; +use internal_api::UpdateLrsRequest; +use kf_service::KfApiServer; + +use crate::core::ShareLocalStores; +use crate::conn_manager::SharedConnManager; +use crate::conn_manager::SpuConnectionStatusChange; + +pub type SharedInternalContext = Arc<InternalContext>; + +pub type InternalApiServer = KfApiServer<InternalScRequest, InternalScKey, SharedInternalContext, ScInternalService>; + +// start server +pub fn create_internal_server( + local_stores: ShareLocalStores, + conn_mgr: SharedConnManager, + conn_status_sender: Sender<SpuConnectionStatusChange>, + lrs_sender: Sender<UpdateLrsRequest>, +) -> InternalApiServer +{ + let addr = local_stores.config().private_endpoint.addr.clone(); + let ctx = InternalContext::new( + local_stores, + conn_mgr, + conn_status_sender, + lrs_sender + ); + info!("SC: starting internal services at: {}", addr); + + KfApiServer::new(addr, Arc::new(ctx), ScInternalService::new()) +} diff --git a/sc-server/src/services/private_api/private_server.rs b/sc-server/src/services/private_api/private_server.rs new file mode 100644 index 0000000000..e7d1511e15 --- /dev/null +++ b/sc-server/src/services/private_api/private_server.rs @@ -0,0 +1,105 @@ +use std::sync::Arc; +use std::io::Error as IoError; +use std::io::ErrorKind; + + +use futures::future::BoxFuture; +use futures::future::FutureExt; +use log::error; +use log::debug; + +use kf_service::api_loop; +use kf_service::KfService; +use kf_service::wait_for_request; +use kf_socket::KfSocket; +use kf_socket::KfSocketError; +use internal_api::InternalScRequest; +use internal_api::InternalScKey; +use internal_api::RegisterSpuResponse; + +use crate::conn_manager::ConnParams; +use super::SharedInternalContext; + + +pub struct ScInternalService {} + +impl ScInternalService +{ + pub fn new() -> Self { + Self { } + } + + async fn handle( + self: Arc<Self>, + context: SharedInternalContext, + socket: KfSocket, + ) -> Result<(), KfSocketError> + { + let (mut sink, mut stream) = socket.split(); + let mut api_stream = stream.api_stream::<InternalScRequest, InternalScKey>(); + + // wait for spu registration request + let spu_id = wait_for_request!(api_stream, + InternalScRequest::RegisterSpuRequest(req_msg) => { + let spu_id = req_msg.request.spu(); + let mut status = true; + debug!("registration req from spu '{}'", spu_id); + + + let register_res = if context.validate_spu(&spu_id) { + debug!("SPU: {} validation succeeded",spu_id); + RegisterSpuResponse::ok() + } else { + status = false; + debug!("SPU: {} validation failed",spu_id); + RegisterSpuResponse::failed_registeration() + }; + + let response = req_msg.new_response(register_res); + sink.send_response(&response,req_msg.header.api_version()).await?; + + if status { + context.register_sink(spu_id,sink,ConnParams::new()).await; + } else { + return Ok(()) + } + + + spu_id + }
+ ); + + + api_loop!( + api_stream, + InternalScRequest::UpdateLrsRequest(msg) => { + debug!("received lrs request: {}",msg); + context.send_lrs_to_sender(msg.request).await; + }, + InternalScRequest::RegisterSpuRequest(_request) => { + error!("registration req only valid during initialization"); + return Err(KfSocketError::IoError(IoError::new(ErrorKind::InvalidData,"register spu request is only valid at the beginning"))) + } + ); + + debug!("api loop terminated; clearing sink"); + context.clear_sink(&spu_id).await; + + Ok(()) + } +} + +impl KfService for ScInternalService +{ + type Context = SharedInternalContext; + type Request = InternalScRequest; + type ResponseFuture = BoxFuture<'static, Result<(), KfSocketError>>; + + fn respond( + self: Arc<Self>, + context: SharedInternalContext, + socket: KfSocket, + ) -> Self::ResponseFuture { + self.handle(context, socket).boxed() + } +} diff --git a/sc-server/src/services/public_api/flv/api_versions_req.rs b/sc-server/src/services/public_api/flv/api_versions_req.rs new file mode 100644 index 0000000000..2fd59692b4 --- /dev/null +++ b/sc-server/src/services/public_api/flv/api_versions_req.rs @@ -0,0 +1,64 @@ +use std::io::Error; +use log::debug; + +use kf_protocol::api::RequestMessage; +use kf_protocol::api::ResponseMessage; +use kf_protocol::api::Request; + +use sc_api::versions::ApiVersionKey; +use sc_api::versions::{ApiVersionsRequest, ApiVersionsResponse}; +use sc_api::ScApiKey; +use sc_api::topic::FlvCreateTopicsRequest; +use sc_api::topic::FlvDeleteTopicsRequest; +use sc_api::topic::FlvFetchTopicsRequest; +use sc_api::topic::FlvTopicCompositionRequest; +use sc_api::spu::FlvFetchSpusRequest; + +pub async fn handle_api_versions_request( + request: RequestMessage<ApiVersionsRequest>, +) -> Result<ResponseMessage<ApiVersionsResponse>, Error> { + let mut response = ApiVersionsResponse::default(); + + // topic versions + response.api_keys.push(make_version_key( + ScApiKey::FlvCreateTopics, + FlvCreateTopicsRequest::DEFAULT_API_VERSION, + FlvCreateTopicsRequest::DEFAULT_API_VERSION, + )); + response.api_keys.push(make_version_key( + ScApiKey::FlvDeleteTopics, + FlvDeleteTopicsRequest::DEFAULT_API_VERSION, + FlvDeleteTopicsRequest::DEFAULT_API_VERSION, + )); + response.api_keys.push(make_version_key( + ScApiKey::FlvFetchTopics, + FlvFetchTopicsRequest::DEFAULT_API_VERSION, + FlvFetchTopicsRequest::DEFAULT_API_VERSION, + )); + response.api_keys.push(make_version_key( + ScApiKey::FlvTopicComposition, + FlvTopicCompositionRequest::DEFAULT_API_VERSION, + FlvTopicCompositionRequest::DEFAULT_API_VERSION, + )); + + // spus versions + response.api_keys.push(make_version_key( + ScApiKey::FlvFetchSpus, + FlvFetchSpusRequest::DEFAULT_API_VERSION, + FlvFetchSpusRequest::DEFAULT_API_VERSION, + )); + + debug!("flv api versions response"); + + Ok(request.new_response(response)) +} + +/// Build version key object +fn make_version_key(key: ScApiKey, min_version: i16, max_version: i16) -> ApiVersionKey { + let api_key = key as i16; + ApiVersionKey { + api_key, + min_version, + max_version, + } +} diff --git a/sc-server/src/services/public_api/flv/create_custom_spus_req.rs b/sc-server/src/services/public_api/flv/create_custom_spus_req.rs new file mode 100644 index 0000000000..6db75a969d --- /dev/null +++ b/sc-server/src/services/public_api/flv/create_custom_spus_req.rs @@ -0,0 +1,118 @@ +//! +//! # Create Custom Spus Request +//! +//! Converts Custom Spu API request into KV request and sends to KV store for processing. +//!
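+//! Requests are first validated against the local SPU store (duplicate name or id)
+//! and only then converted to a K8 SPU spec and applied through the K8 WS service.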
+use log::{debug, trace}; +use std::io::Error; + +use kf_protocol::api::{RequestMessage, ResponseMessage}; +use kf_protocol::api::FlvErrorCode; + +use k8_metadata::core::metadata::ObjectMeta; +use metadata::spu::{SpuSpec, Endpoint, SpuType}; + +use sc_api::FlvResponseMessage; +use sc_api::spu::{FlvCreateCustomSpusRequest, FlvCreateCustomSpusResponse}; +use sc_api::spu::FlvCreateCustomSpuRequest; + +use crate::ScServerError; +use crate::core::LocalStores; +use crate::core::spus::SpuKV; +use crate::core::common::KvContext; + +use super::PublicContext; + +/// Handler for create spus request +pub async fn handle_create_custom_spus_request( + request: RequestMessage<FlvCreateCustomSpusRequest>, + ctx: &PublicContext, +) -> Result<ResponseMessage<FlvCreateCustomSpusResponse>, Error> { + let (header, custom_spu_req) = request.get_header_request(); + + let mut response = FlvCreateCustomSpusResponse::default(); + let mut results: Vec<FlvResponseMessage> = vec![]; + + // process create custom spus requests in sequence + for custom_spu_req in custom_spu_req.custom_spus { + debug!( + "api request: create custom-spu '{}({})'", + custom_spu_req.name, custom_spu_req.id + ); + + // validate custom-spu request + if let Err(err_msg) = validate_custom_spu_request(&custom_spu_req, ctx.metadata()) { + results.push(err_msg); + continue; + } + + // process custom-spu request + let result = process_custom_spu_request(ctx, &custom_spu_req).await; + results.push(result); + } + + // send response + response.results = results; + trace!("create custom-spus response {:#?}", response); + + Ok(RequestMessage::<FlvCreateCustomSpusRequest>::response_with_header(&header, response)) +} + +/// Validate custom_spu requests (one at a time) +fn validate_custom_spu_request( + custom_spu_req: &FlvCreateCustomSpuRequest, + metadata: &LocalStores, +) -> Result<(), FlvResponseMessage> { + let spu_id = &custom_spu_req.id; + let spu_name = &custom_spu_req.name; + + debug!("validating custom-spu: {}({})", spu_name, spu_id); + + // look-up SPU by name or id to check if it already exists + if metadata.spus().spu(spu_name).is_some() || metadata.spus().get_by_id(spu_id).is_some() { + Err(FlvResponseMessage::new( + spu_name.clone(), + FlvErrorCode::SpuAlreadyExists, + Some(format!("spu '{}({})' already defined", spu_name, spu_id)), + )) + } else { + Ok(()) + } +} + +/// Process custom spu, converts spu spec to K8 and sends to KV store +async fn process_custom_spu_request( + ctx: &PublicContext, + custom_spu_req: &FlvCreateCustomSpuRequest, +) -> FlvResponseMessage { + let name = &custom_spu_req.name; + + if let Err(err) = create_custom_spu(ctx, custom_spu_req).await { + let error = Some(err.to_string()); + FlvResponseMessage::new(name.clone(), FlvErrorCode::SpuError, error) + } else { + FlvResponseMessage::new_ok(name.clone()) + } +} + +async fn create_custom_spu( + ctx: &PublicContext, + spu_req: &FlvCreateCustomSpuRequest, +) -> Result<(), ScServerError> { + let meta = ObjectMeta::new(spu_req.name.clone(), ctx.namespace.clone()); + let public_ep = + Endpoint::from_port_host(spu_req.public_server.port, &spu_req.public_server.host); + let private_ep = + Endpoint::from_port_host(spu_req.private_server.port, &spu_req.private_server.host); + let spu_spec = SpuSpec { + id: spu_req.id, + spu_type: SpuType::Custom, + public_endpoint: public_ep, + private_endpoint: private_ep, + rack: spu_req.rack.clone(), + }; + let kv_ctx = KvContext::default().with_ctx(meta); + let custom_spu_kv = SpuKV::new_with_context(spu_req.name.clone(), spu_spec, kv_ctx); + + ctx.k8_ws().add(custom_spu_kv).await +} diff --git a/sc-server/src/services/public_api/flv/create_spu_groups_req.rs
b/sc-server/src/services/public_api/flv/create_spu_groups_req.rs new file mode 100644 index 0000000000..30e28c3cc3 --- /dev/null +++ b/sc-server/src/services/public_api/flv/create_spu_groups_req.rs @@ -0,0 +1,123 @@ +//! +//! # Create Spu Groups Request +//! +//! Converts Spu Groups API request into KV request and sends to KV store for processing. +//! +use log::{debug, trace}; +use std::io::Error; + +use kf_protocol::api::{RequestMessage, ResponseMessage}; +use kf_protocol::api::FlvErrorCode; +use k8_metadata::spg::SpuGroupSpec; +use k8_metadata::spg::SpuTemplate; +use k8_metadata::spg::StorageConfig; +use k8_metadata::core::metadata::Env; +use k8_metadata::core::Spec as K8Spec; +use k8_metadata::core::metadata::TemplateSpec; +use sc_api::FlvResponseMessage; +use sc_api::spu::{FlvCreateSpuGroupsRequest, FlvCreateSpuGroupsResponse}; +use sc_api::spu::FlvCreateSpuGroupRequest; +use sc_api::spu::FlvEnvVar; +use sc_api::spu::FlvStorageConfig; + +use super::PublicContext; + +/// Handler for spu groups request +pub async fn handle_create_spu_groups_request( + request: RequestMessage<FlvCreateSpuGroupsRequest>, + ctx: &PublicContext, +) -> Result<ResponseMessage<FlvCreateSpuGroupsResponse>, Error> { + let (header, spu_group_req) = request.get_header_request(); + + let mut results: Vec<FlvResponseMessage> = vec![]; + + // process create spu groups requests in sequence + for spu_group in spu_group_req.spu_groups { + + debug!("api request: create spu-group '{}'", spu_group.name); + + + let result = process_spu_group_request(ctx, spu_group).await; + results.push(result); + + } + + // send response + let mut response = FlvCreateSpuGroupsResponse::default(); + response.results = results; + trace!("create spu-group response {:#?}", response); + + Ok(RequestMessage::<FlvCreateSpuGroupsRequest>::response_with_header(&header, response)) +} + + +/// Process spu group, converts group spec to K8 and sends to KV store +async fn process_spu_group_request( + ctx: &PublicContext, + group_req: FlvCreateSpuGroupRequest, +) -> FlvResponseMessage { + + + let (name,spg_spec): (String,SpuGroupSpec) = group_req.to_spec(); + + match ctx.create(name.clone(),spg_spec).await { + Ok(_) => FlvResponseMessage::new_ok(name.clone()), + Err(err) => { + let error = Some(err.to_string()); + FlvResponseMessage::new(name, FlvErrorCode::SpuError, error) + } + } +} + + + +// convert request into a K8 spec +trait K8Request<S> where S: K8Spec { + + fn to_spec(self) -> (String,S); + +} + + +impl K8Request<SpuGroupSpec> for FlvCreateSpuGroupRequest { + + fn to_spec(self) -> (String,SpuGroupSpec) { + + ( self.name, + SpuGroupSpec { + replicas: self.replicas, + min_id: self.min_id.clone(), + template: TemplateSpec::new(SpuTemplate{ + rack: self.rack.clone(), + storage: self.config.storage.map(|cfg| cfg.convert()), + env: self.config.env.into_iter().map(|env| env.convert()).collect(), + ..Default::default() + }) + } + ) + + } +} + +// simplify convert +pub trait Convert<T> { + fn convert(self) -> T; +} + +impl Convert<Env> for FlvEnvVar { + fn convert(self) -> Env { + Env::key_value(self.name,self.value) + } +} + +impl Convert<StorageConfig> for FlvStorageConfig { + fn convert(self) -> StorageConfig { + StorageConfig { + log_dir: self.log_dir, + size: self.size + } + } +} + + + diff --git a/sc-server/src/services/public_api/flv/create_topics_req.rs b/sc-server/src/services/public_api/flv/create_topics_req.rs new file mode 100644 index 0000000000..f312089ae4 --- /dev/null +++ b/sc-server/src/services/public_api/flv/create_topics_req.rs @@ -0,0 +1,164 @@ +//! +//! # Create Topic Request +//! +//! Create topic request handler. There are 2 types of topics: +//! * Topics with Computed Replicas (aka. Computed Topics) +//!
* Topics with Assigned Replicas (aka. Assigned Topics) +//! +//! Computed Topics use the Fluvio algorithm for replica assignment. +//! Assigned Topics allow the users to apply their custom-defined replica assignment. +//! +use log::{debug, trace}; +use std::io::Error; + +use kf_protocol::api::{RequestMessage, ResponseMessage}; +use kf_protocol::api::FlvErrorCode; + +use k8_metadata::core::metadata::ObjectMeta; + +use sc_api::FlvResponseMessage; +use sc_api::topic::{FlvCreateTopicsRequest, FlvCreateTopicsResponse}; + +use metadata::topic::TopicSpec; + +use crate::ScServerError; +use crate::core::LocalStores; +use crate::core::topics::TopicKV; +use crate::core::common::KvContext; + +use super::PublicContext; + +/// Handler for create topic request +pub async fn handle_create_topics_request( + request: RequestMessage<FlvCreateTopicsRequest>, + ctx: &PublicContext, +) -> Result<ResponseMessage<FlvCreateTopicsResponse>, Error> { + let (header, topic_request) = request.get_header_request(); + + let validate_only = topic_request.validate_only; + let mut response = FlvCreateTopicsResponse::default(); + let mut topic_results: Vec<FlvResponseMessage> = vec![]; + + // process create topic requests in sequence + for topic_req in topic_request.topics { + let name = topic_req.name; + let topic_spec = topic_req.topic; + debug!("api request: create topic '{}'", name); + + // validate topic request + if let Err(validation_message) = validate_topic_request(&name, &topic_spec, ctx.metadata()) + { + topic_results.push(validation_message); + continue; + } + if validate_only { + topic_results.push(FlvResponseMessage::new_ok(name.to_string())); + continue; + } + // process topic request + let result = process_topic_request(ctx, name, topic_spec).await; + topic_results.push(result); + } + + // send response + response.results = topic_results; + trace!("create topics request response {:#?}", response); + + Ok(RequestMessage::<FlvCreateTopicsRequest>::response_with_header(&header, response)) +} + +/// Validate topic, takes advantage of the validation routines inside topic action workflow +fn validate_topic_request( + name: &str, + topic_spec: &TopicSpec, + metadata: &LocalStores, +) -> Result<(), FlvResponseMessage> { + debug!("validating topic: {}", name); + + // check if topic already exists + if metadata.topics().contains_key(name) { + return Err(FlvResponseMessage::new( + name.to_string(), + FlvErrorCode::TopicAlreadyExists, + Some(format!("topic '{}' already defined", name)), + )); + } + + // create temporary topic status to return validation result + let topic_kv = TopicKV::with_spec(name.to_owned(), topic_spec.clone()); + match topic_spec { + TopicSpec::Computed(param) => { + let next_state = topic_kv.validate_computed_topic_parameters(param); + trace!("validating, computed topic: {:#?}", next_state); + if next_state.resolution.is_invalid() { + Err(FlvResponseMessage::new( + name.to_string(), + FlvErrorCode::TopicError, + Some(next_state.reason), + )) + } else { + let next_state = topic_kv.generate_replica_map(metadata.spus(), param); + trace!("validating, generate replica map topic: {:#?}", next_state); + if next_state.resolution.no_resource() { + Err(FlvResponseMessage::new( + name.to_string(), + FlvErrorCode::TopicError, + Some(next_state.reason), + )) + } else { + Ok(()) + } + } + } + TopicSpec::Assigned(ref partition_map) => { + let next_state = topic_kv.validate_assigned_topic_parameters(partition_map); + trace!("validating, assigned topic: {:#?}", next_state); + if next_state.resolution.is_invalid() { + Err(FlvResponseMessage::new( + name.to_string(), + FlvErrorCode::TopicError, + Some(next_state.reason), + ))
} else { + let next_state = + topic_kv.update_replica_map_for_assigned_topic(partition_map, metadata.spus()); + trace!("validating, assign replica map topic: {:#?}", next_state); + if next_state.resolution.is_invalid() { + Err(FlvResponseMessage::new( + name.to_string(), + FlvErrorCode::TopicError, + Some(next_state.reason), + )) + } else { + Ok(()) + } + } + } + } +} + +/// Process topic, converts topic spec to K8 and sends to KV store +async fn process_topic_request( + ctx: &PublicContext, + name: String, + topic_spec: TopicSpec, +) -> FlvResponseMessage { + if let Err(err) = create_topic(ctx, name.clone(), topic_spec).await { + let error = Some(err.to_string()); + FlvResponseMessage::new(name, FlvErrorCode::TopicError, error) + } else { + FlvResponseMessage::new_ok(name) + } +} + +async fn create_topic( + ctx: &PublicContext, + name: String, + topic: TopicSpec, +) -> Result<(), ScServerError> { + let meta = ObjectMeta::new(name.clone(), ctx.namespace.clone()); + let kv_ctx = KvContext::default().with_ctx(meta); + let topic_kv = TopicKV::new_with_context(name, topic, kv_ctx); + + ctx.k8_ws().add(topic_kv).await +} diff --git a/sc-server/src/services/public_api/flv/delete_custom_spus_req.rs b/sc-server/src/services/public_api/flv/delete_custom_spus_req.rs new file mode 100644 index 0000000000..1b2e6165f0 --- /dev/null +++ b/sc-server/src/services/public_api/flv/delete_custom_spus_req.rs @@ -0,0 +1,112 @@ +//! +//! # Delete Custom Spus Request +//! +//! Lookup custom-spu in local metadata, grab its K8 context +//! and send K8 a delete message. +//! +use log::{debug, trace}; +use std::io::Error; + +use kf_protocol::api::{RequestMessage, ResponseMessage}; +use kf_protocol::api::FlvErrorCode; +use sc_api::{FlvResponseMessage}; +use sc_api::spu::{FlvDeleteCustomSpusRequest, FlvDeleteCustomSpusResponse}; +use sc_api::spu::FlvCustomSpu; +use k8_metadata::spu::SpuSpec as K8SpuSpec; + +use crate::core::spus::SpuKV; +use super::PublicContext; + +/// Handler for delete custom spu request +pub async fn handle_delete_custom_spus_request( + request: RequestMessage<FlvDeleteCustomSpusRequest>, + ctx: &PublicContext, +) -> Result<ResponseMessage<FlvDeleteCustomSpusResponse>, Error> { + let mut response = FlvDeleteCustomSpusResponse::default(); + let mut results: Vec<FlvResponseMessage> = vec![]; + + // look-up custom spus based on their names or ids + for custom_spu in &request.request.custom_spus { + let result = match custom_spu { + FlvCustomSpu::Name(spu_name) => { + debug!("api request: delete custom-spu with name '{}'", spu_name); + + // spu-name must exist + if let Some(spu) = &ctx.metadata().spus().spu(spu_name) { + delete_custom_spu(ctx, spu).await? + } else { + // spu does not exist + FlvResponseMessage::new( + spu_name.clone(), + FlvErrorCode::SpuNotFound, + Some("not found".to_owned()), + ) + } + } + FlvCustomSpu::Id(spu_id) => { + debug!("api request: delete custom-spu with id '{}'", spu_id); + + // spu-id must exist + if let Some(spu) = &ctx.metadata().spus().get_by_id(spu_id) { + delete_custom_spu(ctx, spu).await? + } else { + // spu does not exist + FlvResponseMessage::new( + format!("spu-{}", spu_id), + FlvErrorCode::SpuNotFound, + Some("not found".to_owned()), + ) + } + } + }; + + // save result + results.push(result); + } + + // update response + response.results = results; + trace!("flv delete custom-spus resp {:#?}", response); + + Ok(request.new_response(response)) +} + +/// Generate delete custom spu operation and return the result.
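+/// Only 'Custom' SPUs can be deleted through this path; 'Managed' SPUs are owned by
+/// their spu-group and are cleaned up through K8 owner references instead.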
+pub async fn delete_custom_spu( + ctx: &PublicContext, + spu: &SpuKV, +) -> Result<FlvResponseMessage, Error> { + let spu_name = spu.name(); + + // must be Custom Spu + if !spu.is_custom() { + return Ok(FlvResponseMessage::new( + spu_name.clone(), + FlvErrorCode::SpuError, + Some("expected 'Custom' spu, found 'Managed' spu".to_owned()), + )); + } + + // must have KV context + let item_ctx = match &spu.kv_ctx().item_ctx { + Some(ctx) => ctx, + None => { + return Ok(FlvResponseMessage::new( + spu_name.clone(), + FlvErrorCode::SpuError, + Some("missing Kv context".to_owned()), + )) + } + }; + + // delete custom spec and return result + let item = item_ctx.as_input(); + match ctx.k8_client().delete_item::<K8SpuSpec>(&item).await { + Ok(_) => Ok(FlvResponseMessage::new_ok(spu_name.clone())), + Err(err) => Ok(FlvResponseMessage::new( + spu_name.clone(), + FlvErrorCode::SpuError, + Some(err.to_string()), + )), + } +} diff --git a/sc-server/src/services/public_api/flv/delete_spu_groups_req.rs b/sc-server/src/services/public_api/flv/delete_spu_groups_req.rs new file mode 100644 index 0000000000..1a3d802a17 --- /dev/null +++ b/sc-server/src/services/public_api/flv/delete_spu_groups_req.rs @@ -0,0 +1,53 @@ +//! +//! # Delete Spu Groups Request +//! +//! Delete spu groups request handler. Lookup spu-group in local metadata, grab its K8 context +//! and send K8 a delete message. +//! +use log::{debug, trace}; +use std::io::Error; + +use k8_metadata::spg::SpuGroupSpec; +use kf_protocol::api::FlvErrorCode; +use kf_protocol::api::{RequestMessage, ResponseMessage}; +use sc_api::{FlvResponseMessage}; +use sc_api::spu::{FlvDeleteSpuGroupsRequest, FlvDeleteSpuGroupsResponse}; + + +use super::PublicContext; + +/// Handler for delete spu group request +pub async fn handle_delete_spu_groups_request( + request: RequestMessage<FlvDeleteSpuGroupsRequest>, + ctx: &PublicContext, +) -> Result<ResponseMessage<FlvDeleteSpuGroupsResponse>, Error> { + let mut response = FlvDeleteSpuGroupsResponse::default(); + let mut results: Vec<FlvResponseMessage> = vec![]; + + debug!("delete spu group request: {:#?}", request.request); + + + for spg_name in &request.request.spu_groups { + debug!("api request: delete spu group '{}'", spg_name); + + let result = match ctx.delete::<SpuGroupSpec>(spg_name).await { + Ok(_) => FlvResponseMessage::new_ok(spg_name.clone()), + Err(err) => { + let error = Some(err.to_string()); + FlvResponseMessage::new(spg_name.clone(), FlvErrorCode::SpuError, error) + } + }; + + results.push(result); + } + + + // update response + response.results = results; + trace!("flv delete spu group resp {:#?}", response); + + Ok(request.new_response(response)) +} diff --git a/sc-server/src/services/public_api/flv/delete_topics_req.rs b/sc-server/src/services/public_api/flv/delete_topics_req.rs new file mode 100644 index 0000000000..849ff5481d --- /dev/null +++ b/sc-server/src/services/public_api/flv/delete_topics_req.rs @@ -0,0 +1,63 @@ +//! +//! # Delete Topic Request +//! +//! Delete topic request handler. Lookup topic in local metadata, grab its K8 context +//! and send K8 a delete message. +//!
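+//! Topics that exist locally but have no K8 context yet are acknowledged as
+//! deleted without issuing a K8 call.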
+use log::{debug, trace}; +use std::io::Error; + +use kf_protocol::api::{RequestMessage, ResponseMessage}; +use kf_protocol::api::FlvErrorCode; +use sc_api::{FlvResponseMessage}; +use sc_api::topic::{FlvDeleteTopicsRequest, FlvDeleteTopicsResponse}; +use k8_metadata::topic::TopicSpec as K8TopicSpec; + +use super::PublicContext; + +/// Handler for delete topic request +pub async fn handle_delete_topics_request( + request: RequestMessage<FlvDeleteTopicsRequest>, + ctx: &PublicContext, +) -> Result<ResponseMessage<FlvDeleteTopicsResponse>, Error> { + let mut response = FlvDeleteTopicsResponse::default(); + let mut topic_results: Vec<FlvResponseMessage> = vec![]; + + // process delete topic requests in sequence + for topic_name in &request.request.topics { + debug!("api request: delete topic '{}'", topic_name); + + // topic name must exist + let result = if let Some(topic) = ctx.metadata().topics().topic(topic_name) { + if let Some(item_ctx) = &topic.kv_ctx().item_ctx { + let item = item_ctx.as_input(); + if let Err(err) = ctx.k8_client().delete_item::<K8TopicSpec>(&item).await { + FlvResponseMessage::new( + topic_name.clone(), + FlvErrorCode::TopicError, + Some(err.to_string()), + ) + } else { + FlvResponseMessage::new_ok(topic_name.clone()) + } + } else { + FlvResponseMessage::new_ok(topic_name.clone()) + } + } else { + // topic does not exist + FlvResponseMessage::new( + topic_name.clone(), + FlvErrorCode::TopicNotFound, + Some("not found".to_owned()), + ) + }; + // push result + topic_results.push(result); + } + + // update response + response.results = topic_results; + trace!("flv delete topics resp {:#?}", response); + + Ok(request.new_response(response)) +} diff --git a/sc-server/src/services/public_api/flv/fetch_spu_groups_req.rs b/sc-server/src/services/public_api/flv/fetch_spu_groups_req.rs new file mode 100644 index 0000000000..765111f9d4 --- /dev/null +++ b/sc-server/src/services/public_api/flv/fetch_spu_groups_req.rs @@ -0,0 +1,42 @@ +use log::debug; +use std::io::Error; + + +use kf_protocol::api::FlvErrorCode; +use kf_protocol::api::{RequestMessage, ResponseMessage}; + +use k8_metadata::spg::{SpuGroupSpec}; + +use sc_api::spu::{FlvFetchSpuGroupsRequest, FlvFetchSpuGroupsResponse}; +use sc_api::FlvResponseMessage; + +use super::PublicContext; + +pub async fn handle_fetch_spu_groups_request( + request: RequestMessage<FlvFetchSpuGroupsRequest>, + ctx: &PublicContext +) -> Result<ResponseMessage<FlvFetchSpuGroupsResponse>, Error> { + + let mut response = FlvFetchSpuGroupsResponse::default(); + + match ctx.retrieve_items::<SpuGroupSpec>().await { + Ok(k8_list) => { + debug!("fetched: {} spgs",k8_list.items.len()); + for group in k8_list.items { + response.spu_groups.push(group.into()); + } + }, + Err(err) => { + let error = Some(err.to_string()); + response.error = FlvResponseMessage::new("error".to_owned(), + FlvErrorCode::SpuError, error); + } + } + + + Ok(request.new_response(response)) +} + + + + diff --git a/sc-server/src/services/public_api/flv/fetch_spu_req.rs b/sc-server/src/services/public_api/flv/fetch_spu_req.rs new file mode 100644 index 0000000000..744511f14f --- /dev/null +++ b/sc-server/src/services/public_api/flv/fetch_spu_req.rs @@ -0,0 +1,89 @@ +use log::{trace, debug}; +use std::io::Error; + +use kf_protocol::api::FlvErrorCode; +use kf_protocol::api::{RequestMessage, ResponseMessage}; + +use sc_api::spu::{FlvFetchSpusRequest, FlvFetchSpusResponse}; +use sc_api::spu::FlvFetchSpuResponse; +use sc_api::spu::FlvRequestSpuType; +use sc_api::spu::FlvFetchSpu; +use sc_api::spu::FlvSpuType; +use sc_api::spu::FlvEndPointMetadata; +use sc_api::spu::FlvSpuResolution; + +use metadata::spu::SpuType; +use metadata::spu::SpuResolution; + +use crate::core::ShareLocalStores; +use crate::core::spus::SpuKV; + +pub async fn handle_fetch_spu_request( + request: RequestMessage<FlvFetchSpusRequest>, + metadata: ShareLocalStores, +) -> Result<ResponseMessage<FlvFetchSpusResponse>, Error> { + // identify query type + let (query_custom, query_type) = match request.request.req_spu_type { + FlvRequestSpuType::Custom => (true, "custom"), + FlvRequestSpuType::All => (false, "all"), + }; + + // traverse and convert spus to FLV response + let mut flv_spu: Vec<FlvFetchSpuResponse> = Vec::default(); + for (name, spu) in metadata.spus().inner_store().read().iter() { + // skip custom if necessary + if query_custom && !spu.is_custom() { + continue; + } + flv_spu.push(spu_store_metadata_to_spu_response(name, spu)); + } + + debug!( + "flv fetch {} spus resp: {} items", + query_type, + flv_spu.len() + ); + trace!("flv fetch {} spus resp {:#?}", query_type, flv_spu); + + // prepare response + let mut response = FlvFetchSpusResponse::default(); + response.spus = flv_spu; + + Ok(request.new_response(response)) +} + +/// Encode Spus metadata into SPU FLV response +pub fn spu_store_metadata_to_spu_response(name: &String, spu: &SpuKV) -> FlvFetchSpuResponse { + let public_ep = spu.public_endpoint(); + let private_ep = spu.private_endpoint(); + let flv_spu_type = match spu.spec().spu_type { + SpuType::Custom => FlvSpuType::Custom, + SpuType::Managed => FlvSpuType::Managed, + }; + let flv_resolution = match spu.status().resolution { + SpuResolution::Online => FlvSpuResolution::Online, + SpuResolution::Offline => FlvSpuResolution::Offline, + SpuResolution::Init => FlvSpuResolution::Init, + }; + + let flv_spu = FlvFetchSpu { + id: *spu.id(), + spu_type: flv_spu_type, + public_ep: FlvEndPointMetadata { + host: public_ep.host.clone(), + port: public_ep.port, + }, + private_ep: FlvEndPointMetadata { + host: private_ep.host.clone(), + port: private_ep.port, + }, + rack: spu.rack_clone(), + resolution: flv_resolution, + }; + + FlvFetchSpuResponse { + error_code: FlvErrorCode::None, + name: name.clone(), + spu: Some(flv_spu), + } +} diff --git a/sc-server/src/services/public_api/flv/fetch_topics_req.rs b/sc-server/src/services/public_api/flv/fetch_topics_req.rs new file mode 100644 index 0000000000..d2b2e451bd --- /dev/null +++ b/sc-server/src/services/public_api/flv/fetch_topics_req.rs @@ -0,0 +1,93 @@ +use log::{trace, debug}; +use std::io::Error; + +use kf_protocol::api::{RequestMessage, ResponseMessage}; + +use sc_api::topic::{FlvFetchTopicsRequest, FlvFetchTopicsResponse}; +use sc_api::topic::FlvFetchTopicResponse; +use sc_api::topic::FlvPartitionReplica; +use metadata::partition::ReplicaKey; + +use crate::core::ShareLocalStores; +use crate::core::topics::TopicLocalStore; +use crate::core::partitions::PartitionLocalStore; + +pub async fn handle_fetch_topics_request( + request: RequestMessage<FlvFetchTopicsRequest>, + metadata: ShareLocalStores, +) -> Result<ResponseMessage<FlvFetchTopicsResponse>, Error> { + // if names are provided, return that list; otherwise generate all names + let topic_names = match &request.request.names { + Some(topic_names) => topic_names.clone(), + None => metadata.topics().all_keys(), + }; + + // encode topics + let mut topics = vec![]; + for topic_name in &topic_names { + let mut topic_response = + topic_store_metadata_to_topic_response(metadata.topics(), topic_name); + + // lookup partitions, if topic was found + let partitions = if topic_response.topic.is_some() { + Some(partition_metadata_to_replica_response( + metadata.partitions(), + topic_name, + )) + } else { + None + }; + + topic_response.update_partitions(partitions); + + // push valid and error topics
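+ // (topics that were not found are still pushed, encoded with an error code)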
topics.push(topic_response); + } + + // prepare response + let mut response = FlvFetchTopicsResponse::default(); + response.topics = topics; + + debug!("flv fetch topics resp: {} items", response.topics.len()); + trace!("flv fetch topics resp {:#?}", response); + + Ok(request.new_response(response)) +} + +/// Encode Topic metadata into a Topic FLV Response +pub fn topic_store_metadata_to_topic_response( + topics: &TopicLocalStore, + topic_name: &String, +) -> FlvFetchTopicResponse { + if let Some(topic) = topics.topic(topic_name) { + FlvFetchTopicResponse::new( + topic_name.clone(), + topic.spec.clone(), + topic.status.clone(), + None, + ) + } else { + FlvFetchTopicResponse::new_not_found(topic_name.clone()) + } +} + +/// Encode partitions into a Replica Response +pub fn partition_metadata_to_replica_response( + partitions: &PartitionLocalStore, + topic: &String, +) -> Vec<FlvPartitionReplica> { + let mut res: Vec<FlvPartitionReplica> = Vec::default(); + let partition_cnt = partitions.count_topic_partitions(topic); + for idx in 0..partition_cnt { + let name = ReplicaKey::new(topic.clone(), idx); + if let Some(partition) = partitions.value(&name) { + res.push(FlvPartitionReplica { + id: idx, + leader: partition.spec.leader, + replicas: partition.spec.replicas.clone(), + live_replicas: partition.status.live_replicas().clone(), + }) + } + } + res +} diff --git a/sc-server/src/services/public_api/flv/mod.rs b/sc-server/src/services/public_api/flv/mod.rs new file mode 100644 index 0000000000..06598befaa --- /dev/null +++ b/sc-server/src/services/public_api/flv/mod.rs @@ -0,0 +1,16 @@ +pub mod api_versions_req; + +pub mod create_custom_spus_req; +pub mod delete_custom_spus_req; +pub mod fetch_spu_req; + +pub mod create_spu_groups_req; +pub mod delete_spu_groups_req; +pub mod fetch_spu_groups_req; + +pub mod create_topics_req; +pub mod delete_topics_req; +pub mod fetch_topics_req; +pub mod topic_composition_req; + +use super::PublicContext; diff --git a/sc-server/src/services/public_api/flv/topic_composition_req.rs b/sc-server/src/services/public_api/flv/topic_composition_req.rs new file mode 100644 index 0000000000..7c3b343fa5 --- /dev/null +++ b/sc-server/src/services/public_api/flv/topic_composition_req.rs @@ -0,0 +1,161 @@ +use log::{trace, debug}; +use std::io::Error; + +use kf_protocol::api::{RequestMessage, ResponseMessage}; +use sc_api::topic::FlvTopicCompositionRequest; +use sc_api::topic::FlvTopicCompositionResponse; +use sc_api::topic::FetchTopicReponse; +use sc_api::topic::FetchPartitionResponse; +use sc_api::topic::FetchSpuReponse; +use kf_protocol::api::FlvErrorCode; + +use crate::core::ShareLocalStores; + +pub async fn handle_topic_composition_request( + request: RequestMessage<FlvTopicCompositionRequest>, + metadata: ShareLocalStores, +) -> Result<ResponseMessage<FlvTopicCompositionResponse>, Error> { + let mut topic_comp_resp = FlvTopicCompositionResponse::default(); + let mut spu_ids = vec![]; + + debug!( + "topic-composition, encode topics '{:?}'", + request.request.topic_names + ); + + // encode topics + let mut topics = vec![]; + for topic_name in &request.request.topic_names { + let mut topic = FetchTopicReponse::default(); + topic.name = topic_name.clone(); + + // if topic is found encode it, otherwise error + if let Some(topic_metadata) = metadata.topics().topic(topic_name) { + // check topic resolution, return error if not OK + let topic_status = topic_metadata.status(); + if !topic_status.is_resolution_provisioned() { + let error_code = if topic_status.is_resolution_transient() { + FlvErrorCode::TopicPendingInitialization + } else { + FlvErrorCode::TopicInvalidConfiguration + }; + + // add error and save + topic.error_code = error_code; + topics.push(topic); + } else { + // update partitions + let mut partitions = vec![]; + let partitions_mtd = metadata.partitions().topic_partitions(topic_name); + for (idx, partition_mtd) in partitions_mtd.iter().enumerate() { + let mut partition_response = FetchPartitionResponse::default(); + partition_response.partition_idx = idx as i32; + + // partitions pending initializations return error + if partition_mtd.spec.leader < 0 { + // add pending init error + partition_response.error_code = + FlvErrorCode::PartitionPendingInitialization; + } else { + // update partition with metadata + partition_response.leader_id = partition_mtd.spec.leader.clone(); + partition_response.replicas = partition_mtd.spec.replicas.clone(); + partition_response.live_replicas = partition_mtd.status.live_replicas().clone(); + } + + partitions.push(partition_response); + } + + // collect SPUs ids (if unique) + spu_ids = append_vals_unique(spu_ids, topic_status.spus_in_replica()); + + // add partitions and save + topic.partitions = partitions; + topics.push(topic); + } + } else { + // add not found error and save + topic.error_code = FlvErrorCode::TopicNotFound; + topics.push(topic); + } + } + + debug!("topic-composition, encode spus '{:?}'", spu_ids); + + // encode spus + let mut spus = vec![]; + for spu_id in &spu_ids { + let mut spu = FetchSpuReponse::default(); + spu.spu_id = *spu_id; + + // if spu is found encode it, otherwise error + if let Some(spu_metadata) = metadata.spus().get_by_id(spu_id) { + // check spu resolution, return error if not OK + let spu_status = spu_metadata.status(); + if !spu_status.is_online() { + // add error and save + spu.error_code = FlvErrorCode::SpuOffline; + spus.push(spu); + } else { + // update spu with metadata and save + let public_ep = spu_metadata.public_endpoint(); + spu.host = public_ep.host.clone(); + spu.port = public_ep.port; + + spus.push(spu); + } + } else { + // add not found error and save + spu.error_code = FlvErrorCode::SpuNotFound; + spus.push(spu); + } + } + + // update response + topic_comp_resp.topics = topics; + topic_comp_resp.spus = spus; + + trace!("topic-composition resp {:#?}", topic_comp_resp); + + Ok(request.new_response(topic_comp_resp)) +} + +/// append value if unique +fn append_vals_unique<T>(mut existing_list: Vec<T>, new_list: Vec<T>) -> Vec<T> +where + T: PartialEq + Clone, +{ + for new_id in &new_list { + if !existing_list.contains(new_id) { + existing_list.push(new_id.clone()); + } + } + existing_list +} + +// ----------------------------------- +// Unit Tests +// ----------------------------------- +#[cfg(test)] +pub mod test { + use super::*; + + #[test] + fn test_append_vals_unique() { + let mut existing_spu_ids: Vec<i32> = vec![2, 1]; + let new_spu_ids: Vec<i32> = vec![1, 3]; + let new_spu_ids2: Vec<i32> = vec![2, 3, 4]; + + existing_spu_ids = append_vals_unique(existing_spu_ids, new_spu_ids.clone()); + assert_eq!(existing_spu_ids, vec![2, 1, 3]); + + existing_spu_ids = append_vals_unique(existing_spu_ids, new_spu_ids2); + assert_eq!(existing_spu_ids, vec![2, 1, 3, 4]); + + existing_spu_ids = append_vals_unique(existing_spu_ids, new_spu_ids); + assert_eq!(existing_spu_ids, vec![2, 1, 3, 4]); + + existing_spu_ids = append_vals_unique(existing_spu_ids, vec![]); + assert_eq!(existing_spu_ids, vec![2, 1, 3, 4]); + } +} diff --git a/sc-server/src/services/public_api/kf/metadata_req.rs b/sc-server/src/services/public_api/kf/metadata_req.rs new file mode 100644 index 0000000000..552e1f6aea --- /dev/null +++
b/sc-server/src/services/public_api/kf/metadata_req.rs @@ -0,0 +1,115 @@ +use log::trace; +use std::io::Error; + +use types::Name; + +use kf_protocol::message::metadata::{KfMetadataRequest, KfMetadataResponse}; +use kf_protocol::message::metadata::MetadataResponseTopic; +use kf_protocol::message::metadata::MetadataResponseBroker; +use kf_protocol::message::metadata::MetadataResponsePartition; + +use kf_protocol::api::ErrorCode as KfErrorCode; +use kf_protocol::api::{RequestMessage, ResponseMessage}; + +use crate::core::ShareLocalStores; +use crate::core::spus::SpuKV; +use crate::core::topics::TopicLocalStore; +use crate::core::partitions::PartitionLocalStore; + +pub async fn handle_kf_metadata_request( + request: RequestMessage, + metadata: ShareLocalStores, +) -> Result, Error> { + // generate broker metadata (from online spus) + let spus = metadata.spus().online_spus(); + let resp_brokers = flv_online_spus_to_kf_brokers(&spus); + + // generate topics + let mut resp_topics: Vec = Vec::default(); + if let Some(topics_req) = &request.request.topics { + // lookup specific topics + for topic_req in topics_req { + resp_topics.push(make_kf_topic_response(&topic_req.name, metadata.topics())); + } + } else { + // generate all "ok" topics + for topic_name in metadata.topics().all_keys() { + resp_topics.push(make_kf_topic_response(&topic_name, metadata.topics())); + } + } + + // generate partitions for all valid topics + for idx in 0..resp_topics.len() { + let mut topic = &mut resp_topics[idx]; + if topic.error_code.is_error() { + continue; + } + + // append partitions + topic.partitions = topic_partitions_to_kf_partitions(metadata.partitions(), &topic.name); + } + + // prepare response + let mut response = KfMetadataResponse::default(); + response.brokers = resp_brokers; + response.topics = resp_topics; + + trace!("kf-metadata resp {:#?}", response); + + Ok(request.new_response(response)) +} + +/// Given a topic name, generate Topic Response +fn make_kf_topic_response(name: &Name, topics: &TopicLocalStore) -> MetadataResponseTopic { + let mut topic_resp = MetadataResponseTopic::default(); + topic_resp.name = name.clone(); + + if let Some(flv_topic) = topics.topic(&name) { + if !flv_topic.is_provisioned() { + topic_resp.error_code = KfErrorCode::UnknownTopicOrPartition; + } + } else { + topic_resp.error_code = KfErrorCode::UnknownTopicOrPartition; + } + + topic_resp +} + +/// Convert online SPUs to Kafka Brokers +fn flv_online_spus_to_kf_brokers(online_spus: &Vec) -> Vec { + let mut spu_metadata: Vec = Vec::default(); + + for online_spu in online_spus { + let public_ep = online_spu.public_endpoint(); + spu_metadata.push(MetadataResponseBroker { + node_id: *online_spu.id(), + host: public_ep.host.clone(), + port: public_ep.port as i32, + rack: online_spu.rack_clone(), + }); + } + + spu_metadata +} + +/// Encode all partitions for a topic in Kf format. 
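+/// For illustration, a single partition whose (hypothetical) leader is SPU 5001 with +/// replicas [5001, 5002] and all replicas in sync would be encoded roughly as: +/// `MetadataResponsePartition { error_code: KfErrorCode::None, partition_index: 0, +/// leader_id: 5001, leader_epoch: 0, replica_nodes: vec![5001, 5002], +/// isr_nodes: vec![5001, 5002], offline_replicas: vec![] }`. 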
+pub fn topic_partitions_to_kf_partitions( + partitions: &PartitionLocalStore, + topic: &String, +) -> Vec { + let mut kf_partitions = vec![]; + + for (idx, partition) in partitions.topic_partitions(topic).iter().enumerate() { + kf_partitions.push(MetadataResponsePartition { + error_code: KfErrorCode::None, + partition_index: idx as i32, + leader_id: partition.spec.leader, + leader_epoch: 0, + replica_nodes: partition.spec.replicas.clone(), + isr_nodes: partition.status.live_replicas().clone(), + offline_replicas: partition.status.offline_replicas(), + }) + } + + kf_partitions +} diff --git a/sc-server/src/services/public_api/kf/mod.rs b/sc-server/src/services/public_api/kf/mod.rs new file mode 100644 index 0000000000..6e4bbcda44 --- /dev/null +++ b/sc-server/src/services/public_api/kf/mod.rs @@ -0,0 +1 @@ +pub mod metadata_req; diff --git a/sc-server/src/services/public_api/mod.rs b/sc-server/src/services/public_api/mod.rs new file mode 100644 index 0000000000..2ac2efdc26 --- /dev/null +++ b/sc-server/src/services/public_api/mod.rs @@ -0,0 +1,163 @@ +mod public_server; + +mod flv; +mod kf; + +mod api { + // mixed + pub use super::flv::api_versions_req::*; + + // kafka + pub use super::kf::metadata_req::*; + + // fluvio + pub use super::flv::create_topics_req::*; + pub use super::flv::delete_topics_req::*; + pub use super::flv::fetch_topics_req::*; + pub use super::flv::topic_composition_req::*; + + pub use super::flv::create_custom_spus_req::*; + pub use super::flv::delete_custom_spus_req::*; + pub use super::flv::fetch_spu_req::*; + + pub use super::flv::create_spu_groups_req::*; + pub use super::flv::delete_spu_groups_req::*; + pub use super::flv::fetch_spu_groups_req::*; +} + +use std::sync::Arc; +use std::fmt::Debug; + +use log::info; +use log::debug; +use serde::Serialize; +use serde::de::DeserializeOwned; + +use sc_api::PublicRequest; +use sc_api::ScApiKey; +use kf_service::KfApiServer; +use public_server::PublicService; +use k8_client::K8Client; +use k8_client::ClientError; +use k8_metadata::core::metadata::InputObjectMeta; +use k8_metadata::core::metadata::InputK8Obj; +use k8_metadata::core::metadata::K8List; +use k8_metadata::core::Spec as K8Spec; + + +use crate::core::ShareLocalStores; +use crate::k8::K8WSUpdateService; +use crate::core::LocalStores; + +pub type SharedPublicContext = Arc; + +pub type PubliApiServer = KfApiServer; + +/// create public server +pub fn create_public_server( + metadata: ShareLocalStores, + k8_ws: K8WSUpdateService, + namespace: String, +) -> PubliApiServer { + let addr = metadata.config().public_endpoint.addr.clone(); + info!("start public api service at: {}", addr); + + KfApiServer::new( + addr, + Arc::new(PublicContext { + metadata, + k8_ws, + namespace, + }), + PublicService::new(), + ) +} + +#[derive(Clone)] +pub struct PublicContext { + metadata: ShareLocalStores, + k8_ws: K8WSUpdateService, + namespace: String, +} + +impl PublicContext { + pub fn k8_client(&self) -> &K8Client { + self.k8_ws.client() + } + + pub fn k8_ws(&self) -> &K8WSUpdateService { + &self.k8_ws + } + + pub fn metadata(&self) -> &LocalStores { + &self.metadata + } + + /// Create input metadata for our context + /// which has namespace + pub async fn create( + &self, + name: String, + spec: S + ) -> Result<(),ClientError> + where + S: K8Spec + Serialize + Default + Debug + Clone + DeserializeOwned , + ::Status: Default + Debug + Serialize + DeserializeOwned + { + debug!("creating k8 spec: {:#?}",spec); + let input = InputK8Obj { + api_version: S::api_version(), + kind: 
S::kind(), + metadata: InputObjectMeta { + name, + namespace: self.namespace.clone(), + ..Default::default() + }, + spec, + ..Default::default() + }; + + let client = self.k8_ws.client(); + client.apply(input).await?; + + Ok(()) + } + + /// Delete metadata object in our namespace + pub async fn delete<S>( + &self, + name: &str, + ) -> Result<(),ClientError> + where + S: K8Spec + Serialize + Default + Debug + Clone + DeserializeOwned , + <S as K8Spec>::Status: Default + Debug + DeserializeOwned + { + debug!("deleting k8 obj: {}",name); + let meta = InputObjectMeta { + name: name.to_owned(), + namespace: self.namespace.clone(), + ..Default::default() + }; + + let client = self.k8_ws.client(); + client.delete_item::<S>(&meta).await?; + + Ok(()) + } + + /// retrieve all items in the namespace + pub async fn retrieve_items<S>( + &self + ) -> Result<K8List<S>, ClientError> + where + S: K8Spec, + K8List<S>: DeserializeOwned, + { + + let client = self.k8_ws.client(); + client.retrieve_items::<S>(&self.namespace).await + } + + +} diff --git a/sc-server/src/services/public_api/public_server.rs b/sc-server/src/services/public_api/public_server.rs new file mode 100644 index 0000000000..04536445fa --- /dev/null +++ b/sc-server/src/services/public_api/public_server.rs @@ -0,0 +1,156 @@ +//! +//! # Service Implementation +//! +//! Public service API allows 3rd party systems to invoke operations on Fluvio +//! Streaming Controller. Requests are received and dispatched to handlers +//! based on API keys. +//! + +use std::sync::Arc; + +use futures::future::BoxFuture; +use futures::future::FutureExt; + +use kf_service::api_loop; +use kf_service::call_service; +use kf_socket::KfSocket; +use kf_socket::KfSocketError; + +use kf_service::KfService; + +use sc_api::PublicRequest; +use sc_api::ScApiKey; + +use super::api::handle_api_versions_request; + +use super::api::handle_kf_metadata_request; + +use super::api::handle_create_topics_request; +use super::api::handle_delete_topics_request; +use super::api::handle_fetch_topics_request; +use super::api::handle_topic_composition_request; + +use super::api::handle_create_custom_spus_request; +use super::api::handle_delete_custom_spus_request; +use super::api::handle_fetch_spu_request; + +use super::api::handle_create_spu_groups_request; +use super::api::handle_delete_spu_groups_request; +use super::api::handle_fetch_spu_groups_request; + +use super::SharedPublicContext; + +pub struct PublicService {} + +impl PublicService { + pub fn new() -> Self { + PublicService {} + } + + async fn handle( + self: Arc<Self>, + ctx: SharedPublicContext, + socket: KfSocket, + ) -> Result<(), KfSocketError> { + let (mut sink, mut stream) = socket.split(); + let mut api_stream = stream.api_stream::<PublicRequest, ScApiKey>(); + + api_loop!( + api_stream, + + // Common + PublicRequest::ApiVersionsRequest(request) => call_service!( + request, + handle_api_versions_request(request), + sink, + "api version handler" + ), + + // Kafka + PublicRequest::KfMetadataRequest(request) => call_service!( + request, + handle_kf_metadata_request(request, ctx.metadata.clone()), + sink, + "metadata request handler" + ), + + // Fluvio - Topics + PublicRequest::FlvCreateTopicsRequest(request) => call_service!( + request, + handle_create_topics_request(request, &ctx), + sink, + "create topic handler" + ), + PublicRequest::FlvDeleteTopicsRequest(request) => call_service!( + request, + handle_delete_topics_request(request, &ctx), + sink, + "delete topic handler" + ), + PublicRequest::FlvFetchTopicsRequest(request) => call_service!( + request, + 
handle_fetch_topics_request(request, ctx.metadata.clone()), + sink, + "fetch topic handler" + ), + PublicRequest::FlvTopicCompositionRequest(request) => call_service!( + request, + handle_topic_composition_request(request, ctx.metadata.clone()), + sink, + "topic metadata handler" + ), + + // Fluvio - Spus + PublicRequest::FlvCreateCustomSpusRequest(request) => call_service!( + request, + handle_create_custom_spus_request(request, &ctx), + sink, + "create custom spus handler" + ), + PublicRequest::FlvDeleteCustomSpusRequest(request) => call_service!( + request, + handle_delete_custom_spus_request(request, &ctx), + sink, + "delete custom spus handler" + ), + PublicRequest::FlvFetchSpusRequest(request) => call_service!( + request, + handle_fetch_spu_request(request, ctx.metadata.clone()), + sink, + "fetch spus handler" + ), + + PublicRequest::FlvCreateSpuGroupsRequest(request) => call_service!( + request, + handle_create_spu_groups_request(request, &ctx), + sink, + "create spu groups handler" + ), + PublicRequest::FlvDeleteSpuGroupsRequest(request) => call_service!( + request, + handle_delete_spu_groups_request(request, &ctx), + sink, + "delete spu groups handler" + ), + PublicRequest::FlvFetchSpuGroupsRequest(request) => call_service!( + request, + handle_fetch_spu_groups_request(request, &ctx), + sink, + "fetch spu groups handler" + ) + + ); + + Ok(()) + } +} + +impl KfService for PublicService { + type Context = SharedPublicContext; + type Request = PublicRequest; + type ResponseFuture = BoxFuture<'static, Result<(), KfSocketError>>; + + fn respond(self: Arc<Self>, context: Self::Context, socket: KfSocket) -> Self::ResponseFuture { + self.handle(context, socket).boxed() + } +} diff --git a/sc-server/src/services/send_channels.rs b/sc-server/src/services/send_channels.rs new file mode 100644 index 0000000000..d1168f4424 --- /dev/null +++ b/sc-server/src/services/send_channels.rs @@ -0,0 +1,47 @@ +//! +//! # Dispatcher Send Channels +//! +//! Stores the sender part of the channel used by the Dispatchers +//! +use futures::channel::mpsc::Sender; +use futures::sink::SinkExt; +use types::log_on_err; +use utils::actions::Actions; + +use crate::core::ScRequest; +use crate::core::spus::SpuAction; +use crate::core::partitions::PartitionAction; + + +/// Central action dispatcher +/// It will send actions to the appropriate controller. 
+/// For now, it will send to a central controller, but in the future +/// it will send to individual controllers +#[derive(Debug, Clone)] +pub struct DispatcherSendChannels(Sender<ScRequest>); + +impl DispatcherSendChannels { + pub fn new(sc_sender: Sender<ScRequest>) -> Self { + Self(sc_sender) + } + + pub async fn send_msg_to_sc(&mut self, request: ScRequest) { + log_on_err!( + self.0.send(request).await, + "send Dispatch req to SC: {}" + ); + } + + + + /// send spu actions to spu controller + pub async fn send_spu_actions(&mut self, actions: Actions<SpuAction>) { + //self.send_msg_to_sc(ScRequest::UpdateSPUs(actions)).await; + } + + + pub async fn send_partition_actions(&mut self, actions: Actions<PartitionAction>) { + // self.send_msg_to_sc(ScRequest::UpdatePartitions(actions)).await; + } + +} diff --git a/sc-server/src/tests/fixture/generator.rs b/sc-server/src/tests/fixture/generator.rs new file mode 100644 index 0000000000..40177963f0 --- /dev/null +++ b/sc-server/src/tests/fixture/generator.rs @@ -0,0 +1,219 @@ +use std::sync::Arc; + +use log::debug; +use futures::channel::mpsc::Receiver; +use futures::channel::mpsc::channel; +use futures::channel::mpsc::Sender; +use futures::SinkExt; + +use error::ServerError; +use types::socket_helpers::EndPoint; +use kf_socket::KfSocket; +use kf_socket::KfSocketError; +use kf_protocol::api::Request; +use kf_protocol::api::RequestMessage; +use kf_protocol::api::ResponseMessage; +use utils::actions::Actions; + +use crate::cli::ScConfig; +use crate::core::SharedScMetadata; +use crate::create_core_services; +use crate::services::InternalApiServer; +use crate::core::ScRequest; +use crate::core::ScRequestSender; +use crate::core::HcActionSender; +use crate::hc_manager::HcAction; + +use super::mock_spu::SpuGlobalContext; +use super::mock_spu::SharedSpuContext; +use super::ScTestRunner; +use super::ScTest; +use super::SpuSpec; +use super::SharedKVStore; + +#[derive(Default)] +pub struct TestGenerator { + + base_id: i32, + base_port: u16, + total_spu: usize, // total number of spu + init_spu: usize // initial spu to start +} + + +impl TestGenerator { + + pub fn set_base_id(mut self,id: i32) -> Self { + self.base_id = id; + self + } + + pub fn set_base_port(mut self,port: u16) -> Self { + self.base_port = port; + self + } + + pub fn set_total_spu(mut self,len: usize) -> Self { + self.total_spu = len; + self + } + + pub fn set_init_spu(mut self,len: usize) -> Self { + self.init_spu = len; + self + } + + pub fn total_spu(&self) -> usize { + self.total_spu + } + + pub fn initial_spu(&self) -> usize { + self.init_spu + } + + + pub fn create_spu_spec(&self, spu_index: u16) -> SpuSpec { + + let port = spu_index + self.base_port + 2; + + SpuSpec::new(self.base_id + spu_index as i32, + EndPoint::local_end_point(port)) + } + + + pub fn sc_config(&self) -> ScConfig { + + ScConfig { + id: self.base_id, + public_endpoint: EndPoint::local_end_point(self.base_port), + private_endpoint: EndPoint::local_end_point(self.base_port + 1), + run_k8_dispatchers: false, + ..Default::default() + } + } + + + /// create mock sc server which only runs internal services. 
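+/// For example, a (hypothetical) generator built as +/// `TestGenerator::default().set_base_id(7000).set_base_port(7000).set_total_spu(2).set_init_spu(2)` +/// produces an sc with public/private endpoints on ports 7000/7001 and spu specs with +/// ids 7000/7001 on ports 7002/7003 (see `create_spu_spec` and `sc_config` above). 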
+ pub fn create_sc_server(&self) -> ((InternalApiServer,Receiver),ScClient) + { + + let config = self.sc_config(); + + let (private_terminator,receiver_private) = channel::(1); + let (sc_sender,sc_receiver) = channel::(100); + + let kv_store = SharedKVStore::new(sc_sender.clone()); + let (ctx,sc_request_sender,hc_sender,_dispatch_sender,private_server) = create_core_services(config.clone(),kv_store.clone(),(sc_sender,sc_receiver)); + + ( + (private_server,receiver_private), + ScClient { + config, + sc_request_sender, + private_terminator, + hc_sender, + ctx, + kv_store + }) + } + + pub fn run_spu_server(&self,spec: SpuSpec, test_runner: Arc>,receiver: Receiver) -> SharedSpuContext + where T: ScTest + Sync + Send + 'static + { + let spu_contxt = SpuGlobalContext::new_shared_context(spec); + spu_contxt.clone().run( test_runner,receiver); + spu_contxt + } + + pub fn run_server_with_index(&self,i: usize,test_runner: Arc>) -> (SharedSpuContext,Sender) + where T: ScTest + Sync + Send + 'static + { + let spu_spec = self.create_spu_spec(i as u16); + let (sender, receiver) = channel::(1); + let ctx = self.run_spu_server(spu_spec, test_runner, receiver); + (ctx,sender) + } + +} + + +/// Client representation to ScServer +pub struct ScClient{ + config: ScConfig, + ctx: SharedScMetadata, + private_terminator: Sender, + sc_request_sender: ScRequestSender, + hc_sender: HcActionSender, + kv_store: SharedKVStore +} + +impl ScClient { + + + pub fn config(&self) -> &ScConfig { + &self.config + } + + pub fn kv_store(&self) -> SharedKVStore { + self.kv_store.clone() + } + + + pub async fn terminate_private_server(&self) { + debug!("terminating sc private server"); + let mut terminate = self.private_terminator.clone(); + terminate + .send(true) + .await + .expect("sc private shutdown should work"); + } + + #[allow(dead_code)] + pub async fn send_to_internal_server<'a,R>(&'a self, req_msg: &'a RequestMessage) -> Result<(), KfSocketError> where R: Request, + { + + let end_point = &self.config().private_endpoint; + debug!( + "client: trying to connect to private endpoint: {}", + end_point + ); + let mut socket = KfSocket::connect(&end_point.addr).await?; + debug!("connected to internal endpoint {}", end_point); + let res_msg = socket.send(&req_msg).await?; + debug!("response: {:#?}", res_msg); + Ok(()) + } + + #[allow(dead_code)] + pub async fn send_to_public_server<'a,R>(&'a self, req_msg: &'a RequestMessage) -> Result, KfSocketError> + where + R: Request, + { + let end_point = &self.config().public_endpoint; + + debug!( + "client: trying to connect to public endpoint: {}", + end_point + ); + + let mut socket = KfSocket::connect(&end_point.addr).await?; + debug!("connected to public end point {:#?}", end_point); + let res_msg = socket.send(&req_msg).await?; + debug!("response: {:#?}", res_msg); + Ok(res_msg) + } + + pub async fn send_sc_request(&self,request: ScRequest) -> Result<(),ServerError> + { + let mut sender = self.sc_request_sender.clone(); + sender.send(request).await.map_err(|err| err.into()) + } + + pub async fn send_hc_action(&self,actions: Actions) -> Result<(),ServerError> + { + let mut sender = self.hc_sender.clone(); + sender.send(actions).await.map_err(|err| err.into()) + } + + +} diff --git a/sc-server/src/tests/fixture/mock_cm.rs b/sc-server/src/tests/fixture/mock_cm.rs new file mode 100644 index 0000000000..7912ee0ff3 --- /dev/null +++ b/sc-server/src/tests/fixture/mock_cm.rs @@ -0,0 +1,75 @@ +/// mock connection mananger +/// + +use std::sync::Arc; + +use log::debug; +use 
futures::future::BoxFuture; +use futures::future::FutureExt; + +use error::ServerError; +use kf_protocol::api::Request; +use kf_protocol::api::RequestMessage; +use types::SpuId; +use utils::actions::Actions; + + +use crate::conn_manager::SpuConnections; +use crate::conn_manager::ConnAction; + +// ----------------------------------- +// Data Structures +// ----------------------------------- +pub type SharedMockConnManager = Arc<MockConnectionManager>; + + +pub struct MockConnectionManager { + +} + +impl MockConnectionManager { + pub fn shared_conn_manager() -> Arc<MockConnectionManager> { + Arc::new(MockConnectionManager{}) + } +} + + + +impl SpuConnections for MockConnectionManager { + + type ResponseFuture = BoxFuture<'static, Result<(),ServerError>>; + + /// send request message to specific spu + /// this is a one way send + fn send_msg<R>(self: Arc<Self>, _spu_id: SpuId, _req_msg: RequestMessage<R>) -> Self::ResponseFuture + where R: Request + Send + Sync + 'static + { + async move { + Ok(()) + }.boxed() + + } + + + // ----------------------------------- + // Action Request Processing + // ----------------------------------- + + /// process connection action request + fn process_connection_request(&self, actions: Actions<ConnAction>) { + debug!("conn actions: {:?}", actions); + } + + // ----------------------------------- + // Formatting + // ----------------------------------- + + /// return connection information in table format + //#[cfg(test)] + fn table_fmt(&self) -> String { + String::new() + } + + +} + diff --git a/sc-server/src/tests/fixture/mock_kv.rs b/sc-server/src/tests/fixture/mock_kv.rs new file mode 100644 index 0000000000..9887207fa1 --- /dev/null +++ b/sc-server/src/tests/fixture/mock_kv.rs @@ -0,0 +1,222 @@ +use std::sync::Arc; + +use log::debug; +use futures::SinkExt; +use futures::future::BoxFuture; +use futures::future::FutureExt; + +use types::SpuId; +use error::ServerError; +use metadata::partition::ReplicaKey; +use utils::actions::Actions; +use utils::SimpleConcurrentHashMap; +use types::log_on_err; + +use crate::core::auth_tokens::AuthTokenKV; +use crate::core::auth_tokens::AuthTokenAction; +use crate::core::auth_tokens::AuthTokenKvsAction; +use crate::core::topics::TopicAction; +use crate::core::partitions::PartitionAction; +use crate::core::partitions::PartitionKV; +use crate::core::partitions::PartitionKvsAction; +use crate::core::topics::TopicKV; +use crate::core::topics::TopicKvsAction; +use crate::core::spus::SpuAction; +use crate::core::spus::SpuKV; +use crate::core::KvMetadataService; +use crate::core::ScRequestSender; +use crate::core::ScRequest; + +/// dummy kv store; all it does is maintain an in-memory representation of the kv +/// +/// +pub struct MockKVStore { + spus: SimpleConcurrentHashMap<SpuId, SpuKV>, + topics: SimpleConcurrentHashMap<String, TopicKV>, + partitions: SimpleConcurrentHashMap<ReplicaKey, PartitionKV>, +} + +impl MockKVStore { + fn new() -> Self { + MockKVStore { + spus: SimpleConcurrentHashMap::new(), + topics: SimpleConcurrentHashMap::new(), + partitions: SimpleConcurrentHashMap::new(), + } + } +} + +#[derive(Clone)] +pub struct SharedKVStore { + kv: Arc<MockKVStore>, + sc_request: ScRequestSender, +} + +impl SharedKVStore { + pub fn new(sc_request: ScRequestSender) -> Self { + SharedKVStore { + kv: Arc::new(MockKVStore::new()), + sc_request, + } + } + + pub fn spus(&self) -> &SimpleConcurrentHashMap<SpuId, SpuKV> { + &self.kv.spus + } + + /// raw insert of spus + pub fn insert_spus(&self, spus: Vec<SpuKV>) { + let mut write_lock = self.kv.spus.write(); + for spu in spus { + write_lock.insert(spu.id(), spu); + } + } + + pub fn insert_topics(&self, topics: Vec<(String, TopicKV)>) { + let mut 
write_lock = self.kv.topics.write(); + for (name, topic) in topics { + write_lock.insert(name, topic); + } + } + + pub fn insert_partitions(&self, partitions: Vec<(ReplicaKey, PartitionKV)>) { + let mut write_lock = self.kv.partitions.write(); + for (key, partition) in partitions { + write_lock.insert(key, partition); + } + } + + // send all values to sc + pub async fn update_all(mut self) { + debug!("sending all metadata to controller"); + + let auth_token_actions: Actions = Actions::default(); + + let mut spu_actions: Actions = Actions::default(); + for (_, spu) in self.kv.spus.read().iter() { + let name = format!("spu-{}", spu.id()); + spu_actions.push(SpuAction::AddSpu(name, spu.clone())); + } + + let mut topic_actions: Actions = Actions::default(); + for (name, topic) in self.kv.topics.read().iter() { + topic_actions.push(TopicAction::AddTopic(name.clone(), topic.clone())); + } + + let mut partition_actions: Actions = Actions::default(); + for (key, partition) in self.kv.partitions.read().iter() { + partition_actions.push(PartitionAction::AddPartition( + key.clone(), + partition.clone(), + )); + } + + self.sc_request + .send(ScRequest::UpdateAll( + auth_token_actions, + spu_actions, + topic_actions, + partition_actions, + )) + .await + .expect("expect should work"); + } + + + async fn update_auth_token(&self, _name: String, auth_token: AuthTokenKV) -> Result<(), ServerError> { + debug!("update auth_token {:#?}", auth_token); + Ok(()) + } + + async fn add_partition(&self, _name: ReplicaKey, partition: PartitionKV) -> Result<(), ServerError> { + debug!("add partition {:#?}", partition); + //self.0.spus.insert(spu.id(),spu); + Ok(()) + } + + async fn update_partition(&self, _name: ReplicaKey, partition: PartitionKV) -> Result<(), ServerError> { + debug!("update partition {:#?}", partition); + //self.0.spus.insert(spu.id(),spu); + Ok(()) + } + + async fn update_topic(&self, _name: String, topic: TopicKV) -> Result<(), ServerError> { + debug!("update topic {:#?}", topic); + Ok(()) + } +} + +impl KvMetadataService for SharedKVStore { + type ResponseFuture = BoxFuture<'static, Result<(), ServerError>>; + + /// update kvs + /// + fn process_partition_actions(&self,partition_kvs_actions: Actions) -> Self::ResponseFuture + { + debug!( + "KVS-SND[partition]: {} actions", + partition_kvs_actions.count() + ); + + let service = self.clone(); + + async move { + + for action in partition_kvs_actions.iter() { + match action { + PartitionKvsAction::AddPartition(name, partition) => { + log_on_err!( + service + .add_partition(name.clone(), partition.clone()) + .await + ); + } + PartitionKvsAction::UpdatePartitionStatus(name, partition) => { + log_on_err!( + service + .update_partition(name.clone(), partition.clone()) + .await + ); + } + } + } + Ok(()) + + }.boxed() + + } + + /// update kvs + fn update_spu(&self, spu: SpuKV) -> Self::ResponseFuture { + debug!("updating kv spu {:#?}", spu); + let name = format!("spu-{}", spu.id()); + let mut spu_actions: Actions = Actions::default(); + match self.kv.spus.insert(spu.id(), spu.clone()) { + Some(old_spu) => spu_actions.push(SpuAction::ModSpu(name, spu, old_spu)), + None => spu_actions.push(SpuAction::AddSpu(name, spu)), + }; + let mut sc_sender = self.sc_request.clone(); + async move { + sc_sender + .send(ScRequest::UpdateSPUs(spu_actions)) + .await + .expect("send"); + Ok(()) + } + .boxed() + } + + + fn process_topic_actions(&self, _actions: Actions) -> Self::ResponseFuture { + async { + Ok(()) + }.boxed() + } + + fn process_auth_actions(&self, _actions: 
Actions<AuthTokenKvsAction>) -> Self::ResponseFuture { + async { + Ok(()) + }.boxed() + } + +} diff --git a/sc-server/src/tests/fixture/mock_spu.rs b/sc-server/src/tests/fixture/mock_spu.rs new file mode 100644 index 0000000000..c52a6a1cff --- /dev/null +++ b/sc-server/src/tests/fixture/mock_spu.rs @@ -0,0 +1,264 @@ +use std::sync::Arc; +use std::time::Duration; +use std::io::Error as IoError; + +use log::info; +use log::debug; +use log::warn; +use log::trace; +use futures::select; +use futures::stream::StreamExt; +use futures::future::FutureExt; + +use futures::channel::mpsc::Receiver; + +use internal_api::InternalSpuApi; +use internal_api::InternalSpuRequest; +use internal_api::RegisterSpuRequest; +use internal_api::UpdateSpuRequest; +use internal_api::UpdateReplicaRequest; +use internal_api::messages::SpuContent; +use kf_protocol::api::RequestMessage; +use kf_socket::KfSocket; +use kf_socket::KfSocketError; +use types::socket_helpers::EndPoint; +use types::SpuId; +use utils::SimpleConcurrentHashMap; + +use future_helper::spawn; +use future_helper::sleep; + + +use super::ScTestRunner; +use super::ScTest; + +#[derive(Debug, Clone)] +pub struct SpuSpec { + pub id: SpuId, + pub end_point: EndPoint, +} + +impl SpuSpec { + pub fn new(id: SpuId, end_point: EndPoint) -> Self { + Self { id, end_point } + } +} + + +pub struct SpuGlobalContext { + pub spec: SpuSpec, + pub spus: SimpleConcurrentHashMap<String, SpuContent> +} + +impl SpuGlobalContext { + pub fn spec(&self) -> &SpuSpec { + &self.spec + } +} + +pub type SharedSpuContext = Arc<SpuGlobalContext>; + +impl SpuGlobalContext { + pub fn new_shared_context(spec: SpuSpec) -> SharedSpuContext { + Arc::new(SpuGlobalContext { + spec, + spus: SimpleConcurrentHashMap::new() + }) + } + + pub fn id(&self) -> SpuId { + self.spec.id + } + + pub fn run<T>( + self: Arc<Self>, + test_runner: Arc<ScTestRunner<T>>, + receiver: Receiver<bool>, + ) where + T: ScTest + Sync + Send + 'static, + { + info!( + "starting Mock SPU Server:{} at: {:#?}", + self.spec.id, self.spec.end_point + ); + + MockSpuController::run(self.clone(), test_runner, receiver); + + // KfApiServer::new(addr, self.clone(), MockInternalService::new(test_runner,self.clone())) + } +} + +/// Spu controller +pub struct MockSpuController<T> { + receiver: Receiver<bool>, + ctx: SharedSpuContext, + test_runner: Arc<ScTestRunner<T>>, +} + +impl<T> MockSpuController<T> +where + T: ScTest + Sync + Send + 'static, +{ + /// start the controller with ctx and receiver + pub fn run(ctx: SharedSpuContext, test_runner: Arc<ScTestRunner<T>>, receiver: Receiver<bool>) { + let controller = Self { + ctx, + receiver, + test_runner, + }; + + spawn(controller.inner_run()); + } + + async fn inner_run(mut self) -> Result<(), ()> { + debug!("Mock spu: waiting 10ms to spin up"); + sleep(Duration::from_millis(10)).await.expect("panic"); + info!("starting SPU Controller"); + + loop { + if let Some(socket) = self.create_socket_to_sc().await { + trace!("established connection to sc for spu: {}", self.ctx.id()); + match self.stream_loop(socket).await { + Ok(_) => break, + Err(err) => warn!("error connecting to sc: {:#?}", err), + } + + // 1 second is a heuristic value; it may change in the future or could be dynamic + // depending on the backoff algorithm + sleep(Duration::from_millis(1000)) + .await + .expect("waiting 1 second for each loop"); + } + } + + Ok(()) + } + + /// process api stream from socket + async fn stream_loop(&mut self, mut socket: KfSocket) -> Result<(), KfSocketError> { + self.send_spu_registeration(&mut socket).await?; + + let (mut _sink, mut stream) = socket.split(); + let mut api_stream = stream.api_stream::<InternalSpuRequest, InternalSpuApi>(); + + loop { + select! 
{ + _ = self.receiver.next() => { + info!("spu: received termination msg"); + break; + }, + api_msg = api_stream.next().fuse() => { + if let Some(msg) = api_msg { + if let Ok(req_message) = msg { + log::trace!("received request: {:#?}",req_message); + match req_message { + InternalSpuRequest::UpdateSpuRequest(request) => { + handle_spu_update_request(request, self.ctx.clone()).await.expect("spu handl should work"); + }, + InternalSpuRequest::UpdateReplicaRequest(request) => { + handle_update_replica_request(request, self.ctx.clone()).await.expect("replica request"); + } + } + + } else { + log::trace!("no content, end of connection {:#?}", msg); + break; + } + + } else { + log::trace!("client connect terminated"); + break; + } + } + + } + } + + info!("spu terminated"); + Ok(()) + + } + + async fn send_spu_registeration<'a>( + &'a self, + socket: &'a mut KfSocket, + ) -> Result<(), KfSocketError> { + let spu_id = self.ctx.id(); + debug!("sending spu registeration: {}", spu_id); + let mut message = RequestMessage::new_request(RegisterSpuRequest::new(spu_id)); + message + .get_mut_header() + .set_client_id(format!("spu: {}", spu_id)); + + let _response = socket.send(&message).await?; + debug!("received spu registeration: {}", spu_id); + Ok(()) + } + + /// connect to sc if can't connect try until we succeed + /// or if we received termination message + async fn create_socket_to_sc(&mut self) -> Option { + let spu_id = self.ctx.id(); + let sc_config = self.test_runner.test().env_configuration().sc_config(); + let addr = sc_config.private_endpoint.addr; + let wait_interval = 10; + loop { + trace!( + "trying to create socket to sc: {:#?} for spu: {}", + addr, + spu_id + ); + let connect_future = KfSocket::fusable_connect(&addr); + + select! { + socket_res = connect_future.fuse() => { + match socket_res { + Ok(socket) => { + debug!("connected to sc for spu: {}",spu_id); + return Some(socket) + } + Err(err) => warn!("error connecting to sc: {}",err) + } + + trace!("sleeping {} ms to connect to sc: {}",wait_interval,spu_id); + sleep(Duration::from_millis(wait_interval as u64)).await.expect("sleep should not fail"); + }, + _ = self.receiver.next() => { + info!("termination message received"); + return None + } + } + } + } +} + +async fn handle_spu_update_request( + req_msg: RequestMessage, + ctx: SharedSpuContext, +) -> Result<(), IoError> { + let (_header, request) = req_msg.get_header_request(); + + let req = request.content(); + + debug!("spu update request: {:#?}", req); + assert_eq!(req.target_spu, ctx.id()); + + for msg in req.content.spus { + let mut spu_lock = ctx.spus.write(); + let spu_content = msg.content; + spu_lock.insert(spu_content.name.clone(),spu_content); + } + Ok(()) +} + +async fn handle_update_replica_request( + req_msg: RequestMessage, + ctx: SharedSpuContext, +) -> Result<(), IoError> { + let (_header, request) = req_msg.get_header_request(); + let req = request.decode_request(); + debug!("spu update replica request: {:#?}", req); + assert_eq!(req.target_spu, ctx.id()); + + Ok(()) +} diff --git a/sc-server/src/tests/fixture/mod.rs b/sc-server/src/tests/fixture/mod.rs new file mode 100644 index 0000000000..a88832085e --- /dev/null +++ b/sc-server/src/tests/fixture/mod.rs @@ -0,0 +1,35 @@ + mod generator; + mod test_runner; + mod mock_spu; + mod mock_kv; + mod mock_cm; + +pub use generator::TestGenerator; +pub use generator::ScClient; +pub use test_runner::ScTestRunner; +pub use mock_spu::SharedSpuContext; +pub use mock_spu::SpuSpec; +pub use mock_kv::SharedKVStore; +pub use 
mock_kv::MockKVStore; +pub use mock_cm::MockConnectionManager; + +use std::sync::Arc; + +use futures::Future; + +use kf_socket::KfSocketError; +use types::SpuId; + +/// Customize System Test +pub trait ScTest: Sized { + + type ResponseFuture: Send + Future>; + + /// environment configuration + fn env_configuration(&self) -> TestGenerator; + + fn topics(&self) -> Vec<(String,Vec>)> ; + + /// main entry point + fn main_test(&self,runner: Arc>) -> Self::ResponseFuture; +} diff --git a/sc-server/src/tests/fixture/test_runner.rs b/sc-server/src/tests/fixture/test_runner.rs new file mode 100644 index 0000000000..836cf1f015 --- /dev/null +++ b/sc-server/src/tests/fixture/test_runner.rs @@ -0,0 +1,193 @@ +use std::time::Duration; +use std::sync::Arc; + +use log::debug; +use futures::SinkExt; +use futures::future::join; +use futures::channel::mpsc::Sender; + +use future_helper::sleep; +use kf_socket::KfSocketError; +use types::SpuId; +use metadata::topic::TopicSpec; +use metadata::topic::TopicStatus; +use metadata::topic::TopicResolution; +use metadata::partition::ReplicaKey; +use utils::SimpleConcurrentHashMap; + +use crate::core::common::test_fixtures::create_spu; +use crate::core::spus::SpuKV; +use crate::core::topics::TopicKV; +use crate::core::partitions::PartitionKV; + +use super::ScTest; +use super::SharedSpuContext; +use super::SpuSpec; +use super::ScClient; +use super::TestGenerator; + +pub struct SpuRunContext { + pub sender: Sender, + pub ctx: SharedSpuContext, +} +pub struct ScTestRunner { + client_id: String, + sc_server: ScClient, + spu_ctxs: SimpleConcurrentHashMap, + test: T, + generator: TestGenerator, +} + +impl ScTestRunner +where + T: ScTest + Send + Sync + 'static, +{ + pub async fn run(client_id: String, test: T) -> Result<(), KfSocketError> { + debug!("starting sc test harness"); + let generator = test.env_configuration(); + + let ((private_server, receiver_private), sc_server) = generator.create_sc_server(); + let runner = ScTestRunner { + client_id, + test, + sc_server, + generator, + spu_ctxs: SimpleConcurrentHashMap::new(), + }; + + let arc_runner = Arc::new(runner); + + let mut spu_ctx = vec![]; + + let generator = arc_runner.generator(); + debug!("starting init spu servers: {}", generator.initial_spu()); + for i in 0..generator.initial_spu() { + let (ctx, sender) = generator.run_server_with_index(i, arc_runner.clone()); + spu_ctx.push((ctx.id(), SpuRunContext { sender, ctx })); + } + arc_runner.set_spu_ctx(spu_ctx); + + join( + arc_runner.run_test(), + private_server.run_shutdown(receiver_private), + ) + .await; + Ok(()) + } + + async fn run_test(self: Arc) { + // wait until controller start u + self.send_initial_metadata_to_sc_controller().await; + debug!("starting main test: waiting 10 ms"); + sleep(Duration::from_millis(10)).await.expect("panic"); + self.test() + .main_test(self.clone()) + .await + .expect("test should run"); + self.terminate_server().await; + } + + pub fn test(&self) -> &T { + &self.test + } + + pub fn generator(&self) -> &TestGenerator { + &self.generator + } + + pub fn spu_ctxs(&self) -> &SimpleConcurrentHashMap { + &self.spu_ctxs + } + + pub fn sc_client(&self) -> &ScClient { + &self.sc_server + } + + // terminating server + async fn terminate_server(&self) { + self.sc_server.terminate_private_server().await; + let sender_ctx = self.spu_senders(); + debug!("start terminating mock spu servers: {}", sender_ctx.len()); + for spu in sender_ctx { + let (spec, mut sender) = spu; + debug!("terminating mock spu server: {}", spec.id); + 
sender.send(true).await.expect("spu shutdown should work"); + } + } + + fn spu_senders(&self) -> Vec<(SpuSpec, Sender)> { + let mut senders = vec![]; + let lock = self.spu_ctxs.read(); + for (_, ctx) in lock.iter() { + senders.push((ctx.ctx.spec().clone(), ctx.sender.clone())); + } + senders + } + + fn set_spu_ctx(&self, ctxs: Vec<(SpuId, SpuRunContext)>) { + let mut lock = self.spu_ctxs.write(); + for (id, ctx) in ctxs { + lock.insert(id, ctx); + } + } + + fn spu_specs(&self) -> Vec { + let mut sc_specs = vec![]; + let ctx_read_lock = self.spu_ctxs.read(); + for (_, spu_ctx) in ctx_read_lock.iter() { + let spu_spec = spu_ctx.ctx.spec(); + let sc_spu_spec = create_spu(spu_spec.id, "123556", false, None); + sc_specs.push(sc_spu_spec); + } + sc_specs + } + + pub async fn send_initial_metadata_to_sc_controller(&self) { + debug!("sending metadata to sc: waiting 5ms to spin up tests"); + sleep(Duration::from_millis(5)).await.expect("panic"); + debug!("populating store value with initial "); + // populate spu + let kv_store = self.sc_server.kv_store(); + + let mut spu_specs = vec![]; + debug!("metadata spu count: {}", self.generator().total_spu()); + for i in 0..self.generator().total_spu() { + let spu_spec = self.generator().create_spu_spec(i as u16); + let spu_id = spu_spec.id; + debug!("initial spu: {}", spu_id); + let sc_spu_spec = create_spu(spu_id, "123556", false, None); + spu_specs.push(sc_spu_spec); + } + kv_store.insert_spus(spu_specs); + + let mut topics = vec![]; + let mut partitions = vec![]; + + for (topic, replica_map) in self.test.topics() { + let status = TopicStatus::new(TopicResolution::Ok, replica_map.clone(), "".to_owned()); + + let replica_len = if replica_map.len() > 0 { + replica_map[0].len() as i32 + } else { + 0 + }; + topics.push(( + topic.clone(), + TopicKV::new(TopicSpec::new_computed(1, replica_len, None), status), + )); + + for replicas in replica_map { + partitions.push(( + ReplicaKey::new(topic.clone(), 0), + PartitionKV::with_replicas(0, replicas), + )); + } + } + + kv_store.insert_topics(topics); + kv_store.insert_partitions(partitions); + + // send + kv_store.update_all().await; + } +} diff --git a/sc-server/src/tests/mod.rs b/sc-server/src/tests/mod.rs new file mode 100644 index 0000000000..3446da7cdc --- /dev/null +++ b/sc-server/src/tests/mod.rs @@ -0,0 +1,2 @@ +pub(crate) mod fixture; +mod suite; \ No newline at end of file diff --git a/sc-server/src/tests/suite/conn_test.rs b/sc-server/src/tests/suite/conn_test.rs new file mode 100644 index 0000000000..b0541d2e68 --- /dev/null +++ b/sc-server/src/tests/suite/conn_test.rs @@ -0,0 +1,95 @@ +// test internal services +use std::sync::Arc; +use std::time::Duration; + +use log::debug; +use futures::future::BoxFuture; +use futures::future::FutureExt; +use futures::SinkExt; + +use future_helper::test_async; +use future_helper::sleep; +use kf_socket::KfSocketError; +use types::SpuId; +use metadata::spu::SpuResolution; + +use crate::tests::fixture::ScTestRunner; +use crate::tests::fixture::ScTest; +use crate::tests::fixture::TestGenerator; + +const BASE_ID: i32 = 7100; + + +struct SimpleInternalTest { + +} + + +impl ScTest for SimpleInternalTest { + + type ResponseFuture = BoxFuture<'static, Result<(),KfSocketError>>; + + fn env_configuration(&self) -> TestGenerator { + TestGenerator::default() + .set_base_id(BASE_ID) + .set_base_port(BASE_ID as u16) + .set_total_spu(1) + } + + + fn topics(&self) -> Vec<(String,Vec>)>{ + vec![("test".to_owned(),vec![vec![BASE_ID]])] + } + + /// spin up spu and down. 
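+/// The flow below: the mock spu registers with the sc, the sc marks it Online in the +/// kv store, and once the spu terminator channel fires the sc health check flips the +/// status to Offline. 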
+ fn main_test(&self, runner: Arc<ScTestRunner<Self>>) -> Self::ResponseFuture { + + let generator = runner.generator(); + let (ctx,mut spu0_terminator) = generator.run_server_with_index(0,runner.clone()); // spu id BASE_ID (7100) + + async move { + sleep(Duration::from_millis(100)).await.expect("panic"); + + debug!("spu server: {} is up, let's make sure sc and spu have done their work",BASE_ID); + + let sc_server = runner.sc_client(); + let kv_store = sc_server.kv_store(); + { + let lock = kv_store.spus().read(); + let spu = lock.get(&BASE_ID).expect("spu"); + assert_eq!(spu.status.resolution,SpuResolution::Online); + let spu_lock = ctx.spus.read(); + let name = format!("spu-{}",BASE_ID); + spu_lock.get(&name).expect("spu content"); + } + + + // shutdown internal server + debug!("shutting down spu {}",BASE_ID); + spu0_terminator.send(true).await.expect("shutdown spu 0"); + debug!("ready to test down state"); + sleep(Duration::from_millis(20)).await.expect("panic"); + { + let lock = kv_store.spus().read(); + let spu = lock.get(&BASE_ID).expect("spu"); + assert_eq!(spu.status.resolution,SpuResolution::Offline); + let spu_lock = ctx.spus.read(); + let name = format!("spu-{}",BASE_ID); + spu_lock.get(&name).expect("spu content"); + } + + Ok(()) + }.boxed() + } + +} + + +/// test spu online and offline +#[test_async] +async fn connection_test() -> Result<(), KfSocketError> { + + let test = SimpleInternalTest{}; + ScTestRunner::run("connection test".to_owned(),test).await.expect("test runner should not fail"); + Ok(()) +} diff --git a/sc-server/src/tests/suite/mod.rs b/sc-server/src/tests/suite/mod.rs new file mode 100644 index 0000000000..446f24eb99 --- /dev/null +++ b/sc-server/src/tests/suite/mod.rs @@ -0,0 +1,2 @@ +mod conn_test; +mod partition_test; \ No newline at end of file diff --git a/sc-server/src/tests/suite/partition_test.rs b/sc-server/src/tests/suite/partition_test.rs new file mode 100644 index 0000000000..09747a0196 --- /dev/null +++ b/sc-server/src/tests/suite/partition_test.rs @@ -0,0 +1,65 @@ +// test internal services +use std::sync::Arc; +use std::time::Duration; + +use log::debug; +use futures::future::BoxFuture; +use futures::future::FutureExt; + +use future_helper::test_async; +use future_helper::sleep; +use kf_socket::KfSocketError; +use types::SpuId; + +use crate::tests::fixture::ScTestRunner; +use crate::tests::fixture::ScTest; +use crate::tests::fixture::TestGenerator; + + +struct ReplicationTest { + +} + + +impl ScTest for ReplicationTest { + + type ResponseFuture = BoxFuture<'static, Result<(),KfSocketError>>; + + fn env_configuration(&self) -> TestGenerator { + TestGenerator::default() + .set_base_id(7000) + .set_base_port(7000) + .set_total_spu(2) + .set_init_spu(2) + } + + + fn topics(&self) -> Vec<(String, Vec<Vec<SpuId>>)> { + vec![("test".to_owned(),vec![vec![7000,7001]])] + } + + /// spin up spu and down. 
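+/// Note: this test currently only waits for startup; replication assertions are not +/// implemented yet. A follow-up could mirror `conn_test` and assert, for example, that +/// both spus 7000 and 7001 resolve to Online in the sc kv store. 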
+ fn main_test(&self,_runner: Arc>) -> Self::ResponseFuture { + + + async move { + sleep(Duration::from_millis(100)).await.expect("panic"); + + debug!("spu server: 7000 is up, let's make sure sc and spu done it's work"); + + + Ok(()) + }.boxed() + } + +} + + +/// test spu online and offline +#[test_async] +async fn replication_test() -> Result<(), KfSocketError> { + + let test = ReplicationTest{}; + ScTestRunner::run("replication test".to_owned(),test).await.expect("test runner should not failer"); + Ok(()) +} diff --git a/sc-server/test-data/config/sc_invalid.toml b/sc-server/test-data/config/sc_invalid.toml new file mode 100644 index 0000000000..7fd6e81011 --- /dev/null +++ b/sc-server/test-data/config/sc_invalid.toml @@ -0,0 +1,5 @@ +unknown = "value" + + + + diff --git a/sc-server/test-data/config/sc_server.toml b/sc-server/test-data/config/sc_server.toml new file mode 100644 index 0000000000..dcdc7c33b9 --- /dev/null +++ b/sc-server/test-data/config/sc_server.toml @@ -0,0 +1,12 @@ +version = "1.0" + +[sc] +id = 500 + +[bind_public] +host = "127.0.0.1" +port = 9999 + + + + diff --git a/spu-server/Cargo.toml b/spu-server/Cargo.toml new file mode 100644 index 0000000000..72a18a6476 --- /dev/null +++ b/spu-server/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "spu-server" +edition = "2018" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + + +[[bin]] +name = "spu-server" +path = "src/main.rs" +doc = false + +[dependencies] +log = "0.4.6" +bytes = "0.4.12" +structopt = "0.2.14" +toml = "0.5.0" +futures-preview = { version = "0.3.0-alpha.17" } +serde_yaml = "0.8.8" +serde = { version ="1.0.82", features = ['derive'] } +chrono = { version = "0.4.6", features = ["serde"] } +chashmap = "2.2.0" +pin-utils = "0.1.0-alpha.4" +regex = "1.1.6" +future-helper = { path = "../future-helper" } +kf-protocol = { path = "../kf-protocol"} +kf-socket = {path = "../kf-socket"} +kf-service = { path = "../kf-service"} +k8-client = { path = "../k8-client"} +internal-api = { path = "../api/internal-api"} +spu-api = { path = "../api/spu-api"} +types = { path= "../types"} +utils = { path= "../utils"} +metadata = { path = "../metadata" } +future-aio = { path = "../future-aio"} +storage = { path = "../storage"} + +[dev-dependencies] +future-helper = { path = "../future-helper", features = ["fixture"] } +utils = { path = "../utils", features=["fixture"]} \ No newline at end of file diff --git a/spu-server/Makefile b/spu-server/Makefile new file mode 100644 index 0000000000..7f2f88c29e --- /dev/null +++ b/spu-server/Makefile @@ -0,0 +1,2 @@ +start: + RUST_BACKTRACE=1 cargo run -- --port 9000 diff --git a/spu-server/README.md b/spu-server/README.md new file mode 100644 index 0000000000..bfa168d6b2 --- /dev/null +++ b/spu-server/README.md @@ -0,0 +1,11 @@ +## Running SPU in development mode + +To run SPU with default parameters. +``` +./target/debug/spu-server +``` + +Run server with debug flag +``` +RUST_LOG=debug ./target/debug/spu-server +``` \ No newline at end of file diff --git a/spu-server/rust-toolchain b/spu-server/rust-toolchain new file mode 120000 index 0000000000..9327ba4034 --- /dev/null +++ b/spu-server/rust-toolchain @@ -0,0 +1 @@ +../rust-toolchain \ No newline at end of file diff --git a/spu-server/src/config/cli.rs b/spu-server/src/config/cli.rs new file mode 100644 index 0000000000..134e6d4016 --- /dev/null +++ b/spu-server/src/config/cli.rs @@ -0,0 +1,70 @@ +//! +//! # CLI for Streaming Processing Unit (SPU) +//! +//! Command line interface to provision SPU id and configure various +//! 
system parameters. +//! +use std::io::Error as IoError; +use std::process; +use std::path::PathBuf; + +use log::trace; +use structopt::StructOpt; + +use types::print_cli_err; + +use super::{SpuConfig, SpuConfigFile}; + +/// cli options +#[derive(Debug, Default, StructOpt)] +#[structopt(name = "spu-server", author = "", about = "Streaming Processing Unit")] +pub struct SpuOpt { + /// SPU unique identifier + #[structopt(short = "i", long = "id", value_name = "integer")] + pub id: Option<i32>, + + + + #[structopt(short = "p", long = "public-server", value_name = "host:port")] + /// Spu server for external communication + pub public_server: Option<String>, + + #[structopt(short = "v", long = "private-server", value_name = "host:port")] + /// Spu server for internal cluster communication + pub private_server: Option<String>, + + /// Address of the SC Server + #[structopt(short = "c", long = "sc-controller", value_name = "host:port")] + pub sc_server: Option<String>, + + #[structopt(short = "f", long = "conf", value_name = "file")] + /// Configuration file + pub config_file: Option<PathBuf>, +} + +/// Run the SPU CLI and return the SPU configuration. Errors are considered fatal +/// and the program exits. +pub fn process_spu_cli_or_exit() -> SpuConfig { + match get_spu_config() { + Err(err) => { + print_cli_err!(err); + process::exit(0x0100); + } + Ok(config) => config, + } +} + +/// Validate SPU (Streaming Processing Unit) cli inputs and generate SpuConfig +pub fn get_spu_config() -> Result<SpuConfig, IoError> { + let cfg = SpuOpt::from_args(); + + // generate config file from user-supplied file or default location (if it exists) + let spu_config_file = match &cfg.config_file { + Some(cfg_file) => Some(SpuConfigFile::from_file(&cfg_file)?), + None => SpuConfigFile::from_default_file()?, + }; + + trace!("spu cli: {:#?}, file: {:#?}",cfg,spu_config_file); + // send config file and cli parameters to generate final config. + SpuConfig::new_from_all(cfg, spu_config_file) +} diff --git a/spu-server/src/config/mod.rs b/spu-server/src/config/mod.rs new file mode 100644 index 0000000000..74cd2a20a4 --- /dev/null +++ b/spu-server/src/config/mod.rs @@ -0,0 +1,16 @@ +mod cli; +mod spu_config; +mod spu_config_file; + + +pub use self::cli::SpuOpt; +pub use self::cli::process_spu_cli_or_exit; + +pub use self::spu_config::SpuConfig; +pub use self::spu_config::SpuType; +pub use self::spu_config::Endpoint; +pub use self::spu_config::Log; + +pub use self::spu_config_file::SpuConfigFile; + + diff --git a/spu-server/src/config/spu_config.rs b/spu-server/src/config/spu_config.rs new file mode 100644 index 0000000000..a6bbaad6f9 --- /dev/null +++ b/spu-server/src/config/spu_config.rs @@ -0,0 +1,803 @@ +//! +//! # Streaming Processing Unit Configurations +//! +//! Stores configuration parameters used by the Streaming Processing Unit module. +//! Parameters are looked up in the following sequence (first value wins): +//! 1) cli parameters +//! 2) environment variables +//! 3) custom configuration or default configuration (from file) +//! 
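+//! +//! For example, for the sc retry timeout: a (hypothetical) FLV_SC_RETRY_TIMEOUT_MS=3000 in +//! the environment wins over any sc_retry_ms value in the config file, which in turn wins +//! over the SPU_RETRY_SC_TIMEOUT_MS default; each make_* helper below implements this +//! first-value-wins lookup for one parameter. 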
+use std::io::Error as IoError; +use std::io::ErrorKind; +use std::net::SocketAddr; +use std::env; +use std::path::PathBuf; + +use log::debug; +use log::error; + +// defaults values +use types::defaults::{SPU_PUBLIC_HOSTNAME, SPU_PUBLIC_PORT}; +use types::defaults::{SPU_PRIVATE_HOSTNAME, SPU_PRIVATE_PORT}; +use types::defaults::{SC_HOSTNAME, SC_PRIVATE_PORT}; +use types::defaults::SPU_RETRY_SC_TIMEOUT_MS; +use types::defaults::SPU_MIN_IN_SYNC_REPLICAS; +use types::defaults::SPU_LOG_BASE_DIR; +use types::defaults::SPU_LOG_SIZE; +use types::defaults::SPU_LOG_INDEX_MAX_BYTES; +use types::defaults::SPU_LOG_INDEX_MAX_INTERVAL_BYTES; +use types::defaults::SPU_LOG_SEGMENT_MAX_BYTES; + +// environment variables +use types::defaults::FLV_SPU_ID; +use types::defaults::FLV_SPU_TYPE; +use types::defaults::FLV_RACK; +use types::defaults::{FLV_SPU_PUBLIC_HOST, FLV_SPU_PUBLIC_PORT}; +use types::defaults::{FLV_SPU_PRIVATE_HOST, FLV_SPU_PRIVATE_PORT}; +use types::defaults::{FLV_SC_PRIVATE_HOST, FLV_SC_PRIVATE_PORT}; +use types::defaults::FLV_SC_RETRY_TIMEOUT_MS; +use types::defaults::FLV_REPLICA_IN_SYNC_REPLICA_MIN; +use types::defaults::FLV_LOG_BASE_DIR; +use types::defaults::FLV_LOG_SIZE; +use types::defaults::FLV_LOG_INDEX_MAX_BYTES; +use types::defaults::FLV_LOG_INDEX_MAX_INTERVAL_BYTES; +use types::defaults::FLV_LOG_SEGMENT_MAX_BYTES; + +use types::SpuId; +use types::socket_helpers::ServerAddress; +use types::socket_helpers::server_to_socket_addr; +use storage::ConfigOption; + +use super::{SpuOpt, SpuConfigFile}; + +#[derive(Debug, PartialEq, Clone)] +pub enum SpuType { + Custom, + Managed, +} + +#[derive(Debug, PartialEq, Clone)] +pub struct Endpoint { + pub socket_addr: SocketAddr, + pub server_addr: ServerAddress, +} + + +#[derive(Debug, PartialEq, Clone)] +pub struct Replication { + pub min_in_sync_replicas: u16, +} + +#[derive(Debug, PartialEq, Clone)] +pub struct Log { + pub base_dir: PathBuf, + pub size: String, + pub index_max_bytes: u32, + pub index_max_interval_bytes: u32, + pub segment_max_bytes: u32, +} + +impl Log { + + /// create new storage config + pub fn new_config(&self) -> ConfigOption { + ConfigOption::new( + self.base_dir.clone(), + self.index_max_bytes, + self.index_max_interval_bytes, + self.segment_max_bytes + ) + } +} + + + +/// streaming processing unit configuration file +#[derive(Debug, PartialEq, Clone)] +pub struct SpuConfig { + pub id: SpuId, + pub spu_type: SpuType, + pub rack: Option, + + // spu (local server) points + pub public_endpoint: Endpoint, + pub private_endpoint: Endpoint, + + // sc (remote server) endpoint + pub sc_endpoint: ServerAddress, + pub sc_retry_ms: u16, + + // parameters + pub replication: Replication, + pub log: Log +} + + + +impl SpuConfig { + /// Creates an SPU Config object by merging object in the following order of precedence: + /// * cli configuration + /// * environment variable + /// * config file + pub fn new_from_all( + cli_cfg: SpuOpt, + file_cfg: Option, + ) -> Result { + let spu_id = SpuConfig::make_spu_id(&cli_cfg, &file_cfg)?; + let spu_type = SpuConfig::make_spu_type()?; + let rack = SpuConfig::make_rack(&file_cfg)?; + let public_endpoint = SpuConfig::make_public_endpoint(&cli_cfg, &file_cfg)?; + let private_endpoint = SpuConfig::make_private_endpoint(&cli_cfg, &file_cfg)?; + let sc_endpoint = SpuConfig::make_sc_endpoint(&cli_cfg, &file_cfg)?; + let sc_retry_ms = SpuConfig::make_sc_retry_ms(&file_cfg)?; + let min_in_sync_replicas = SpuConfig::make_min_in_sync_replicas(&file_cfg)?; + let log_base_dir = 
SpuConfig::make_log_base_dir(&file_cfg)?; + let log_size = SpuConfig::make_log_size(&file_cfg)?; + let log_index_max_bytes = SpuConfig::make_log_index_max_bytes(&file_cfg)?; + let log_index_max_interval_bytes = SpuConfig::make_log_index_max_interval_bytes(&file_cfg)?; + let log_segment_max_bytes = SpuConfig::make_log_segment_max_bytes(&file_cfg)?; + + Ok(SpuConfig { + id: spu_id, + spu_type, + rack, + public_endpoint, + private_endpoint, + sc_endpoint, + sc_retry_ms, + replication: Replication { + min_in_sync_replicas, + }, + log: Log { + base_dir: log_base_dir, + size: log_size, + index_max_bytes: log_index_max_bytes, + index_max_interval_bytes: log_index_max_interval_bytes, + segment_max_bytes: log_segment_max_bytes, + } + }) + } + + + /// Generate spu-id by combining all config elements. Returns error on failure. + fn make_spu_id(cli_cfg: &SpuOpt, _file_cfg: &Option<SpuConfigFile>) -> Result<SpuId, IoError> { + // 1) check cli + let spu_id = cli_cfg.id; + + // 2) environment variable (optional field, ignore errors) + if let Some(some_id) = spu_id { + debug!("spu id is supplied by cli: {}",some_id); + Ok(some_id) + } else { + debug!("no spu id supplied via cli, looking up env"); + if let Ok(id_str) = env::var(FLV_SPU_ID) { + debug!("found spu id from env: {}",id_str); + let id = id_str.parse().map_err(|err| { + IoError::new(ErrorKind::InvalidInput, format!("spu-id: {}", err)) + })?; + Ok(id) + } else { + + // try the special env var SPU_INDEX, which has the form {name}-{id} when running with in-cluster config + if let Ok(spu_name) = env::var("SPU_INDEX") { + debug!("extracting SPU from: {}",spu_name); + let spu_tokens: Vec<&str> = spu_name.split('-').collect(); + if spu_tokens.len() < 2 { + error!("SPU_INDEX has invalid format, bailing out"); + } else { + let spu_token = spu_tokens[1]; + let id: SpuId = spu_token.parse().expect("spu id should be integer"); + debug!("found SPU INDEX ID: {}",id); + + // now read SPU_MIN, which gives the minimum (base) spu id + let spu_min_var = env::var("SPU_MIN").unwrap_or("0".to_owned()); + debug!("found SPU MIN ID: {}",spu_min_var); + let base_id: SpuId = spu_min_var.parse().expect("spu min should be integer"); + return Ok(id + base_id) + } + } else { + error!("no spu id found in env; this is fatal"); + } + + for (key, value) in env::vars() { + debug!("{}: {}", key, value); + } + std::process::exit(0x0100); + } + } + + } + + + + /// Generate spu-type based on the presence of the environment variable. Returns error on failure. + fn make_spu_type() -> Result<SpuType, IoError> { + // look-up environment variable + if let Ok(spu_type) = env::var(FLV_SPU_TYPE) { + // match to appropriate values + match spu_type.as_str() { + "Custom" => Ok(SpuType::Custom), + "Managed" => Ok(SpuType::Managed), + _ => Err(IoError::new( + ErrorKind::InvalidInput, + format!( + "spu-type: expected 'Managed' or 'Custom', found '{}'", + spu_type + ), + )), + } + } else { + // default to Custom, if environment variable is not present + Ok(SpuType::Custom) + } + } + + /// Generate rack by combining all config elements. Returns error on failure. 
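+ /// For example, a (hypothetical) FLV_RACK=us-west in the environment wins over a rack + /// value in the config file; since there is no cli flag or built-in default, the rack + /// may resolve to None. 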
/// Generate rack by combining all config elements. Returns error on failure. + fn make_rack(file_cfg: &Option<SpuConfigFile>) -> Result<Option<String>, IoError> { + let mut rack = None; + + // 1) environment variable (optional field, ignore errors) + if rack.is_none() { + if let Ok(rack_str) = env::var(FLV_RACK) { + rack = Some(rack_str); + } + } + + // 2) config file + if rack.is_none() && file_cfg.is_some() { + rack = file_cfg.as_ref().unwrap().rack(); + } + + // return result + Ok(rack) + } + + /// Generate public_endpoint by combining all config elements. Returns error on failure. + fn make_public_endpoint( + cli_cfg: &SpuOpt, + file_cfg: &Option<SpuConfigFile>, + ) -> Result<Endpoint, IoError> { + // 1) check cli and convert to server address + let mut public_ep = server_str_to_server_addr(&cli_cfg.public_server)?; + + // 2) environment variable (optional field, ignore errors) + if public_ep.is_none() { + if let Ok(host) = env::var(FLV_SPU_PUBLIC_HOST) { + if let Ok(port_str) = env::var(FLV_SPU_PUBLIC_PORT) { + let port: u16 = port_str.parse().map_err(|err| { + IoError::new( + ErrorKind::InvalidInput, + format!("invalid env port: {}", err), + ) + })?; + public_ep = Some(ServerAddress { host, port }); + } + } + } + + // 3) config file + if public_ep.is_none() && file_cfg.is_some() { + public_ep = file_cfg.as_ref().unwrap().public_endpoint(); + } + + // 4) use default + if public_ep.is_none() { + let host = SPU_PUBLIC_HOSTNAME.to_owned(); + let port = SPU_PUBLIC_PORT; + public_ep = Some(ServerAddress { host, port }); + } + + // 5) create endpoint + let ep = Endpoint::new(&public_ep.unwrap())?; + + // return result + Ok(ep) + } + + /// Generate private_endpoint by combining all config elements. Returns error on failure. + fn make_private_endpoint( + cli_cfg: &SpuOpt, + file_cfg: &Option<SpuConfigFile>, + ) -> Result<Endpoint, IoError> { + // 1) check cli and convert to server address + let mut private_ep = server_str_to_server_addr(&cli_cfg.private_server)?; + + // 2) environment variable (optional field, ignore errors) + if private_ep.is_none() { + if let Ok(host) = env::var(FLV_SPU_PRIVATE_HOST) { + if let Ok(port_str) = env::var(FLV_SPU_PRIVATE_PORT) { + let port: u16 = port_str.parse().map_err(|err| { + IoError::new( + ErrorKind::InvalidInput, + format!("invalid env port: {}", err), + ) + })?; + private_ep = Some(ServerAddress { host, port }); + } + } + } + + // 3) config file + if private_ep.is_none() && file_cfg.is_some() { + private_ep = file_cfg.as_ref().unwrap().private_endpoint(); + } + + // 4) use default + if private_ep.is_none() { + let host = SPU_PRIVATE_HOSTNAME.to_owned(); + let port = SPU_PRIVATE_PORT; + private_ep = Some(ServerAddress { host, port }); + } + + // 5) create endpoint + let ep = Endpoint::new(&private_ep.unwrap())?; + + // return result + Ok(ep) + } + + /// Generate sc_endpoint by combining all config elements. Returns error on failure. + fn make_sc_endpoint( + cli_cfg: &SpuOpt, + file_cfg: &Option<SpuConfigFile>, + ) -> Result<ServerAddress, IoError> { + // 1) check cli and convert to server address + let mut sc_ep = server_str_to_server_addr(&cli_cfg.sc_server)?; + + // 2) environment variable (optional field, ignore errors) + if sc_ep.is_none() { + debug!("no sc endpoint is supplied, checking env"); + if let Ok(host) = env::var(FLV_SC_PRIVATE_HOST) { + debug!("found sc addr from env var: {}",host); + if let Ok(port_str) = env::var(FLV_SC_PRIVATE_PORT) { + let port: u16 = port_str.parse().map_err(|err| { + IoError::new( + ErrorKind::InvalidInput, + format!("invalid env port: {}", err), + ) + })?; + return Ok(ServerAddress { host, port }); + } else { + debug!("no port supplied with env var, using default port: {}",SC_PRIVATE_PORT); + return Ok(ServerAddress { host, port: SC_PRIVATE_PORT}) + } + } else { + debug!("no sc endpoint from env var"); + } + } + + // 3) config file + if sc_ep.is_none() && file_cfg.is_some() { + debug!("try reading sc from file"); + sc_ep = file_cfg.as_ref().unwrap().controller_endpoint(); + } + + // 4) use default + match sc_ep { + Some(addr) => Ok(addr), + None => { + debug!("no sc endpoint from any config source, default to localhost"); + let host = SC_HOSTNAME.to_owned(); + let port = SC_PRIVATE_PORT; + Ok(ServerAddress { host, port }) + } + } + } + + /// Generate the SC connection retry interval by combining all config elements. Returns error on failure. + fn make_sc_retry_ms(file_cfg: &Option<SpuConfigFile>) -> Result<u16, IoError> { + let mut sc_retry_ms = None; + + // 1) environment variable (optional field, ignore errors) + if let Ok(retry_ms_str) = env::var(FLV_SC_RETRY_TIMEOUT_MS) { + let retry_ms = retry_ms_str.parse().map_err(|err| { + IoError::new(ErrorKind::InvalidInput, format!("sc-retry-ms: {}", err)) + })?; + sc_retry_ms = Some(retry_ms); + } + + // 2) config file + if sc_retry_ms.is_none() && file_cfg.is_some() { + sc_retry_ms = file_cfg.as_ref().unwrap().sc_retry_ms(); + } + + // 3) unwrap or use default + Ok(sc_retry_ms.unwrap_or(SPU_RETRY_SC_TIMEOUT_MS)) + } + + /// Generate min in-sync replicas by combining all config elements. Returns error on failure. + fn make_min_in_sync_replicas(file_cfg: &Option<SpuConfigFile>) -> Result<u16, IoError> { + let mut min_in_sync_replicas = None; + + // 1) environment variable (optional field, ignore errors) + if let Ok(in_sync_replica_str) = env::var(FLV_REPLICA_IN_SYNC_REPLICA_MIN) { + let in_sync_replica = in_sync_replica_str.parse().map_err(|err| { + IoError::new( + ErrorKind::InvalidInput, + format!("min-in-sync-replica: {}", err), + ) + })?; + min_in_sync_replicas = Some(in_sync_replica); + } + + // 2) config file + if min_in_sync_replicas.is_none() && file_cfg.is_some() { + min_in_sync_replicas = file_cfg.as_ref().unwrap().min_in_sync_replicas(); + } + + // 3) unwrap or use default + Ok(min_in_sync_replicas.unwrap_or(SPU_MIN_IN_SYNC_REPLICAS)) + } + + /// Generate log base dir by combining all config elements. Returns error on failure. + fn make_log_base_dir(file_cfg: &Option<SpuConfigFile>) -> Result<PathBuf, IoError> { + let mut log_base_dir = None; + + // 1) environment variable (optional field, ignore errors) + if let Ok(log_base_dir_str) = env::var(FLV_LOG_BASE_DIR) { + log_base_dir = Some(PathBuf::from(log_base_dir_str)); + } + + // 2) config file + if log_base_dir.is_none() && file_cfg.is_some() { + log_base_dir = file_cfg.as_ref().unwrap().log_base_dir(); + } + + // 3) unwrap or use default + Ok(log_base_dir.unwrap_or(PathBuf::from(SPU_LOG_BASE_DIR))) + }
/// Generate log size by combining all config elements. Returns error on failure. + fn make_log_size(file_cfg: &Option<SpuConfigFile>) -> Result<String, IoError> { + let mut log_size = None; + + // 1) environment variable (optional field, ignore errors) + if let Ok(log_size_str) = env::var(FLV_LOG_SIZE) { + log_size = Some(log_size_str); + } + + // 2) config file + if log_size.is_none() && file_cfg.is_some() { + log_size = file_cfg.as_ref().unwrap().log_size(); + } + + // 3) unwrap or use default + Ok(log_size.unwrap_or(SPU_LOG_SIZE.to_owned())) + } + + /// Generate log index_max_bytes by combining all config elements. Returns error on failure. + fn make_log_index_max_bytes(file_cfg: &Option<SpuConfigFile>) -> Result<u32, IoError> { + let mut log_index_max_bytes = None; + + // 1) environment variable (optional field, ignore errors) + if let Ok(log_index_max_bytes_str) = env::var(FLV_LOG_INDEX_MAX_BYTES) { + let index_max_bytes: u32 = log_index_max_bytes_str.parse().map_err(|err| { + IoError::new( + ErrorKind::InvalidInput, + format!("log index-max-bytes: {}", err), + ) + })?; + log_index_max_bytes = Some(index_max_bytes); + } + + // 2) config file + if log_index_max_bytes.is_none() && file_cfg.is_some() { + log_index_max_bytes = file_cfg.as_ref().unwrap().log_index_max_bytes(); + } + + // 3) unwrap or use default + Ok(log_index_max_bytes.unwrap_or(SPU_LOG_INDEX_MAX_BYTES)) + } + + /// Generate log index_max_interval_bytes by combining all config elements. Returns error on failure. + fn make_log_index_max_interval_bytes(file_cfg: &Option<SpuConfigFile>) -> Result<u32, IoError> { + let mut log_index_max_interval_bytes = None; + + // 1) environment variable (optional field, ignore errors) + if let Ok(log_index_max_interval_bytes_str) = env::var(FLV_LOG_INDEX_MAX_INTERVAL_BYTES) { + let index_max_interval_bytes: u32 = + log_index_max_interval_bytes_str.parse().map_err(|err| { + IoError::new( + ErrorKind::InvalidInput, + format!("log index-max-interval-bytes: {}", err), + ) + })?; + log_index_max_interval_bytes = Some(index_max_interval_bytes); + } + + // 2) config file + if log_index_max_interval_bytes.is_none() && file_cfg.is_some() { + log_index_max_interval_bytes = + file_cfg.as_ref().unwrap().log_index_max_interval_bytes(); + } + + // 3) unwrap or use default + Ok(log_index_max_interval_bytes.unwrap_or(SPU_LOG_INDEX_MAX_INTERVAL_BYTES)) + }
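// Editor's note: a minimal sketch, not part of the original patch. The numeric
// env overrides above all share the same parse-and-wrap shape; a helper like
// this (name hypothetical, reusing the IoError/ErrorKind aliases already
// imported in this file) shows the idiom once:
#[allow(dead_code)]
fn parse_env_u32(setting: &str, raw: &str) -> Result<u32, IoError> {
    // surface parse failures as InvalidInput, tagged with the setting name
    raw.parse().map_err(|err| {
        IoError::new(ErrorKind::InvalidInput, format!("{}: {}", setting, err))
    })
}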
/// Generate log segment_max_bytes by combining all config elements. Returns error on failure. + fn make_log_segment_max_bytes(file_cfg: &Option<SpuConfigFile>) -> Result<u32, IoError> { + let mut log_segment_max_bytes = None; + + // 1) environment variable (optional field, ignore errors) + if let Ok(log_segment_max_bytes_str) = env::var(FLV_LOG_SEGMENT_MAX_BYTES) { + let segment_max_bytes: u32 = log_segment_max_bytes_str.parse().map_err(|err| { + IoError::new( + ErrorKind::InvalidInput, + format!("log segment_max_bytes: {}", err), + ) + })?; + log_segment_max_bytes = Some(segment_max_bytes); + } + + // 2) config file + if log_segment_max_bytes.is_none() && file_cfg.is_some() { + log_segment_max_bytes = file_cfg.as_ref().unwrap().log_segment_max_bytes(); + } + + // 3) unwrap or use default + Ok(log_segment_max_bytes.unwrap_or(SPU_LOG_SEGMENT_MAX_BYTES)) + } + + pub fn id(&self) -> SpuId { + self.id + } + + pub fn rack(&self) -> &Option<String> { + &self.rack + } + + pub fn is_custom(&self) -> bool { + match self.spu_type { + SpuType::Custom => true, + SpuType::Managed => false, + } + } + + pub fn type_label(&self) -> String { + match self.spu_type { + SpuType::Custom => "custom".to_string(), + SpuType::Managed => "managed".to_string(), + } + } + + pub fn sc_endpoint(&self) -> &ServerAddress { + &self.sc_endpoint + } + + pub fn public_socket_addr(&self) -> &SocketAddr { + &self.public_endpoint.socket_addr + } + + pub fn public_server_addr(&self) -> &ServerAddress { + &self.public_endpoint.server_addr + } + + pub fn private_socket_addr(&self) -> &SocketAddr { + &self.private_endpoint.socket_addr + } + + pub fn private_server_addr(&self) -> &ServerAddress { + // return the private endpoint, not the public one + &self.private_endpoint.server_addr + } + + + pub fn storage(&self) -> &Log { + &self.log + } + + +} + +impl Endpoint { + pub fn new(server_addr: &ServerAddress) -> Result<Self, IoError> { + let socket_addr = server_to_socket_addr(server_addr)?; + let server_addr = server_addr.clone(); + Ok(Endpoint { + server_addr, + socket_addr, + }) + } +} + +/// Convert server string to server address +fn server_str_to_server_addr( + server_str: &Option<String>, +) -> Result<Option<ServerAddress>, IoError> { + if let Some(server) = server_str { + // parse host and port + let host_port: Vec<&str> = server.split(':').collect(); + if host_port.len() != 2 { + return Err(IoError::new( + ErrorKind::InvalidData, + format!("Expected 'host:port' format, found '{}'", server), + )); + } + + let host = host_port[0].to_owned(); + let port: u16 = host_port[1].parse().map_err(|err| { + IoError::new(ErrorKind::InvalidInput, format!("invalid port: {}", err)) + })?; + + Ok(Some(ServerAddress { host, port })) + } else { + Ok(None) + } +} + +// --------------------------------------- +// Unit Tests +// --------------------------------------- +#[cfg(test)] +pub mod test { + + use super::*; + use types::defaults::SPU_DEFAULT_ID; + + #[test] + fn test_get_spu_config_with_defaults() { + let spu_opt = SpuOpt::default(); + let spu_config_file = None; + + // test read & parse + let result = SpuConfig::new_from_all(spu_opt, spu_config_file); + assert!(result.is_ok()); + + // setup endpoints + let public_endpoint_res = Endpoint::new(&ServerAddress { + host: SPU_PUBLIC_HOSTNAME.to_owned(), + port: SPU_PUBLIC_PORT, + }); + assert!(public_endpoint_res.is_ok()); + + let private_endpoint_res = Endpoint::new(&ServerAddress { + host: SPU_PRIVATE_HOSTNAME.to_owned(), + port: SPU_PRIVATE_PORT, + }); + assert!(private_endpoint_res.is_ok()); + + let sc_endpoint_res = ServerAddress { + host: SC_HOSTNAME.to_owned(), + port: SC_PRIVATE_PORT, + }; + + // compare with expected result + let expected = SpuConfig { + id: SPU_DEFAULT_ID, + spu_type: SpuType::Custom, + rack:
None, + public_endpoint: public_endpoint_res.unwrap(), + private_endpoint: private_endpoint_res.unwrap(), + sc_endpoint: sc_endpoint_res, + sc_retry_ms: SPU_RETRY_SC_TIMEOUT_MS, + replication: Replication { + min_in_sync_replicas: SPU_MIN_IN_SYNC_REPLICAS, + }, + log: Log { + base_dir: PathBuf::from(SPU_LOG_BASE_DIR), + size: SPU_LOG_SIZE.to_owned(), + index_max_bytes: SPU_LOG_INDEX_MAX_BYTES, + index_max_interval_bytes: SPU_LOG_INDEX_MAX_INTERVAL_BYTES, + segment_max_bytes: SPU_LOG_SEGMENT_MAX_BYTES, + } + }; + + assert_eq!(result.unwrap(), expected); + } + + #[test] + fn test_get_spu_config_from_named_config_file() { + let spu_opt = SpuOpt::default(); + + let file = PathBuf::from("./test-data/config/spu_server.toml"); + let spu_config_file = SpuConfigFile::from_file(&file); + assert!(spu_config_file.is_ok()); + + // test read & parse + let result = SpuConfig::new_from_all(spu_opt, Some(spu_config_file.unwrap())); + assert!(result.is_ok()); + + // setup endpoints + let public_endpoint_res = Endpoint::new(&ServerAddress { + host: "127.0.0.1".to_owned(), + port: 5555, + }); + assert!(public_endpoint_res.is_ok()); + + let private_endpoint_res = Endpoint::new(&ServerAddress { + host: "127.0.0.1".to_owned(), + port: 5556, + }); + assert!(private_endpoint_res.is_ok()); + + let sc_endpoint_res = ServerAddress { + host: "127.0.0.1".to_owned(), + port: 5554, + }; + + // compare with expected result + let expected = SpuConfig { + id: 5050, + spu_type: SpuType::Custom, + rack: Some("rack-1".to_owned()), + public_endpoint: public_endpoint_res.unwrap(), + private_endpoint: private_endpoint_res.unwrap(), + sc_endpoint: sc_endpoint_res, + sc_retry_ms: 2000, + replication: Replication { + min_in_sync_replicas: 3, + }, + log: Log { + base_dir: PathBuf::from("/tmp/data_streams"), + size: "2Gi".to_owned(), + index_max_bytes: 888888, + index_max_interval_bytes: 2222, + segment_max_bytes: 9999999, + } + }; + + assert_eq!(result.unwrap(), expected); + } + + #[test] + fn test_get_spu_config_overwite_config_file() { + utils::init_logger(); + + let spu_opt = SpuOpt { + id: Some(9898), + public_server: Some("1.1.1.1:8888".to_owned()), + private_server: Some("2.2.2.2:9999".to_owned()), + sc_server: Some("3.3.3.3:5555".to_owned()), + config_file: None, + }; + + let file = PathBuf::from("./test-data/config/spu_server.toml"); + let spu_config_file = SpuConfigFile::from_file(&file); + assert!(spu_config_file.is_ok()); + + // test read & parse + let result = SpuConfig::new_from_all(spu_opt, Some(spu_config_file.unwrap())); + assert!(result.is_ok()); + + // setup endpoints + let public_endpoint_res = Endpoint::new(&ServerAddress { + host: "1.1.1.1".to_owned(), + port: 8888, + }); + assert!(public_endpoint_res.is_ok()); + + let private_endpoint_res = Endpoint::new(&ServerAddress { + host: "2.2.2.2".to_owned(), + port: 9999, + }); + assert!(private_endpoint_res.is_ok()); + + let sc_endpoint_res = ServerAddress { + host: "3.3.3.3".to_owned(), + port: 5555, + }; + + // compare with expected result + let expected = SpuConfig { + id: 9898, + spu_type: SpuType::Custom, + rack: Some("rack-1".to_owned()), + public_endpoint: public_endpoint_res.unwrap(), + private_endpoint: private_endpoint_res.unwrap(), + sc_endpoint: sc_endpoint_res, + sc_retry_ms: 2000, + replication: Replication { + min_in_sync_replicas: 3, + }, + log: Log { + base_dir: PathBuf::from("/tmp/data_streams"), + size: "2Gi".to_owned(), + index_max_bytes: 888888, + index_max_interval_bytes: 2222, + segment_max_bytes: 9999999, + } + }; + + assert_eq!(result.unwrap(), 
expected); + } +} diff --git a/spu-server/src/config/spu_config_file.rs b/spu-server/src/config/spu_config_file.rs new file mode 100644 index 0000000000..59f72ff363 --- /dev/null +++ b/spu-server/src/config/spu_config_file.rs @@ -0,0 +1,342 @@ +//! +//! # Config file for Streaming Processing Unit +//! +//! Given a configuration file, load and return spu parameters +//! + +use std::fs::read_to_string; +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::path::Path; +use std::path::PathBuf; + +use serde::Deserialize; +use types::socket_helpers::ServerAddress; + +use types::defaults::SPU_CONFIG_FILE; +use utils::config_helper::build_server_config_file_path; + +// ----------------------------------- +// Data Structures +// ----------------------------------- + +#[derive(Debug, PartialEq, Deserialize)] +pub struct SpuConfigFile { + version: String, + spu: Option, + servers: Option, + controller: Option, + configurations: Option, +} + +#[derive(Debug, PartialEq, Deserialize)] +struct SpuGroup { + pub id: Option, + pub rack: Option, +} + +#[derive(Debug, PartialEq, Deserialize)] +struct ServersGroup { + pub public: Option, + pub private: Option, +} + +#[derive(Debug, PartialEq, Deserialize)] +struct ServerGroup { + pub host: String, + pub port: u16, +} + +#[derive(Debug, PartialEq, Deserialize)] +struct ControllerGroup { + pub host: String, + pub port: u16, + pub retry_timeout_ms: Option, +} + +#[derive(Debug, PartialEq, Deserialize)] +struct ConfigurationsGroup { + pub replication: Option, + pub log: Option, +} + +#[derive(Debug, PartialEq, Deserialize)] +struct ReplicationGroup { + pub min_in_sync_replicas: Option, +} + +#[derive(Debug, PartialEq, Deserialize)] +struct LogGroup { + pub base_dir: Option, + pub size: Option, + pub index_max_bytes: Option, + pub index_max_interval_bytes: Option, + pub segment_max_bytes: Option, +} + +// --------------------------------------- +// Implementation +// --------------------------------------- + +impl SpuConfigFile { + // read and parse the .toml file + pub fn from_file>(path: T) -> Result { + let filename = { format!("{}", path.as_ref().display()) }; + let file_str = read_to_string(path)?; + toml::from_str(&file_str) + .map_err(|err| IoError::new(ErrorKind::InvalidData, format!("{}: {}", filename, err))) + } + + // parse the default file if exists, otherwise None + pub fn from_default_file() -> Result, IoError> { + let default_cfg_file_path = build_server_config_file_path(SPU_CONFIG_FILE); + if Path::new(&default_cfg_file_path).exists() { + Ok(Some(SpuConfigFile::from_file(default_cfg_file_path)?)) + } else { + Ok(None) + } + } + + + + /// Retrieve rack or none + pub fn rack(&self) -> Option { + if let Some(ref spu_group) = &self.spu { + if let Some(ref rack) = spu_group.rack { + return Some(rack.clone()); + } + } + None + } + + /// Retrieve public endpoint or none + pub fn public_endpoint(&self) -> Option { + if let Some(ref servers_group) = &self.servers { + if let Some(ref server) = servers_group.public { + return Some(ServerAddress { + host: server.host.clone(), + port: server.port, + }); + } + } + None + } + + /// Retrieve private endpoint or none + pub fn private_endpoint(&self) -> Option { + if let Some(ref servers_group) = &self.servers { + if let Some(ref server) = servers_group.private { + return Some(ServerAddress { + host: server.host.clone(), + port: server.port, + }); + } + } + None + } + + /// Retrieve controller private endpoint or none + pub fn controller_endpoint(&self) -> Option { + if let Some(ref controller) = 
&self.controller { + return Some(ServerAddress { + host: controller.host.clone(), + port: controller.port, + }); + } + None + } + /// Retrieve controller connection retry in milliseconds or none + pub fn sc_retry_ms(&self) -> Option<u16> { + if let Some(ref controller) = &self.controller { + return controller.retry_timeout_ms.clone(); + } + None + } + + /// Retrieve minimum in-sync replicas or none + pub fn min_in_sync_replicas(&self) -> Option<u16> { + if let Some(ref config_group) = &self.configurations { + if let Some(ref replication_group) = &config_group.replication { + return replication_group.min_in_sync_replicas.clone(); + } + } + None + } + + /// Retrieve log base directory or none + pub fn log_base_dir(&self) -> Option<PathBuf> { + if let Some(ref config_group) = &self.configurations { + if let Some(ref log_group) = &config_group.log { + return log_group.base_dir.clone(); + } + } + None + } + + /// Retrieve log size or none + pub fn log_size(&self) -> Option<String> { + if let Some(ref config_group) = &self.configurations { + if let Some(ref log_group) = &config_group.log { + return log_group.size.clone(); + } + } + None + } + + /// Retrieve log index max bytes or none + pub fn log_index_max_bytes(&self) -> Option<u32> { + if let Some(ref config_group) = &self.configurations { + if let Some(ref log_group) = &config_group.log { + return log_group.index_max_bytes.clone(); + } + } + None + } + + /// Retrieve log index max interval bytes or none + pub fn log_index_max_interval_bytes(&self) -> Option<u32> { + if let Some(ref config_group) = &self.configurations { + if let Some(ref log_group) = &config_group.log { + return log_group.index_max_interval_bytes.clone(); + } + } + None + } + + /// Retrieve segment max bytes or none + pub fn log_segment_max_bytes(&self) -> Option<u32> { + if let Some(ref config_group) = &self.configurations { + if let Some(ref log_group) = &config_group.log { + return log_group.segment_max_bytes.clone(); + } + } + None + } +} + +// --------------------------------------- +// Unit Tests +// >> utils::init_logger(); +// >> RUST_LOG=spu_server=trace cargo test +// --------------------------------------- + +#[cfg(test)] +pub mod test { + use types::defaults::CONFIG_FILE_EXTENTION; + use super::*; + + /// Use a base path and defaults to stitch together the spu configuration file path + pub fn config_file(path: &String) -> PathBuf { + let mut file_path = PathBuf::from(path); + file_path.push(SPU_CONFIG_FILE); + file_path.set_extension(CONFIG_FILE_EXTENTION); + file_path + } + + #[test] + fn test_default_spu_config_ok() { + let spu_config_path = config_file(&"./test-data/config".to_owned()); + + // test file generator + assert_eq!( + spu_config_path.clone().to_str().unwrap(), + "./test-data/config/spu_server.toml" + ); + + // test read & parse + let result = SpuConfigFile::from_file(spu_config_path); + assert!(result.is_ok()); + + // compare with expected result + let expected = SpuConfigFile { + version: "1.0".to_owned(), + spu: Some(SpuGroup { + id: Some(5050), + rack: Some("rack-1".to_owned()), + }), + servers: Some(ServersGroup { + public: Some(ServerGroup { + host: "127.0.0.1".to_owned(), + port: 5555, + }), + private: Some(ServerGroup { + host: "127.0.0.1".to_owned(), + port: 5556, + }), + }), + controller: Some(ControllerGroup { + host: "127.0.0.1".to_owned(), + port: 5554, + retry_timeout_ms: Some(2000), + }), + configurations: Some(ConfigurationsGroup { + replication: Some(ReplicationGroup { + min_in_sync_replicas: Some(3), + }), + log: Some(LogGroup { + base_dir: Some(PathBuf::from("/tmp/data_streams")),
+ size: Some("2Gi".to_owned()), + index_max_bytes: Some(888888), + index_max_interval_bytes: Some(2222), + segment_max_bytes: Some(9999999), + }), + }), + }; + assert_eq!(result.unwrap(), expected); + } + + #[test] + fn test_default_spu_config_min() { + let mut spu_config_path = PathBuf::new(); + spu_config_path.push("./test-data/config/spu_server_small.toml"); + + // test read & parse + let result = SpuConfigFile::from_file(spu_config_path); + assert!(result.is_ok()); + + // compare with expected result + let expected = SpuConfigFile { + version: "1.0".to_owned(), + spu: Some(SpuGroup { + id: Some(12), + rack: None, + }), + servers: None, + controller: Some(ControllerGroup { + host: "1.1.1.1".to_owned(), + port: 2323, + retry_timeout_ms: None, + }), + configurations: None, + }; + assert_eq!(result.unwrap(), expected); + } + + #[test] + fn test_default_spu_config_not_found() { + let mut spu_config_path = PathBuf::new(); + spu_config_path.push("./test-data/config/unknown.toml"); + let result = SpuConfigFile::from_file(spu_config_path); + + // expecting error + assert!(result.is_err()); + assert_eq!( + format!("{}", result.unwrap_err()), + "No such file or directory (os error 2)" + ); + } + + #[test] + fn test_invalid_spu_config_file() { + let mut spu_config_path = PathBuf::new(); + spu_config_path.push("./test-data/config/spu_invalid.toml"); + let result = SpuConfigFile::from_file(spu_config_path); + + // expecting error + assert!(result.is_err()); + assert_eq!( + format!("{}", result.unwrap_err()), + "./test-data/config/spu_invalid.toml: missing field `version`" + ); + } +} diff --git a/spu-server/src/controllers/follower_replica/api_key.rs b/spu-server/src/controllers/follower_replica/api_key.rs new file mode 100644 index 0000000000..040d9537e5 --- /dev/null +++ b/spu-server/src/controllers/follower_replica/api_key.rs @@ -0,0 +1,15 @@ + +use kf_protocol::derive::Encode; +use kf_protocol::derive::Decode; + +#[derive(PartialEq, Debug, Encode, Decode, Clone, Copy)] +#[repr(u16)] +pub enum KfFollowerPeerApiEnum { + SyncRecords = 0 +} + +impl Default for KfFollowerPeerApiEnum { + fn default() -> KfFollowerPeerApiEnum { + KfFollowerPeerApiEnum::SyncRecords + } +} diff --git a/spu-server/src/controllers/follower_replica/follower_controller.rs b/spu-server/src/controllers/follower_replica/follower_controller.rs new file mode 100644 index 0000000000..8d09d26379 --- /dev/null +++ b/spu-server/src/controllers/follower_replica/follower_controller.rs @@ -0,0 +1,307 @@ + +use std::time::Duration; +use std::net::SocketAddr; +use std::convert::TryInto; + +use log::trace; +use log::error; +use log::debug; + + +use futures::channel::mpsc::Receiver; +use futures::select; +use futures::StreamExt; +use futures::FutureExt; + +use future_helper::spawn; +use future_helper::sleep; +use kf_socket::KfSocket; +use kf_socket::KfSink; +use kf_socket::KfSocketError; +use kf_protocol::api::RequestMessage; +use internal_api::messages::Replica; +use types::SpuId; +use types::log_on_err; +use storage::FileReplica; +use metadata::spu::SpuSpec; + + +use crate::controllers::leader_replica::UpdateOffsetRequest; +use crate::services::internal::FetchStreamRequest; +use crate::core::spus::SharedSpuLocalStore; +use crate::core::SharedSpuConfig; + +use super::FollowerReplicaControllerCommand; +use super::FollowerReplicaState; +use super::KfFollowerPeerApiEnum; +use super::DefaultSyncRequest; +use super::FollowerPeerRequest; +use super::SharedFollowersState; + +/// time to resync follower offsets to leader +const 
LEADER_RECONCILIATION_INTERVAL_SEC: u64 = 60; // 1 min + +/// Controller for managing follower replicas +/// There is one controller per follower group (grouped by leader SPU) +pub struct ReplicaFollowerController<S> { + leader_id: SpuId, + spu_localstore: SharedSpuLocalStore, + followers_state: SharedFollowersState<S>, + receiver: Receiver<FollowerReplicaControllerCommand>, + config: SharedSpuConfig +} + +impl<S> ReplicaFollowerController<S> { + + + pub fn new( + leader_id: SpuId, + receiver: Receiver<FollowerReplicaControllerCommand>, + spu_localstore: SharedSpuLocalStore, + followers_state: SharedFollowersState<S>, + config: SharedSpuConfig + ) -> Self { + Self { + leader_id, + spu_localstore, + receiver, + followers_state, + config + } + } +} + +impl ReplicaFollowerController<FileReplica> { + + pub fn run(self) { + + spawn(self.dispatch_loop()); + } + + async fn dispatch_loop(mut self) { + + debug!("starting follower replica controller for leader spu: {}",self.leader_id); + loop { + + if let Some(socket) = self.create_socket_to_leader().await { + + // send initial fetch stream request + debug!("established connection to leader: {}",self.leader_id); + match self.stream_loop(socket).await { + Ok(terminate_flag) => { + if terminate_flag { + trace!("end command has been received, terminating connection to leader: {}",self.leader_id); + break; + } + }, + Err(err) => error!("connection error, connecting to leader: {} err: {:#?}",self.leader_id,err) + } + + debug!("lost connection to leader: {}, sleeping 5 seconds before retrying",self.leader_id); + // 5 seconds is a heuristic value; it may change in the future or could be made + // dynamic, depending on a backoff algorithm + sleep(Duration::from_secs(5)).await; + + } else { + debug!("TODO: describe more where this can happen"); + break; + } + } + debug!("shutting down follower controller: {}",self.leader_id); + + } + + async fn stream_loop(&mut self,mut socket: KfSocket) -> Result<bool, KfSocketError> { + + self.send_fetch_stream_request(&mut socket).await?; + let (mut sink,mut stream) = socket.split(); + let mut api_stream = stream.api_stream::<FollowerPeerRequest, KfFollowerPeerApiEnum>(); + + // sync offsets + self.sync_all_offsets_to_leader(&mut sink).await; + + loop { + + log::trace!("waiting for Peer Request from leader: {}",self.leader_id); + + select!
{ + _ = (sleep(Duration::from_secs(LEADER_RECONCILIATION_INTERVAL_SEC))).fuse() => { + debug!("timer fired - kickoff sync offsets to leader {}",self.leader_id); + self.sync_all_offsets_to_leader(&mut sink).await; + }, + + cmd_msg = self.receiver.next() => { + if let Some(cmd) = cmd_msg { + match cmd { + FollowerReplicaControllerCommand::AddReplica(replica) => { + debug!("leader: {}, adding replica: {}",self.leader_id,replica); + self.update_replica(replica).await; + self.sync_all_offsets_to_leader(&mut sink).await; + }, + FollowerReplicaControllerCommand::UpdateReplica(replica) => { + self.update_replica(replica).await; + self.sync_all_offsets_to_leader(&mut sink).await; + } + } + } else { + debug!("mailbox to this controller: {} has been closed, shutting controller down",self.leader_id); + return Ok(true) + } + }, + api_msg = api_stream.next().fuse() => { + + if let Some(req_msg_res) = api_msg { + match req_msg_res { + Ok(req_msg) => { + match req_msg { + FollowerPeerRequest::SyncRecords(sync_request) => self.write_to_follower_replica(&mut sink,sync_request.request).await, + } + + }, + Err(err) => { + log::trace!("error decoding request: {}, terminating connection",err); + return Ok(false) + } + } + + } else { + trace!("leader socket has terminated"); + return Ok(false); + + } + } + + } + } + } + + + /// get available spu, this is case where follower request is received before rest of spu arrives from SC. + /// TODO: remove wait call + async fn get_spu(&self) -> SpuSpec { + + loop { + if let Some(spu) = self.spu_localstore.spec(&self.leader_id){ + return spu + } + + trace!("leader spu spec: {} is not available, waiting 1 second",self.leader_id); + sleep(Duration::from_millis(1000)).await; + trace!("awake from sleep, checking spus: {}",self.leader_id); + } + } + + + async fn write_to_follower_replica(&self,sink: &mut KfSink,req: DefaultSyncRequest) { + + debug!("handling sync request from leader: {}, req {}",self.leader_id,req); + + let offsets = self.followers_state.send_records(req).await; + self.sync_offsets_to_leader(sink,offsets).await; + + } + + + /// connect to leader, if can't connect try until we succeed + /// or if we received termination message + async fn create_socket_to_leader(&mut self) -> Option { + + let leader_spu = self.get_spu().await; + debug!("trying to resolve leader: {} addr: {}",leader_spu.id,leader_spu.private_endpoint.host); + let addr: SocketAddr = leader_spu.private_server_address().try_into().expect("addr should succeed"); + debug!("resolved leader: {} addr: {}",leader_spu.id,addr); + loop { + + trace!("trying to create socket to leader: {}",self.leader_id); + let connect_future = KfSocket::fusable_connect(&addr); + + select! 
{ + msg = self.receiver.next() => { + if let Some(cmd) = msg { + match cmd { + FollowerReplicaControllerCommand::AddReplica(replica) => self.update_replica(replica).await, + FollowerReplicaControllerCommand::UpdateReplica(replica) => self.update_replica(replica).await + } + } else { + error!("mailbox seems terminated, we should terminate as well"); + return None + } + }, + socket_res = connect_future.fuse() => { + match socket_res { + Ok(socket) => { + trace!("connected to leader: {}",self.leader_id); + return Some(socket) + } + Err(err) => error!("error connecting to leader: {}",err) + } + + trace!("sleeping 5 seconds before reconnecting to leader: {}",self.leader_id); + sleep(Duration::from_secs(5)).await; + } + + } + + + + } + } + + + /// send request to establish peer to peer communication to leader + async fn send_fetch_stream_request(&self, socket: &mut KfSocket) -> Result<(),KfSocketError>{ + + let local_spu_id = self.config.id(); + trace!("sending fetch stream for leader: {} for follower: {}",self.leader_id,local_spu_id); + let mut fetch_request = FetchStreamRequest::default(); + fetch_request.spu_id = local_spu_id; + let mut message = RequestMessage::new_request(fetch_request); + message + .get_mut_header() + .set_client_id(format!("peer spu: {}",local_spu_id)); + + let response = socket.send(&message).await?; + trace!("fetch stream response: {:#?}",response); + debug!("established peer to peer channel to leader: {} from follower: {}",self.leader_id,local_spu_id); + Ok(()) + } + + /// create new replica if it doesn't exist yet + async fn update_replica(&self, replica_msg: Replica) { + + debug!("received update replica {} from leader: {}",replica_msg,self.leader_id); + + let replica_key = replica_msg.id.clone(); + if self.followers_state.has_replica(&replica_key) { + debug!("already has follower replica: {}, ignoring",replica_key); + } else { + let log = &self.config.storage().new_config(); + match FollowerReplicaState::new(self.config.id(),replica_msg.leader,&replica_key,&log).await { + Ok(replica_state) => { + self.followers_state.insert_replica(replica_state); + }, + Err(err) => error!("error creating follower replica: {}, err: {:#?}",replica_key,err) + } + } + } + + /// send offsets to leader, so it can synchronize + async fn sync_all_offsets_to_leader(&self, sink: &mut KfSink) { + + self.sync_offsets_to_leader(sink,self.followers_state.replica_offsets(&self.leader_id)).await; + } + + /// send follower offset to leader + async fn sync_offsets_to_leader(&self, sink: &mut KfSink,offsets: UpdateOffsetRequest) { + + let req_msg = RequestMessage::new_request(offsets) + .set_client_id(format!("follower_id: {}",self.config.id())); + + trace!("sending offsets: {:#?} to leader: {}",&req_msg,self.leader_id); + + log_on_err!(sink.send_request(&req_msg).await,"error sending request to leader {}"); + debug!("synced follower offset: {} to leader: {}",self.config.id(),self.leader_id); + } + + +} diff --git a/spu-server/src/controllers/follower_replica/mod.rs b/spu-server/src/controllers/follower_replica/mod.rs new file mode 100644 index 0000000000..9dd3e00a67 --- /dev/null +++ b/spu-server/src/controllers/follower_replica/mod.rs @@ -0,0 +1,25 @@ +mod state; +mod follower_controller; +mod api_key; +mod peer_api; +mod sync; + +pub(crate) use self::follower_controller::ReplicaFollowerController; +pub use self::state::FollowersState; +pub use self::state::FollowerReplicaState; +pub use self::state::SharedFollowersState; +pub use self::api_key::KfFollowerPeerApiEnum; +pub use self::peer_api::FollowerPeerRequest; +pub
use self::sync::PeerFileTopicReponse; +pub use self::sync::PeerFilePartitionResponse; +pub use self::sync::DefaultSyncRequest; +pub use self::sync::FileSyncRequest; + +use internal_api::messages::Replica; + + +#[derive(Debug)] +pub enum FollowerReplicaControllerCommand { + AddReplica(Replica), + UpdateReplica(Replica) +} \ No newline at end of file diff --git a/spu-server/src/controllers/follower_replica/peer_api.rs b/spu-server/src/controllers/follower_replica/peer_api.rs new file mode 100644 index 0000000000..52a5676004 --- /dev/null +++ b/spu-server/src/controllers/follower_replica/peer_api.rs @@ -0,0 +1,53 @@ +use std::io::Error as IoError; +use std::convert::TryInto; + +use log::trace; + +use kf_protocol::bytes::Buf; +use kf_protocol::Decoder; +use kf_protocol::derive::Encode; + +use kf_protocol::api::KfRequestMessage; +use kf_protocol::api::RequestMessage; +use kf_protocol::api::RequestHeader; + +use super::KfFollowerPeerApiEnum; +use super::DefaultSyncRequest; + + +#[derive(Debug,Encode)] +pub enum FollowerPeerRequest { + SyncRecords(RequestMessage) +} + + +impl Default for FollowerPeerRequest { + fn default() -> FollowerPeerRequest { + FollowerPeerRequest::SyncRecords(RequestMessage::::default()) + } +} + + + +impl KfRequestMessage for FollowerPeerRequest { + + type ApiKey = KfFollowerPeerApiEnum; + + fn decode_with_header(src: &mut T, header: RequestHeader) -> Result + where + Self: Default + Sized, + Self::ApiKey: Sized, + T: Buf + { + + trace!("decoding with header: {:#?}",header); + let version = header.api_version(); + match header.api_key().try_into()? { + KfFollowerPeerApiEnum::SyncRecords => Ok(FollowerPeerRequest::SyncRecords(RequestMessage::new(header,DefaultSyncRequest::decode_from(src, + version)?))) + } + + } + +} + diff --git a/spu-server/src/controllers/follower_replica/state.rs b/spu-server/src/controllers/follower_replica/state.rs new file mode 100644 index 0000000000..63c69a7ef0 --- /dev/null +++ b/spu-server/src/controllers/follower_replica/state.rs @@ -0,0 +1,372 @@ +use std::sync::RwLock; +use std::sync::Arc; +use std::fmt::Debug; +use std::collections::HashMap; +use std::collections::HashSet; + +use log::debug; +use log::trace; +use log::error; +use futures::channel::mpsc::Sender; +use futures::channel::mpsc::Receiver; +use futures::channel::mpsc::channel; +use chashmap::CHashMap; +use chashmap::ReadGuard; +use chashmap::WriteGuard; + +use metadata::partition::ReplicaKey; +use kf_protocol::api::DefaultRecords; +use storage::FileReplica; +use storage::ConfigOption; +use storage::StorageError; +use storage::ReplicaStorage; +use types::SpuId; +use utils::SimpleConcurrentBTreeMap; + +use crate::core::storage::create_replica_storage; +use crate::controllers::leader_replica::UpdateOffsetRequest; +use crate::controllers::leader_replica::ReplicaOffsetRequest; +use super::FollowerReplicaControllerCommand; +use super::DefaultSyncRequest; + +pub type SharedFollowersState = Arc>; + +/// Maintains state for followers +/// Each follower controller maintains by SPU +#[derive(Debug)] +pub struct FollowersState { + mailboxes: SimpleConcurrentBTreeMap>, + replica_keys: RwLock>>, // replicas maintained by follower controller + replicas: CHashMap>, +} + + +impl FollowersState { + + pub fn new() -> Self { + FollowersState { + mailboxes: SimpleConcurrentBTreeMap::new(), + replica_keys: RwLock::new(HashMap::new()), + replicas: CHashMap::new() + } + } + + pub fn new_shared() -> SharedFollowersState { + Arc::new(Self::new()) + } + + #[allow(dead_code)] + pub fn 
followers_count(&self,leader: &SpuId) -> usize { + + let keys_lock = self.replica_keys.read().unwrap(); + if let Some(keys) = keys_lock.get(leader) { + keys.len() + } else { + 0 + } + } + + + pub fn has_replica(&self, key: &ReplicaKey) -> bool { + self.replicas.contains_key(key) + } + + pub fn get_replica( + &self, + key: &ReplicaKey, + ) -> Option>> { + self.replicas.get(key) + } + + pub fn get_mut_replica( + &self, + key: &ReplicaKey, + ) -> Option>> { + self.replicas.get_mut(key) + } + + + /// remove followe replica + /// if there are no more replicas per leader + /// then we shutdown controller + pub fn remove_replica(&self,leader: &SpuId, key: &ReplicaKey) -> Option> { + + // remove replica from managed list for replica controller + debug!("removing follower replica: {}, leader: {}",key,leader); + let lock = self.mailboxes.read(); + drop(lock); + let mut keys_lock = self.replica_keys.write().unwrap(); + let mut empty = false; + if let Some(keys_by_spu) = keys_lock.get_mut(leader) { + keys_by_spu.remove(key); + if keys_by_spu.len() == 0 { + empty = true; + } + } else { + empty = true; // case where we dont find leader as well + } + + let old_replica = self.replicas.remove(key); + + // if replica is empty then we need to terminate the leader + if empty { + debug!("no more followers for follower controller: {}, terminating it",leader); + if let Some(mut old_mailbox) = self.mailboxes.write().remove(leader) { + debug!("removed mailbox for follower controller and closing it {}",leader); + old_mailbox.close_channel(); + } else { + error!("there was no mailbox to close controller: {}",leader); + } + } + + old_replica + + } + + + /// insert new mailbox and return receiver + /// this is called by sc dispatcher + pub(crate) fn insert_mailbox(&self, spu: SpuId) -> + (Sender, + Receiver) + { + let (sender, receiver) = channel(10); + let mut write_mailbox = self.mailboxes.write(); + debug!("inserting mailbox for follower controller: {}",spu); + if let Some(mut old_mailbox) = write_mailbox.insert(spu, sender.clone()) { + debug!("there was old mailbox: {}, terminating it",spu); + old_mailbox.close_channel(); + } + (sender,receiver) + } + + pub(crate) fn mailbox(&self, spu: &SpuId) -> Option> { + self.mailboxes + .read() + .get(spu) + .map(|mailbox| mailbox.clone()) + } + + +} + +impl FollowersState where S: Debug { + + /// insert new replica, once replica has been insert, need to update the leader + /// this is called by follower controller + pub fn insert_replica(&self, state: FollowerReplicaState) { + trace!("inserting new follower replica: {:#?}", state); + let mut keys_lock = self.replica_keys.write().unwrap(); + if let Some(keys_by_pus) = keys_lock.get_mut(&state.leader) { + keys_by_pus.insert(state.replica.clone()); + } else { + let mut keys = HashSet::new(); + keys.insert(state.replica.clone()); + keys_lock.insert(state.leader,keys); + } + + self.replicas.insert(state.replica.clone(),state); + } + +} + + + +impl FollowersState { + + + + /// write records from leader to followe replica + /// return updated offsets + pub(crate) async fn send_records(&self, req: DefaultSyncRequest) -> UpdateOffsetRequest { + let mut offsets = UpdateOffsetRequest::default(); + for topic_request in req.topics { + let topic = &topic_request.name; + for partition_request in topic_request.partitions { + let rep_id = partition_request.partition_index; + let replica_key = ReplicaKey::new(topic.clone(), rep_id); + trace!("sync request for replica: {}", replica_key); + if let Some(mut replica) = 
self.get_mut_replica(&replica_key) { + match replica.send_records(partition_request.records).await { + Ok(_) => { + trace!( + "successfully wrote records to follower replica: {}", + replica_key + ); + let end_offset = replica.storage().get_leo(); + let high_watermark = partition_request.high_watermark; + if end_offset == high_watermark { + trace!("follower replica end offset: {} is same as leader high watermark, updating",end_offset); + if let Err(err) = replica + .mut_storage() + .update_high_watermark(high_watermark) + .await + { + error!("error writing replica high watermark: {}", err); + } + } else { + trace!("replica: {} end offset: {} does not yet match leader high watermark",replica_key,end_offset); + } + drop(replica); + self.add_replica_offset_to(&replica_key, &mut offsets); + } + Err(err) => error!( + "problem writing replica: {}, error: {:#?}", + replica_key, err + ), + } + } else { + error!( + "unable to find follower replica for writing: {}", + replica_key + ); + } + } + } + offsets + } + + /// offsets for all replicas + pub(crate) fn replica_offsets(&self,leader: &SpuId) -> UpdateOffsetRequest { + + let replica_indexes = self.replica_keys.read().unwrap(); + + let mut offsets = UpdateOffsetRequest::default(); + + if let Some(keys) = replica_indexes.get(leader) { + for replica_id in keys { + self.add_replica_offset_to(replica_id, &mut offsets); + } + } + offsets + } + + fn add_replica_offset_to( + &self, + replica_id: &ReplicaKey, + offsets: &mut UpdateOffsetRequest, + ) { + if let Some(replica) = self.get_replica(replica_id) { + let storage = replica.storage(); + + let mut replica_request = ReplicaOffsetRequest::default(); + replica_request.replica = replica_id.clone(); + replica_request.leo = storage.get_leo(); + replica_request.hw = storage.get_hw(); + offsets.replicas.push(replica_request); + } else { + error!( + "no follower replica found: {}, should not be possible", + replica_id, + ); + } + } +} + +/// State for Follower Replica Controller.
+/// +#[derive(Debug)] +pub struct FollowerReplicaState { + leader: SpuId, + replica: ReplicaKey, + storage: S +} + +impl FollowerReplicaState { + + pub fn storage(&self) -> &S { + &self.storage + } + + pub fn mut_storage(&mut self) -> &mut S { + &mut self.storage + } + + pub fn storage_owned(self) -> S { + self.storage + } +} + +impl FollowerReplicaState +{ + pub async fn new<'a>( + local_spu: SpuId, + leader: SpuId, + replica: &'a ReplicaKey, + config: &'a ConfigOption, + ) -> Result { + debug!("adding follower replica for: {}", replica); + + let storage = create_replica_storage(local_spu, replica, &config).await?; + + Ok(Self { + leader, + replica: replica.clone(), + storage, + }) + } + + pub async fn send_records(&mut self, records: DefaultRecords) -> Result<(), StorageError> { + trace!( + "writing records to follower replica: {}, leader: {}", + self.replica, + self.leader + ); + self.storage.send_records(records, false).await + } + + +} + + +#[cfg(test)] +mod test { + + use super::FollowerReplicaState; + use super::FollowersState; + + #[derive(Debug)] + struct FakeStorage { + } + + #[test] + fn test_insert_and_remove_state() { + + let f1 = FollowerReplicaState { + leader: 10, + replica: ("topic",0).into(), + storage: FakeStorage{} + }; + + let k1 = f1.replica.clone(); + + + let f2 = FollowerReplicaState { + leader: 20, + replica: ("topic2",0).into(), + storage: FakeStorage{} + }; + + let f3 = FollowerReplicaState { + leader: 10, + replica: ("topic",1).into(), + storage: FakeStorage{} + }; + + + let states = FollowersState::new(); + states.insert_replica(f1); + states.insert_replica(f2); + states.insert_replica(f3); + + + assert_eq!(states.followers_count(&10),2); + assert_eq!(states.followers_count(&20),1); + assert_eq!(states.followers_count(&30),0); + + + let old_state = states.remove_replica(&10,&k1).expect("old state exists"); + assert_eq!(old_state.leader,10); + } + +} \ No newline at end of file diff --git a/spu-server/src/controllers/follower_replica/sync.rs b/spu-server/src/controllers/follower_replica/sync.rs new file mode 100644 index 0000000000..62e3e40afe --- /dev/null +++ b/spu-server/src/controllers/follower_replica/sync.rs @@ -0,0 +1,183 @@ +use std::fmt::Debug; +use std::fmt::Display; +use std::fmt; +use std::io::Error as IoError; +use std::marker::PhantomData; + +use bytes::BytesMut; +use log::trace; + +use kf_protocol::Encoder; +use kf_protocol::Decoder; +use kf_protocol::Version; +use kf_protocol::derive::Decode; +use kf_protocol::derive::Encode; +use kf_protocol::api::DefaultRecords; +use kf_protocol::api::Request; +use kf_protocol::api::ErrorCode; +use kf_socket::KfFileRecordSet; +use kf_socket::StoreValue; +use kf_socket::FileWrite; +use storage::SlicePartitionResponse; +use future_aio::fs::AsyncFileSlice; + +use super::KfFollowerPeerApiEnum; + +pub type FileSyncRequest = SyncRequest; +pub type DefaultSyncRequest = SyncRequest; +pub type PeerFilePartitionResponse = PeerFetchablePartitionResponse; +pub type PeerFileTopicReponse = PeerFetchableTopicResponse; + +/// used for sending records and commits +/// repurpose topic response since it has records and commit offsets +#[derive(Default,Encode,Decode,Debug)] +pub struct SyncRequest where R: Encoder + Decoder + Debug { + pub topics: Vec> +} + + +impl fmt::Display for SyncRequest where R: Encoder + Decoder + Debug + Display { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f,"[")?; + for topic in &self.topics { + write!(f,"{},",topic)?; + } + write!(f,"]") + + } +} + + + + + +// Request trait +// 
Note that DEFAULT_API_VERSION is 7 which is required in order to map all fields for file encoding +// TODO: come up with unify encoding +impl Request for SyncRequest where R: Encoder + Decoder + Debug { + const API_KEY: u16 = KfFollowerPeerApiEnum::SyncRecords as u16; + const DEFAULT_API_VERSION: i16 = 7; + type Response = SyncResponse; +} + +#[derive(Default,Encode,Decode,Debug)] +pub struct SyncResponse{} + + +/// allow sync request to be encoded for zerocopy +impl FileWrite for FileSyncRequest { + + fn file_encode<'a: 'b,'b>(&'a self, src: &mut BytesMut, data: &'b mut Vec>,version: Version) -> Result<(), IoError> { + trace!("file encoding for FileSyncRequest version: {}",version); + self.topics.file_encode(src,data,version)?; + Ok(()) + } + +} + + +#[derive(Encode, Decode, Default, Debug)] +pub struct PeerFetchableTopicResponse +where + R: Encoder + Decoder + Default + Debug, +{ + pub name: String, + pub partitions: Vec>, + pub data: PhantomData, +} + + + +impl fmt::Display for PeerFetchableTopicResponse where R: Encoder + Decoder + Default + Debug + Display { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f,"{} [",self.name)?; + for partition in &self.partitions { + write!(f,"{},",partition)?; + } + write!(f,"]") + + } +} + + + + +#[derive(Encode, Decode, Default, Debug)] +pub struct PeerFetchablePartitionResponse +where + R: Encoder + Decoder + Default + Debug, +{ + pub partition_index: i32, + pub error_code: ErrorCode, + pub high_watermark: i64, + pub last_stable_offset: i64, + pub records: R, +} + + +impl fmt::Display for PeerFetchablePartitionResponse where + R: Encoder + Decoder + Default + Debug + Display +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f,"p: {}, hw: {} {}",self.partition_index,self.high_watermark,self.records) + } +} + + + +impl FileWrite for PeerFileTopicReponse { + fn file_encode<'a: 'b, 'b>( + &'a self, + src: &mut BytesMut, + data: &'b mut Vec>, + version: Version, + ) -> Result<(), IoError> { + trace!("file encoding fetch topic response"); + self.name.encode(src, version)?; + self.partitions.file_encode(src, data, version)?; + Ok(()) + } +} + + +impl FileWrite for PeerFilePartitionResponse { + fn file_encode<'a: 'b, 'b>( + &'a self, + src: &mut BytesMut, + data: &'b mut Vec>, + version: Version, + ) -> Result<(), IoError> { + trace!("file encoding fetch partition response"); + self.partition_index.encode(src, version)?; + self.error_code.encode(src, version)?; + self.high_watermark.encode(src, version)?; + self.last_stable_offset.encode(src, version)?; + self.records.file_encode(src, data, version)?; + Ok(()) + } +} + + +impl SlicePartitionResponse for PeerFilePartitionResponse { + + fn set_hw(&mut self, offset: i64) { + self.high_watermark = offset; + } + + fn set_slice(&mut self, slice: AsyncFileSlice) { + self.records = slice.into(); + } + + fn set_error_code(&mut self, error: ErrorCode) { + self.error_code = error; + } + + fn set_last_stable_offset(&mut self,offset: i64) { + self.last_stable_offset = offset; + } + + fn set_log_start_offset(&mut self, _offset: i64) { + } + + +} diff --git a/spu-server/src/controllers/leader_replica/actions.rs b/spu-server/src/controllers/leader_replica/actions.rs new file mode 100644 index 0000000000..d1f17797f9 --- /dev/null +++ b/spu-server/src/controllers/leader_replica/actions.rs @@ -0,0 +1,44 @@ + +use internal_api::messages::Replica; +use kf_protocol::api::Offset; +use types::SpuId; + + +#[derive(Debug)] +pub enum LeaderReplicaControllerCommand { + 
UpdateReplicaFromSc(Replica), + EndOffsetUpdated, + FollowerOffsetUpdate(FollowerOffsetUpdate) +} + + +#[derive(Debug)] +pub struct FollowerOffsetUpdate { + pub follower_id: SpuId, + pub leo: Offset, // log end offset + pub hw: Offset // high water mark +} + +impl FollowerOffsetUpdate { + + #[allow(dead_code)] + pub fn new(follower_id: SpuId,leo: Offset,hw: Offset) -> Self { + + assert!(hw <= leo,"high watermark is always less than end offset" ); + Self { + follower_id, + leo, + hw + } + } +} + +impl From<(SpuId,Offset,Offset)> for FollowerOffsetUpdate { + fn from(value: (SpuId,Offset,Offset)) -> Self { + FollowerOffsetUpdate { + follower_id: value.0, + leo: value.1, + hw: value.2 + } + } +} \ No newline at end of file diff --git a/spu-server/src/controllers/leader_replica/api_key.rs b/spu-server/src/controllers/leader_replica/api_key.rs new file mode 100644 index 0000000000..7f76ef7e0c --- /dev/null +++ b/spu-server/src/controllers/leader_replica/api_key.rs @@ -0,0 +1,17 @@ + +use kf_protocol::derive::Encode; +use kf_protocol::derive::Decode; + + +#[derive(PartialEq, Debug, Encode, Decode, Clone, Copy)] +#[repr(u16)] +pub enum KfLeaderPeerApiEnum { + UpdateOffsets = 0 +} + + +impl Default for KfLeaderPeerApiEnum { + fn default() -> KfLeaderPeerApiEnum { + KfLeaderPeerApiEnum::UpdateOffsets + } +} diff --git a/spu-server/src/controllers/leader_replica/connection.rs b/spu-server/src/controllers/leader_replica/connection.rs new file mode 100644 index 0000000000..f48ef1d20e --- /dev/null +++ b/spu-server/src/controllers/leader_replica/connection.rs @@ -0,0 +1,102 @@ + + +use log::trace; +use log::error; +use log::debug; +use log::warn; +use kf_socket::KfSocketError; +use kf_socket::KfStream; +use kf_socket::KfSocket; +use kf_service::api_loop; +use types::SpuId; + +use crate::core::DefaultSharedGlobalContext; + +use super::LeaderReplicaControllerCommand; +use super::FollowerOffsetUpdate; +use super::KfLeaderPeerApiEnum; +use super::LeaderPeerRequest; +use super::UpdateOffsetRequest; +use super::ReplicaOffsetRequest; + +/// Handle connection from follower to leader +pub struct LeaderConnection { + ctx: DefaultSharedGlobalContext, + follower_id: SpuId +} + +impl LeaderConnection { + + /// manage connection from follower + pub async fn handle(ctx: DefaultSharedGlobalContext, follower_id: SpuId, socket: KfSocket) -> Result<(),KfSocketError> { + + let (sink,stream) = socket.split(); + ctx.follower_sinks().insert_sink(follower_id,sink); + + let connection = Self { + ctx: ctx.clone(), + follower_id + }; + + connection.main_loop(stream).await?; + + ctx.follower_sinks().clear_sink(&follower_id); + Ok(()) + } + + async fn main_loop(&self, mut stream: KfStream) -> Result<(),KfSocketError> { + + trace!("starting connection handling from follower: {} for leader: {}",self.follower_id,self.ctx.local_spu_id()); + + let mut api_stream = stream.api_stream::(); + + api_loop!( + api_stream, + LeaderPeerRequest::UpdateOffsets(request) => { + self.route_offset_request(request.request).await + } + ); + + Ok(()) + + } + + /// route offset update request from follower to replica leader controller + async fn route_offset_request(&self,request: UpdateOffsetRequest) { + + debug!("receive offset request from follower: {}",self.follower_id); + for replica in request.replicas { + route_replica_offset(self.ctx.clone(), self.follower_id, replica).await + } + } + +} + + +/// send route replica offsets to leader replica controller +/// it spawn request +async fn route_replica_offset(ctx: DefaultSharedGlobalContext,follower_id: 
SpuId,replica: ReplicaOffsetRequest) { + + + let replica_key = replica.replica; + let follower_update = FollowerOffsetUpdate { + follower_id, + leo: replica.leo, + hw: replica.hw + }; + + + match ctx.leaders_state().send_message(&replica_key,LeaderReplicaControllerCommand::FollowerOffsetUpdate(follower_update)).await { + Ok(success_stat) => { + if success_stat { + trace!("sent offset data to replica leader: {}",replica_key); + } else { + warn!("replica leader: {} was not found",replica_key); // this could happen when the leader controller has not been created yet + } + }, + Err(err) => error!("Error writing message to replica {}, err: {}",replica_key,err) + } + + + } + diff --git a/spu-server/src/controllers/leader_replica/leader_controller.rs b/spu-server/src/controllers/leader_replica/leader_controller.rs new file mode 100644 index 0000000000..7e66dfadb6 --- /dev/null +++ b/spu-server/src/controllers/leader_replica/leader_controller.rs @@ -0,0 +1,171 @@ +use std::sync::Arc; +use std::time::Duration; + +use log::debug; +use log::trace; +use log::warn; +use futures::channel::mpsc::Receiver; +use futures::future::FutureExt; +use futures::future::join; +use futures::select; +use futures::stream::StreamExt; + +use future_helper::spawn; +use future_helper::sleep; +use metadata::partition::ReplicaKey; +use storage::FileReplica; +use types::SpuId; +use kf_socket::ExclusiveKfSink; + +use crate::core::SharedSpuSinks; + + +use super::LeaderReplicaControllerCommand; +use super::FollowerOffsetUpdate; +use super::SharedReplicaLeadersState; + +/// time for complete reconciliation with followers +pub const FOLLOWER_RECONCILIATION_INTERVAL_SEC: u64 = 300; // 5 min + +/// Controller for managing a leader replica. +/// Each leader replica controller is spawned and managed by the master controller to ensure maximum parallelism. +pub struct ReplicaLeaderController<S> { + #[allow(dead_code)] + local_spu: SpuId, + id: ReplicaKey, + controller_receiver: Receiver<LeaderReplicaControllerCommand>, + leaders_state: SharedReplicaLeadersState<S>, + follower_sinks: SharedSpuSinks, + sc_sink: Arc<ExclusiveKfSink> +} + +impl<S> ReplicaLeaderController<S> { + + + pub fn new( + local_spu: SpuId, + id: ReplicaKey, + controller_receiver: Receiver<LeaderReplicaControllerCommand>, + leaders_state: SharedReplicaLeadersState<S>, + follower_sinks: SharedSpuSinks, + sc_sink: Arc<ExclusiveKfSink> + ) -> Self + { + Self { + local_spu, + id, + controller_receiver, + leaders_state, + follower_sinks, + sc_sink + } + + } +} + + +impl ReplicaLeaderController<FileReplica> +{ + + pub fn run(self) { + spawn(self.dispatch_loop()); + } + + async fn dispatch_loop(mut self) { + + debug!("starting leader controller for: {}",self.id); + self.send_status_to_sc().await; + self.sync_followers().await; + + loop { + debug!("waiting for next command"); + + select!
{ + + _ = (sleep(Duration::from_secs(FOLLOWER_RECONCILIATION_INTERVAL_SEC))).fuse() => { + debug!("timer fired - kicking off follower reconciliation"); + self.sync_followers().await; + }, + + controller_req = self.controller_receiver.next() => { + if let Some(command) = controller_req { + + match command { + LeaderReplicaControllerCommand::EndOffsetUpdated => { + trace!("leader replica end offset has been updated, sync followers if needed"); + join(self.send_status_to_sc(),self.sync_followers()).await; + }, + + LeaderReplicaControllerCommand::FollowerOffsetUpdate(offsets) => { + debug!("offset update from follower: {:#?} for leader: {}", offsets, self.id); + self.update_follower_offsets(offsets).await; + }, + + LeaderReplicaControllerCommand::UpdateReplicaFromSc(replica) => { + debug!("update replica from sc: {}",replica.id); + } + } + } else { + debug!( + "mailbox has terminated for replica leader: {}, terminating loop", + self.id + ); + } + } + } + + } + } + + /// update the follower offsets + async fn update_follower_offsets(&self, offsets: FollowerOffsetUpdate) { + if let Some(mut leader_replica) = self.leaders_state.get_mut_replica(&self.id) { + let follower_id = offsets.follower_id; + let (update_status,sync_follower) = leader_replica.update_follower_offsets(offsets); + + join( + async { + if update_status { + leader_replica.send_status_to_sc(&self.sc_sink).await; + } + }, + async { + if let Some(follower_info) = sync_follower { + leader_replica.sync_follower( + &self.follower_sinks, + follower_id, + &follower_info).await; + } + } + ).await; + + } else { + warn!("no replica found: {} for updating follower offsets", self.id); + } + } + + + /// update the followers with my state + async fn sync_followers(&self) { + if let Some(leader_replica) = self.leaders_state.get_replica(&self.id) { + leader_replica.sync_followers(&self.follower_sinks).await; + } else { + warn!("no replica found: {} for syncing followers", self.id); + } + } + + /// send status back to sc + async fn send_status_to_sc(&self) { + + if let Some(leader_replica) = self.leaders_state.get_replica(&self.id) { + leader_replica.send_status_to_sc(&self.sc_sink).await; + } else { + warn!("no replica found: {} for sending status back", self.id); + } + + } + +} + + + diff --git a/spu-server/src/controllers/leader_replica/leaders_state.rs b/spu-server/src/controllers/leader_replica/leaders_state.rs new file mode 100644 index 0000000000..77e95341dc --- /dev/null +++ b/spu-server/src/controllers/leader_replica/leaders_state.rs @@ -0,0 +1,260 @@ +use std::sync::Arc; + +use chashmap::CHashMap; + use chashmap::ReadGuard; + use chashmap::WriteGuard; + use futures::channel::mpsc::Sender; + use futures::channel::mpsc::SendError; + use futures::SinkExt; + use log::debug; + use log::warn; + use log::trace; + use log::error; + +use metadata::partition::ReplicaKey; + use kf_protocol::api::DefaultRecords; + use storage::FileReplica; + use kf_socket::FilePartitionResponse; + use kf_protocol::api::Offset; + use kf_protocol::api::Isolation; + use kf_protocol::api::ErrorCode; + use utils::SimpleConcurrentBTreeMap; + +use crate::InternalServerError; + +use super::LeaderReplicaState; + use super::LeaderReplicaControllerCommand; + +pub type SharedReplicaLeadersState<S> = Arc<ReplicaLeadersState<S>>; + +/// State for Replica Leaders +/// It is used by the Replica Leader Controller supervisor +#[derive(Debug)] +pub struct ReplicaLeadersState<S> { + replicas: CHashMap<ReplicaKey, LeaderReplicaState<S>>, + mailboxes: SimpleConcurrentBTreeMap<ReplicaKey, Sender<LeaderReplicaControllerCommand>> +} + +impl<S> Default for ReplicaLeadersState<S> { + fn default() -> Self { + ReplicaLeadersState { + replicas: CHashMap::default(), + mailboxes: SimpleConcurrentBTreeMap::new() + } + } +} + +impl<S> ReplicaLeadersState<S> { + + pub fn new_shared() -> Arc<ReplicaLeadersState<S>> { + Arc::new(Self::default()) + } + + pub fn has_replica(&self, key: &ReplicaKey) -> bool { + self.replicas.contains_key(key) + } + + pub fn get_replica( + &self, + key: &ReplicaKey, + ) -> Option<ReadGuard<ReplicaKey, LeaderReplicaState<S>>> { + self.replicas.get(key) + } + + pub fn get_mut_replica( + &self, + key: &ReplicaKey, + ) -> Option<WriteGuard<ReplicaKey, LeaderReplicaState<S>>> { + self.replicas.get_mut(key) + } + + + pub fn insert_replica( + &self, + key: ReplicaKey, + leader: LeaderReplicaState<S>, + mailbox: Sender<LeaderReplicaControllerCommand>, + ) -> Option<LeaderReplicaState<S>> { + trace!("inserting leader state: {}", key); + if let Some(mut old_mailbox) = self.mailboxes.write().insert(key.clone(), mailbox) { + error!("closing leftover mailbox for leader"); + old_mailbox.close_channel(); + } + self.replicas.insert(key, leader) + } + + /// remove leader replica + /// we also remove the mailbox and close its channel, which will terminate the controller + pub fn remove_replica( + &self, + key: &ReplicaKey + ) -> Option<LeaderReplicaState<S>> { + if let Some(replica) = self.replicas.remove(key) { + if let Some(mut mailbox) = self.mailboxes.write().remove(key) { + debug!("closing old leader mailbox: {}",key); + mailbox.close_channel(); + } else { + error!("no mailbox found for removing: {}",key); + } + Some(replica) + } else { + error!("leader replica: {} was not found",key); + None + } + } + + + + pub async fn send_message( + &self, + replica: &ReplicaKey, + command: LeaderReplicaControllerCommand, + ) -> Result<bool, SendError> { + match self.mailbox(replica) { + Some(mut mailbox) => { + trace!( + "sending message to leader replica: {:#?} controller: {:#?}", + replica, + command + ); + mailbox.send(command).await?; + Ok(true) + } + None => Ok(false), + } + } + + + #[allow(dead_code)] + pub fn mailbox(&self, key: &ReplicaKey) -> Option<Sender<LeaderReplicaControllerCommand>> { + self.mailboxes + .read() + .get(key) + .map(|mailbox_guard| mailbox_guard.clone()) + } +} + + +impl ReplicaLeadersState<FileReplica> { + + /// write records to response + /// + /// # Arguments + /// + /// * `rep_id` - replica id + /// * `offset` - starting offset + /// * `isolation` - isolation + /// * `response` - partition response + pub async fn read_records( + &self, + rep_id: &ReplicaKey, + offset: Offset, + isolation: Isolation, + response: &mut FilePartitionResponse + ) { + if let Some(leader_replica) = self.get_replica(rep_id) { + leader_replica + .read_records(offset, isolation, response) + .await; + } else { + warn!("no replica is found: {}", rep_id); + response.error_code = ErrorCode::NotLeaderForPartition; + } + } + + + /// write new records and notify the leader replica controller + /// TODO: maybe replicas should be moved to their own map + pub async fn send_records( + &self, + rep_id: &ReplicaKey, + records: DefaultRecords, + update_highwatermark: bool, + ) -> Result<bool, InternalServerError> { + + if let Some(mut leader_replica) = self.get_mut_replica(rep_id) { + leader_replica + .send_records(records, update_highwatermark) + .await?; + self.send_message( + rep_id, + LeaderReplicaControllerCommand::EndOffsetUpdated, + ) + .await + .map_err(|err| err.into()) + } else { + warn!("no replica is found: {}", rep_id); + Ok(false) + } + } +} + +/* +/// encapsulate LeaderReplicaState that has readable lock +struct ReadableLeaderReplicaState<'a, S>(ReadGuard<'a, ReplicaKey, LeaderReplicaState<S>>); + +impl<'a, S> ReadableLeaderReplicaState<'a, S> { + + + +} + + +impl<'a, S> From<ReadGuard<'a, ReplicaKey, LeaderReplicaState<S>>> for ReadableLeaderReplicaState<'a, S> { + fn from(replica_state: ReadGuard<'a, ReplicaKey, LeaderReplicaState<S>>) -> Self { + Self(replica_state) + } +} +*/
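// A minimal usage sketch, assuming a produce-side handler that already holds a
// `ReplicaLeadersState<FileReplica>`; `leaders_state`, `replica`, and `records`
// are hypothetical values in the caller's scope, the method names are the ones
// defined above:
//
//     async fn on_produce(
//         leaders_state: &ReplicaLeadersState<FileReplica>,
//         replica: &ReplicaKey,
//         records: DefaultRecords,
//     ) -> Result<bool, InternalServerError> {
//         // send_records() appends to storage, then posts EndOffsetUpdated to the
//         // controller mailbox so followers get synced and the SC gets status
//         leaders_state.send_records(replica, records, false).await
//     }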
#[cfg(test)] +mod test_channel { + + use std::time::Duration; + + use futures::channel::mpsc::Sender; + use futures::channel::mpsc::Receiver; + use futures::channel::mpsc::channel; + use futures::future::join; + use futures::SinkExt; + use futures::StreamExt; + + use future_helper::sleep; + use future_helper::test_async; + + async fn receiver_test(mut receiver: Receiver<u16>) { + // sleep to let the sender send messages + assert!(receiver.next().await.is_some()); + // wait until the sender has sent all 3 and terminated + sleep(Duration::from_millis(10)).await; + assert!(receiver.next().await.is_some()); + assert!(receiver.next().await.is_some()); + assert!(receiver.next().await.is_none()); + } + + async fn sender_test(orig_mailbox: Sender<u16>) { + let mut mailbox = orig_mailbox.clone(); + assert!(!mailbox.is_closed()); + sleep(Duration::from_millis(1)).await; + mailbox.send(10).await.expect("send"); + mailbox.send(11).await.expect("send"); + mailbox.send(12).await.expect("send"); + mailbox.disconnect(); + assert!(mailbox.is_closed()); + // wait 30 milliseconds to give the receiver side time to be tested + sleep(Duration::from_millis(30)).await; + } + + // test send and disconnect + #[test_async] + async fn test_event_shutdown() -> Result<(), ()> { + let (sender, receiver) = channel::<u16>(10); + + join(sender_test(sender), receiver_test(receiver)).await; + + Ok(()) + } + +} diff --git a/spu-server/src/controllers/leader_replica/mod.rs b/spu-server/src/controllers/leader_replica/mod.rs new file mode 100644 index 0000000000..6d980ebfa2 --- /dev/null +++ b/spu-server/src/controllers/leader_replica/mod.rs @@ -0,0 +1,20 @@ +mod leader_controller; +mod leaders_state; +mod replica_state; +mod connection; +mod api_key; +mod peer_api; +mod update_offsets; +mod actions; + +pub use self::leader_controller::ReplicaLeaderController; +pub use leaders_state::ReplicaLeadersState; +pub use leaders_state::SharedReplicaLeadersState; +pub use self::replica_state::LeaderReplicaState; +pub use self::connection::LeaderConnection; +pub use self::api_key::KfLeaderPeerApiEnum; +pub use self::peer_api::LeaderPeerRequest; +pub use self::update_offsets::UpdateOffsetRequest; +pub use self::update_offsets::ReplicaOffsetRequest; +pub use self::actions::FollowerOffsetUpdate; +pub use self::actions::LeaderReplicaControllerCommand; diff --git a/spu-server/src/controllers/leader_replica/peer_api.rs b/spu-server/src/controllers/leader_replica/peer_api.rs new file mode 100644 index 0000000000..947aec7b3f --- /dev/null +++ b/spu-server/src/controllers/leader_replica/peer_api.rs @@ -0,0 +1,54 @@ +use std::io::Error as IoError; +use std::convert::TryInto; + +use log::trace; + +use kf_protocol::bytes::Buf; +use kf_protocol::Decoder; +use kf_protocol::derive::Encode; +use kf_protocol::api::KfRequestMessage; +use kf_protocol::api::RequestMessage; +use kf_protocol::api::RequestHeader; + +use super::KfLeaderPeerApiEnum; +use super::UpdateOffsetRequest; + + +#[derive(Debug,Encode)] +pub enum LeaderPeerRequest { + UpdateOffsets(RequestMessage<UpdateOffsetRequest>) +} + + + +impl Default for LeaderPeerRequest { + fn default() -> LeaderPeerRequest { + LeaderPeerRequest::UpdateOffsets(RequestMessage::<UpdateOffsetRequest>::default()) + } +} + + + +impl KfRequestMessage for LeaderPeerRequest { + + type ApiKey = KfLeaderPeerApiEnum; + + fn decode_with_header<T>(src: &mut T, header: RequestHeader) -> Result<Self, IoError> + where + Self: Default + Sized, + Self::ApiKey: Sized, + T: Buf + { + + trace!("decoding with header: {:#?}",header); + let
version = header.api_version(); + match header.api_key().try_into()? { + KfLeaderPeerApiEnum::UpdateOffsets => Ok(LeaderPeerRequest::UpdateOffsets(RequestMessage::new(header, + UpdateOffsetRequest::decode_from(src,version)?))) + } + + } + +} + + diff --git a/spu-server/src/controllers/leader_replica/replica_state.rs b/spu-server/src/controllers/leader_replica/replica_state.rs new file mode 100644 index 0000000000..89091f64e4 --- /dev/null +++ b/spu-server/src/controllers/leader_replica/replica_state.rs @@ -0,0 +1,467 @@ +use std::collections::BTreeMap; + +use log::debug; + use log::trace; + use log::error; + use log::warn; + +use kf_socket::SinkPool; + use kf_protocol::api::DefaultRecords; + use kf_protocol::api::Offset; + use kf_protocol::api::RequestMessage; + use kf_protocol::api::Isolation; + use metadata::partition::ReplicaKey; + use internal_api::messages::Replica; + use internal_api::UpdateLrsRequest; + use storage::FileReplica; + use storage::ConfigOption; + use storage::StorageError; + use types::SpuId; + use types::log_on_err; + use storage::SlicePartitionResponse; + use storage::ReplicaStorage; + use kf_socket::ExclusiveKfSink; + +use crate::core::storage::create_replica_storage; + use crate::controllers::follower_replica::FileSyncRequest; + use crate::controllers::follower_replica::PeerFileTopicReponse; + use crate::controllers::follower_replica::PeerFilePartitionResponse; + +use super::FollowerOffsetUpdate; + +#[derive(Debug, Clone, PartialEq)] +pub struct FollowerReplicaInfo { + hw: Offset, + leo: Offset, +} + + +impl Default for FollowerReplicaInfo { + fn default() -> Self { + Self { + hw: -1, + leo: -1 + } + } +} + +impl FollowerReplicaInfo { + + pub fn new(leo: Offset, hw: Offset) -> Self { + assert!(leo >= hw, "end offset >= high watermark"); + Self { + leo, + hw, + } + } + + pub fn hw(&self) -> Offset { + self.hw + } + + pub fn leo(&self) -> Offset { + self.leo + } + + pub fn is_same(&self,hw: Offset,leo: Offset) -> bool { + self.hw == hw && self.leo == leo + } + + // valid as long as its offsets are not at the default + pub fn is_valid(&self) -> bool { + self.hw != -1 && self.leo != -1 + } + + + +} + +impl From<(Offset, Offset)> for FollowerReplicaInfo { + fn from(value: (Offset, Offset)) -> Self { + Self::new(value.0, value.1) + } +}
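// A small illustrative sketch of the invariant above, using only this file's
// types: (leo, hw) tuples convert into FollowerReplicaInfo, leo must never
// trail hw, and (-1, -1) marks a follower whose state is not yet known.
//
//     let unknown = FollowerReplicaInfo::default();
//     assert!(!unknown.is_valid()); // hw == -1 && leo == -1
//
//     let caught_up: FollowerReplicaInfo = (20, 20).into(); // (leo, hw)
//     assert!(caught_up.is_valid());
//     assert!(caught_up.is_same(20, 20)); // (hw, leo)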
/// Leader Replica state +#[derive(Debug)] +pub struct LeaderReplicaState<S> { + replica_id: ReplicaKey, + leader_id: SpuId, + followers: BTreeMap<SpuId, FollowerReplicaInfo>, + storage: S, +} + +impl<S> LeaderReplicaState<S> { + /// create new leader state; follower_ids may contain the leader id, which is filtered out + pub fn new<R>(replica_id: R, leader_id: SpuId, storage: S, follower_ids: Vec<SpuId>) -> Self + where + R: Into<ReplicaKey>, + { + let mut state = Self { + replica_id: replica_id.into(), + leader_id, + followers: BTreeMap::new(), + storage, + }; + state.add_follower_replica(follower_ids); + state + } + + + pub fn replica_id(&self) -> &ReplicaKey { + &self.replica_id + } + + pub fn storage(&self) -> &S { + &self.storage + } + + #[allow(dead_code)] + pub fn mut_storage(&mut self) -> &mut S { + &mut self.storage + } + + /// probably only used in tests + #[allow(dead_code)] + pub(crate) fn followers(&self, spu: &SpuId) -> Option<FollowerReplicaInfo> { + self.followers + .get(spu) + .map(|val| val.clone()) + } + + + /// if a follower id doesn't exist yet, add it; otherwise ignore it + fn add_follower_replica(&mut self, follower_ids: Vec<SpuId>) { + + let leader_id = self.leader_id; + for id in follower_ids.into_iter().filter(|id| *id != leader_id) { + if self.followers.contains_key(&id) { + warn!("tried to add existing follower id: {} to replica: {}, ignoring", + id,self.replica_id); + } else { + trace!( + "inserting new follower index for leader: {}, replica: {}, follower: {}", + leader_id, + self.replica_id, + id + ); + self.followers.insert(id, FollowerReplicaInfo::default()); + } + + } + } + + +} + +impl<S> LeaderReplicaState<S> +where + S: ReplicaStorage, +{ + + pub fn leo(&self) -> Offset { + self.storage.get_leo() + } + + pub fn hw(&self) -> Offset { + self.storage.get_hw() + } + + /// update follower offsets; returns (status_needs_to_change, follower to be synced) + /// + /// // case 1: follower offset has the same value as the leader + /// // leader: leo: 2, hw: 2, follower: leo: 2, hw: 2 + /// // Input: leo: 2, hw: 2; this happens during follower resync. + /// // Expect: no changes + /// + /// // case 2: follower offset is the same as before + /// // leader: leo: 2, hw: 2, follower: leo: 1, hw: 1 + /// // Input: leo: 1, hw: 1 + /// // Expect: no status change, but follower sync + /// // + /// // case 3: different follower offset + /// // leader: leo: 3, hw: 3, follower: leo: 1, hw: 1 + /// // Input: leo: 2, hw: 2 + /// // Expect: status change and follower sync + pub fn update_follower_offsets<F>(&mut self, offset: F) -> (bool, Option<FollowerReplicaInfo>) + where + F: Into<FollowerOffsetUpdate>, + { + let follower_offset = offset.into(); + // if the follower offset is greater than the leader's, something is wrong; + // in that case we truncate the follower offset to the leader's + let follower_id = follower_offset.follower_id; + let mut follower_info = FollowerReplicaInfo::new(follower_offset.leo,follower_offset.hw); + + let leader_leo = self.leo(); + let leader_hw = self.hw(); + + if follower_info.leo > leader_leo { + warn!("follower leo: {} is greater than leader leo: {}",follower_info.leo,leader_leo); + follower_info.leo = leader_leo; + } + + let changed = + if let Some(old_info) = self.followers.insert(follower_id,follower_info.clone()) { + old_info != follower_info + } else { + false + }; + + ( + changed, + if leader_leo != follower_info.leo || leader_hw != follower_info.hw { + Some(follower_info) + } else { + None + } + ) + } + + + /// compute the list of followers that need to be synced, + /// by diffing each follower's end offset and high watermark against the leader's + fn need_follower_updates(&self) -> Vec<(SpuId, FollowerReplicaInfo)> { + + let leo = self.leo(); + let hw = self.hw(); + + trace!("computing follower offsets for leader: {}, replica: {}, end offset: {}, high watermark: {}",self.leader_id,self.replica_id,leo,hw); + + self.followers.iter() + .filter( | (_, follower_info) | follower_info.is_valid() && !follower_info.is_same(hw,leo)) + .map( |(follower_id,follower_info)| { + debug!("replica: {}, follower: {} needs to be updated",self.replica_id,follower_id); + trace!("follower: {} has different hw: {:#?}",follower_id,follower_info); + (*follower_id, follower_info.clone()) + }).collect() + + } + + /// render my state as an LRS (leader replica status) request + fn as_lrs_request(&self) -> UpdateLrsRequest { + + let leader = (self.leader_id,self.storage.get_hw(),self.storage.get_leo()).into(); + let replicas = self.followers.iter() + .map(|(follower_id,follower_info)| (*follower_id,follower_info.hw(),follower_info.leo()).into()) + .collect(); + + UpdateLrsRequest::new(self.replica_id.clone(),leader,replicas) + } + + pub async fn send_status_to_sc(&self,sc_sink: &ExclusiveKfSink) { + let mut message = RequestMessage::new_request(self.as_lrs_request()); + message + .get_mut_header() + .set_client_id(format!("spu: {}, replica: {}",self.leader_id,self.replica_id)); + + log_on_err!(sc_sink.send_request(&message).await); + debug!("sent replica status to sc: {}, replica:
{}",self.leader_id,self.replica_id); + } +} + + + + +impl LeaderReplicaState { + + /// create new replica state using file replica + pub async fn create_file_replica( + leader: Replica, + config: &ConfigOption, + ) -> Result { + trace!( + "creating new leader replica state: {:#?} using file replica", + leader + ); + + let storage = create_replica_storage(leader.leader, &leader.id, &config).await?; + + Ok(Self::new( + leader.id, + leader.leader, + storage, + leader.replicas, + )) + } + + + /// sync specific follower + pub async fn sync_follower(&self, sinks: &SinkPool,follower_id: SpuId,follower_info: &FollowerReplicaInfo) { + + if let Some(mut sink) = sinks.get_sink(&follower_id) { + + trace!( + "sink is found for follower: {}, ready to build sync records", + follower_id + ); + let mut sync_request = FileSyncRequest::default(); + let mut topic_response = PeerFileTopicReponse::default(); + topic_response.name = self.replica_id.topic.to_owned(); + let mut partition_response = PeerFilePartitionResponse::default(); + partition_response.partition_index = self.replica_id.partition; + self.read_records( + follower_info.leo, + Isolation::ReadUncommitted, + &mut partition_response, + ).await; + partition_response.last_stable_offset = self.leo(); + partition_response.high_watermark = self.hw(); + topic_response.partitions.push(partition_response); + sync_request.topics.push(topic_response); + + let request = RequestMessage::new_request(sync_request).set_client_id(format!( + "leader: {}, replica: {}", + self.leader_id, self.replica_id + )); + debug!( + "sending records follower: {}, response: {}", + follower_id, + request + ); + if let Err(err) = sink + .encode_file_slices(&request, request.header.api_version()) + .await + { + error!("error sending file slice: {:#?}", err); + } + } else { + warn!("no sink exits for follower: {}, skipping ", follower_id); + } + + } + + /// synchronize + pub async fn sync_followers(&self, sinks: &SinkPool) { + + let follower_sync = self.need_follower_updates(); + + for (follower_id, follower_info) in follower_sync { + self.sync_follower(sinks,follower_id,&follower_info).await; + } + } + + pub async fn read_records

( + &self, + offset: Offset, + isolation: Isolation, + partition_response: &mut P, + ) where + P: SlicePartitionResponse, + { + self.storage.read_records_with_isolation(offset,isolation,partition_response).await + } + + pub async fn send_records( + &mut self, + records: DefaultRecords, + update_highwatermark: bool, + ) -> Result<(), StorageError> { + trace!( + "writing records to leader: {} replica: {}, ", + self.leader_id, + self.replica_id + ); + self.storage + .send_records(records, update_highwatermark) + .await + } + + #[allow(dead_code)] + pub fn live_replicas(&self) -> Vec { + self.followers.keys().cloned().collect() + } + #[allow(dead_code)] + pub fn leader_id(&self) -> SpuId { + self.leader_id + } +} + +#[cfg(test)] +mod test { + + use storage::ReplicaStorage; + use kf_protocol::api::Offset; + + use super::LeaderReplicaState; + + struct MockReplica { + hw: Offset, + leo: Offset, + } + + impl MockReplica { + fn new(leo: Offset, hw: Offset) -> Self { + MockReplica { + hw, + leo, + } + } + } + + impl ReplicaStorage for MockReplica { + + fn get_hw(&self) -> Offset { + self.hw + } + + fn get_leo(&self) -> Offset { + self.leo + } + } + + #[test] + fn test_follower_update() { + utils::init_logger(); + let mock_replica = MockReplica::new(20, 10); // eof, hw + + // inserting new replica state, this should set follower offset to -1,-1 as inital state + let mut replica_state = LeaderReplicaState::new(("test", 1), 5000, mock_replica, vec![5001]); + let follower_info = replica_state.followers.get(&5001).expect("follower should exists"); + assert_eq!(follower_info.hw,-1); + assert_eq!(follower_info.leo,-1); + + + // follower sends update with it's current state 10,10 + // this should trigger status update and follower sync + assert_eq!(replica_state.update_follower_offsets((5001,10,10)),(true,Some((10,10).into()))); + + + // follower resync which sends same offset status, in this case no update but should trigger resync + assert_eq!(replica_state.update_follower_offsets((5001,10,10)),(false,Some((10,10).into()))); + + // finally follower updates the offset, this should trigger update but no resync + assert_eq!(replica_state.update_follower_offsets((5001,20,10)),(true,None)); + + // follower resync with same value, in this case no update and no resync + assert_eq!(replica_state.update_follower_offsets((5001,20,10)),(false,None)); + } + + + #[test] + fn test_leader_update() { + + utils::init_logger(); + let mock_replica = MockReplica::new(20, 10); // eof, hw + + // inserting new replica state, this should set follower offset to -1,-1 as inital state + let mut replica_state = LeaderReplicaState::new(("test", 1), 5000, mock_replica, vec![5001]); + assert_eq!(replica_state.need_follower_updates().len(), 0); + + // update high watermark of our replica to same as endoffset + + replica_state.mut_storage().hw = 20; + + assert_eq!(replica_state.need_follower_updates().len(), 0); + + assert_eq!(replica_state.update_follower_offsets((5001,10,10)),(true,Some((10,10).into()))); + let updates = replica_state.need_follower_updates(); + assert_eq!(updates.len(), 1); + assert_eq!(updates[0], (5001, (10, 10).into())); + + assert_eq!(replica_state.update_follower_offsets((5001,20,20)),(true,None)); + assert_eq!(replica_state.need_follower_updates().len(), 0); + + } + +} diff --git a/spu-server/src/controllers/leader_replica/update_offsets.rs b/spu-server/src/controllers/leader_replica/update_offsets.rs new file mode 100644 index 0000000000..9ff78ce2a0 --- /dev/null +++ 
b/spu-server/src/controllers/leader_replica/update_offsets.rs @@ -0,0 +1,29 @@ +use kf_protocol::derive::Decode; +use kf_protocol::derive::Encode; +use kf_protocol::api::Request; +use kf_protocol::api::Offset; +use metadata::partition::ReplicaKey; + +use super::KfLeaderPeerApiEnum; + +#[derive(Decode, Encode, Default, Debug)] +pub struct UpdateOffsetRequest { + pub replicas: Vec, +} + +impl Request for UpdateOffsetRequest { + const API_KEY: u16 = KfLeaderPeerApiEnum::UpdateOffsets as u16; + const DEFAULT_API_VERSION: i16 = 0; + type Response = UpdateOffsetResponse; +} + +#[derive(Decode, Encode, Default, Debug)] +pub struct ReplicaOffsetRequest { + pub replica: ReplicaKey, + pub leo: Offset, + pub hw: Offset, +} + +// no content, this is one way request +#[derive(Decode, Encode, Default, Debug)] +pub struct UpdateOffsetResponse {} diff --git a/spu-server/src/controllers/mod.rs b/spu-server/src/controllers/mod.rs new file mode 100644 index 0000000000..eba348ea9b --- /dev/null +++ b/spu-server/src/controllers/mod.rs @@ -0,0 +1,3 @@ +pub(crate) mod follower_replica; +pub(crate) mod leader_replica; +pub(crate) mod sc; \ No newline at end of file diff --git a/spu-server/src/controllers/sc/action.rs b/spu-server/src/controllers/sc/action.rs new file mode 100644 index 0000000000..a36007fba1 --- /dev/null +++ b/spu-server/src/controllers/sc/action.rs @@ -0,0 +1,7 @@ +use metadata::partition::ReplicaKey; + +#[derive(Debug)] +pub enum SupervisorCommand { + #[allow(dead_code)] + ReplicaLeaderTerminated(ReplicaKey), +} diff --git a/spu-server/src/controllers/sc/dispatcher.rs b/spu-server/src/controllers/sc/dispatcher.rs new file mode 100644 index 0000000000..faa33552fb --- /dev/null +++ b/spu-server/src/controllers/sc/dispatcher.rs @@ -0,0 +1,607 @@ +use std::time::Duration; +use std::process; +use std::io::Error as IoError; +use std::sync::Arc; +use std::net::SocketAddr; +use std::convert::TryInto; + +use log::info; +use log::trace; +use log::error; +use log::debug; +use log::warn; +use types::print_cli_err; + +use futures::channel::mpsc::Receiver; +use futures::channel::mpsc::Sender; +use futures::channel::mpsc::channel; +use futures::StreamExt; +use futures::FutureExt; +use futures::select; +use futures::sink::SinkExt; + +use future_helper::spawn; +use future_helper::sleep; +use internal_api::InternalSpuApi; +use internal_api::InternalSpuRequest; +use internal_api::RegisterSpuRequest; +use internal_api::UpdateSpuRequest; +use internal_api::UpdateReplicaRequest; +use internal_api::UpdateAllRequest; +use internal_api::messages::Replica; +use kf_protocol::api::RequestMessage; +use kf_socket::KfSocket; +use kf_socket::KfSocketError; +use kf_socket::ExclusiveKfSink; +use storage::FileReplica; +use metadata::partition::ReplicaKey; +use types::log_on_err; +use utils::actions::Actions; + +use crate::core::SharedGlobalContext; +use crate::core::SpecChange; +use crate::controllers::follower_replica::ReplicaFollowerController; +use crate::controllers::follower_replica::FollowerReplicaControllerCommand; +use crate::controllers::leader_replica::ReplicaLeaderController; +use crate::controllers::leader_replica::LeaderReplicaState; +use crate::controllers::leader_replica::LeaderReplicaControllerCommand; +use crate::InternalServerError; + +use super::SupervisorCommand; + +/// Controller for handling connection to SC +/// including registering and reconnect +pub struct ScDispatcher { + termination_receiver: Receiver, + #[allow(dead_code)] + termination_sender: Sender, + #[allow(dead_code)] + 
supervisor_command_sender: Sender, + supervisor_command_receiver: Receiver, + ctx: SharedGlobalContext, +} + +impl ScDispatcher { + + pub fn new( + ctx: SharedGlobalContext + ) -> Self { + + let (termination_sender,termination_receiver) = channel(1); + let (supervisor_command_sender,supervisor_command_receiver) = channel(100); + Self { + termination_receiver, + termination_sender, + supervisor_command_sender, + supervisor_command_receiver, + ctx + } + } +} + +impl ScDispatcher { + + + /// start the controller with ctx and receiver + pub fn run(self) { + + spawn(self.dispatch_loop()); + } + + async fn dispatch_loop(mut self) { + info!("starting SC Dispatcher"); + + loop { + if let Some(mut socket) = self.create_socket_to_sc().await { + trace!( + "established connection to sc for spu: {}", + self.ctx.local_spu_id() + ); + + // register and exit on error + match self.send_spu_registeration(&mut socket).await { + Ok(_) => {} + Err(err) => { + print_cli_err!(format!("cannot register with sc: {}", err)); + process::exit(0x0100); + } + } + + // continously process updates from and send back status to SC + match self.sc_request_loop(socket).await { + Ok(_) => {} + Err(err) => warn!("error, connecting to sc: {:#?}", err), + } + + // We lost connection to sc. Retry again + // Currently we use 3 seconds to retry but this should be using backoff algorithm + sleep(Duration::from_millis(3000)).await + + } + } + } + + /// dispatch sc request + async fn sc_request_loop(&mut self, socket: KfSocket) -> Result<(), KfSocketError> { + let (sink, mut stream) = socket.split(); + let mut api_stream = stream.api_stream::(); + + let shared_sink = Arc::new(ExclusiveKfSink::new(sink)); + loop { + select! { + sc_request = api_stream.next().fuse() => { + + if let Some(sc_msg) = sc_request { + if let Ok(req_message) = sc_msg { + match req_message { + + InternalSpuRequest::UpdateAllRequest(request) => { + if let Err(err) = self.handle_sync_all_request(request,shared_sink.clone()).await { + error!("error handling all request from sc {}", err); + } + }, + InternalSpuRequest::UpdateReplicaRequest(request) => { + if let Err(err) = self.handle_update_replica_request(request,shared_sink.clone()).await { + error!("error handling update replica request: {}",err); + } + }, + InternalSpuRequest::UpdateSpuRequest(request) => { + if let Err(err) = self.handle_update_spu_request(request,shared_sink.clone()).await { + error!("error handling update spu request: {}",err); + } + } + } + + } else { + debug!("no more sc msg content, end"); + break; + } + + } else { + debug!("sc connection terminated"); + break; + } + + }, + super_command = self.supervisor_command_receiver.next().fuse() => { + + } + + } + + } + + Ok(()) + } + + /// register local spu to sc + async fn send_spu_registeration( + &self, + socket: &mut KfSocket, + ) -> Result { + + let local_spu_id = self.ctx.local_spu_id(); + + debug!("spu '{}' registration request", local_spu_id); + + let register_req = RegisterSpuRequest::new(local_spu_id); + let mut message = RequestMessage::new_request(register_req); + message + .get_mut_header() + .set_client_id(format!("spu: {}", local_spu_id)); + + let response = socket.send(&message).await?; + + trace!("register response: {:#?}", response); + + let register_resp = &response.response; + if register_resp.is_error() { + warn!( + "spu '{}' registration failed: {}", + local_spu_id, + register_resp.error_message() + ); + + Ok(false) + } else { + debug!("spu '{}' registration Ok", local_spu_id); + + Ok(true) + } + } + + /// connect to sc if 
can't connect try until we succeed + /// or if we received termination message + async fn create_socket_to_sc(&mut self) -> Option { + let spu_id = self.ctx.local_spu_id(); + let sc_endpoint = self.ctx.config().sc_endpoint(); + + debug!("trying to resolve sc endpoint: {}",sc_endpoint); + let addr: SocketAddr = sc_endpoint.clone().try_into().expect("sc endpoint should be resolving"); + debug!("sc endpoint resolved to: {}",addr); + + let wait_interval = self.ctx.config().sc_retry_ms; + loop { + trace!( + "trying to create socket to sc: {:#?} for spu: {}", + addr, + spu_id + ); + let connect_future = KfSocket::fusable_connect(&addr); + + select! { + socket_res = connect_future.fuse() => { + match socket_res { + Ok(socket) => { + debug!("connected to sc for spu: {}",spu_id); + return Some(socket) + } + Err(err) => warn!("error connecting to sc: {}",err) + } + + trace!("sleeping {} ms to connect to sc: {}",wait_interval,spu_id); + sleep(Duration::from_millis(wait_interval as u64)).await; + }, + _ = self.termination_receiver.next() => { + info!("termination message received"); + return None + } + } + } + } + + + /// Bulk Update Handler sent by Controller + /// + async fn handle_sync_all_request( + &mut self, + req_msg: RequestMessage, + shared_sc_sink: Arc + ) -> Result<(), IoError> { + + let (_, request) = req_msg.get_header_request(); + + debug!( + "RCVD-Req << (CTRL): UpdateAll({} spus, {} replicas)", + request.spus.len(), + request.replicas.len(), + ); + + + let spu_actions = self.ctx.spu_localstore().sync_all(request.spus); + + trace!("all spu actions detail: {:#?}",spu_actions); + + /* + * For now, there are nothing to do. + for spu_action in spu_actions.into_iter() { + + match spu_action { + + SpecChange::Add(_) => {}, + SpecChange::Mod(_,_) => {}, + SpecChange::Delete(_) => {} + + } + + } + */ + + let replica_actions = self.ctx.replica_localstore().sync_all(request.replicas); + self.apply_replica_actions(replica_actions,shared_sc_sink).await; + Ok(()) + } + + + /// + /// Follower Update Handler sent by a peer Spu + /// + async fn handle_update_replica_request( + &mut self, + req_msg: RequestMessage, + shared_sc_sink: Arc + ) -> Result<(), IoError> { + + let (_, request) = req_msg.get_header_request(); + + debug!("received replica update from sc: {:#?}",request); + + let replica_actions = self.ctx.replica_localstore().apply_changes(request.replicas().messages); + self.apply_replica_actions(replica_actions,shared_sc_sink).await; + Ok(()) + } + + + /// + /// Follower Update Handler sent by a peer Spu + /// + async fn handle_update_spu_request( + &mut self, + req_msg: RequestMessage, + _shared_sc_sink: Arc + ) -> Result<(), IoError> { + + let (_, request) = req_msg.get_header_request(); + + debug!("received spu update from sc: {:#?}",request); + + let _spu_actions = self.ctx.spu_localstore().apply_changes(request.spus()); + Ok(()) + } + + + async fn apply_replica_actions( + &self, + actions: Actions>, + shared_sc_sink: Arc + ) { + + if actions.count() == 0 { + debug!("no replica actions to process. 
ignoring"); + return; + } + + trace!("applying replica leader {} actions", actions.count()); + + let local_id = self.ctx.local_spu_id(); + + for replica_action in actions.into_iter() { + + trace!("applying replica action: {:#?}", replica_action); + + match replica_action { + SpecChange::Add(new_replica) => { + if new_replica.leader == local_id { + self.add_leader_replica( + new_replica, + shared_sc_sink.clone() + ) + .await; + } else { + self.add_follower_replica( + new_replica + ).await; + } + }, + SpecChange::Delete(deleted_replica) => { + if deleted_replica.leader == local_id { + self.remove_leader_replica(&deleted_replica.id); + } else { + self.remove_follower_replica(deleted_replica); + } + }, + SpecChange::Mod(new_replica,old_replica) => { + trace!("replica changed, old: {:#?}, new: {:#?}",new_replica,old_replica); + + // check for leader change + if new_replica.leader != old_replica.leader { + if new_replica.leader == local_id { + // we become leader + self.promote_replica(new_replica,old_replica,shared_sc_sink.clone()); + } else { + // we are follower + // if we were leader before, we demote out self + if old_replica.leader == local_id { + self.demote_replica(new_replica).await; + } else { + // we stay as follower but we switch to new leader + debug!("still follower but switching leader: {}",new_replica); + self.remove_follower_replica(old_replica); + self.add_follower_replica(new_replica).await; + } + } + } else { + if new_replica.leader == local_id { + self.update_leader_replica( + new_replica + ) + .await; + } else { + self.update_follower_replica(new_replica).await; + } + } + } + } + + } + } + + + async fn add_leader_replica( + &self, + replica: Replica, + shared_sc_sink: Arc) + { + + debug!("adding new leader replica: {}",replica); + + let storage_log = self.ctx.config().storage().new_config(); + let replica_id = replica.id.clone(); + + match LeaderReplicaState::create_file_replica(replica, &storage_log).await { + Ok(leader_replica) => { + debug!("file replica for leader is created: {}",storage_log); + self.spawn_leader_controller(replica_id,leader_replica,shared_sc_sink); + }, + Err(err) => { + error!("error creating storage foer leader replica {:#?}",err); + // TODO: send status back to SC + } + } + + } + + + async fn update_leader_replica( + &self, + replica: Replica, + ) { + debug!("updating leader controller: {}", replica.id); + + if self.ctx.leaders_state().has_replica(&replica.id) { + + debug!("leader replica was found, sending replica info: {}",replica); + + match self.ctx.leaders_state().send_message( + &replica.id, + LeaderReplicaControllerCommand::UpdateReplicaFromSc(replica.clone()), + ) + .await + { + Ok(status) => { + if !status { + error!("leader controller mailbox: {} was not founded",replica.id); + } + } + Err(err) => error!("error sending external command: {:#?} to replica controller: {}", err,replica.id), + } + } else { + error!("leader controller was not found: {}",replica.id) + } + } + + + /// spwan new leader controller + fn spawn_leader_controller( + &self, + replica_id: ReplicaKey, + leader_state: LeaderReplicaState, + shared_sc_sink: Arc) + { + + debug!("spawning new leader controller for {}",replica_id); + + let (sender, receiver) = channel(10); + + if let Some(old_replica) = self.ctx.leaders_state().insert_replica(replica_id.clone(),leader_state, sender) { + error!("there was existing replica when creating new leader replica: {}",old_replica.replica_id()); + } + + let leader_controller = ReplicaLeaderController::new( + self.ctx.local_spu_id(), + 
/// spawn a new leader controller + fn spawn_leader_controller( + &self, + replica_id: ReplicaKey, + leader_state: LeaderReplicaState<FileReplica>, + shared_sc_sink: Arc<ExclusiveKfSink>) + { + + debug!("spawning new leader controller for {}",replica_id); + + let (sender, receiver) = channel(10); + + if let Some(old_replica) = self.ctx.leaders_state().insert_replica(replica_id.clone(),leader_state, sender) { + error!("there was an existing replica when creating new leader replica: {}",old_replica.replica_id()); + } + + let leader_controller = ReplicaLeaderController::new( + self.ctx.local_spu_id(), + replica_id, + receiver, + self.ctx.leader_state_owned(), + self.ctx.followers_sink_owned(), + shared_sc_sink + ); + leader_controller.run(); + + } + + + pub fn remove_leader_replica( + &self, + id: &ReplicaKey) { + + debug!("removing leader replica: {}", id); + + if self.ctx.leaders_state().remove_replica(id).is_none() { + error!("failed to find leader replica: {} when removing",id); + } + } + + + + /// Promote a follower replica to leader. + /// This is done in 3 steps: + /// // 1: Remove the follower replica from the followers state + /// // 2: Terminate the followers controller if needed (if there are no more follower replicas for that controller) + /// // 3: Start the leader controller + pub fn promote_replica( + &self, + new_replica: Replica, + old_replica: Replica, + shared_sc_sink: Arc<ExclusiveKfSink>) + { + + debug!("promoting replica: {} from: {}",new_replica,old_replica); + + if let Some(follower_replica) = self.ctx.followers_state().remove_replica(&old_replica.leader,&old_replica.id) { + + debug!("old follower replica exists, converting to leader: {}",old_replica.id); + + let leader_state = LeaderReplicaState::new( + new_replica.id.clone(), + new_replica.leader, + follower_replica.storage_owned(), + new_replica.replicas + ); + + self.spawn_leader_controller(new_replica.id,leader_state,shared_sc_sink); + + } + } + + + /// Demote a leader replica to follower. + /// This only happens on manual election + pub async fn demote_replica(&self,replica: Replica) { + + debug!("demoting replica: {}",replica); + + if let Some(leader_replica_state) = self.ctx.leaders_state().remove_replica(&replica.id) { + drop(leader_replica_state); + // for now, we re-scan the file replica + self.add_follower_replica(replica).await; + } else { + error!("leader controller was not found: {}",replica.id) + } + } + + + + + /// add a new follower controller and its mailbox + async fn add_follower_replica(&self,replica: Replica) { + + let leader = &replica.leader; + debug!("trying to add follower replica: {}",replica); + + if let Some(mut sender) = self.ctx.followers_state().mailbox(leader) { + debug!("existing follower controller exists: {}, sending request to controller",replica); + log_on_err!(sender.send(FollowerReplicaControllerCommand::AddReplica(replica)).await) + } else { + // we need to spin up a new follower controller + debug!("no existing follower controller exists for {}, need to spin one up",replica); + let (mut sender,receiver) = self.ctx.followers_state().insert_mailbox(*leader); + let follower_controller = ReplicaFollowerController::new( + *leader, + receiver, + self.ctx.spu_localstore_owned(), + self.ctx.followers_state_owned(), + self.ctx.config_owned() + ); + follower_controller.run(); + log_on_err!(sender.send(FollowerReplicaControllerCommand::AddReplica(replica)).await); + } + } + + /// update a follower replica + async fn update_follower_replica(&self,replica: Replica) { + + let leader = &replica.leader; + debug!("trying to update follower replica: {}",replica); + + if let Some(mut sender) = self.ctx.followers_state().mailbox(leader) { + debug!("existing follower controller exists: {}, sending update request to controller",replica); + log_on_err!(sender.send(FollowerReplicaControllerCommand::UpdateReplica(replica)).await) + } else { + error!("no follower controller found: {}",replica); + } + } + + + + + fn remove_follower_replica(&self,replica: Replica) { + + debug!("removing follower replica: {}",replica); + if self.ctx.followers_state().remove_replica(&replica.leader,&replica.id).is_none() { + error!("there was no follower replica:
{}",replica); + } + + } + + +} diff --git a/spu-server/src/controllers/sc/mod.rs b/spu-server/src/controllers/sc/mod.rs new file mode 100644 index 0000000000..5d7430c125 --- /dev/null +++ b/spu-server/src/controllers/sc/mod.rs @@ -0,0 +1,6 @@ + +mod dispatcher; +mod action; + +pub use dispatcher::ScDispatcher; +pub use action::SupervisorCommand; \ No newline at end of file diff --git a/spu-server/src/core/global_context.rs b/spu-server/src/core/global_context.rs new file mode 100644 index 0000000000..5198d57d9f --- /dev/null +++ b/spu-server/src/core/global_context.rs @@ -0,0 +1,113 @@ +//! +//! # Global Context +//! +//! Global Context stores entities that persist through system operation. +//! +use std::sync::Arc; +use std::fmt::Debug; + +use kf_socket::SharedSinkPool; +use kf_socket::SinkPool; +use types::SpuId; +use storage::ReplicaStorage; + +use crate::config::SpuConfig; +use crate::controllers::leader_replica::SharedReplicaLeadersState; +use crate::controllers::follower_replica::FollowersState; +use crate::controllers::follower_replica::SharedFollowersState; +use crate::controllers::leader_replica::ReplicaLeadersState; +use super::spus::SharedSpuLocalStore; +use super::SharedReplicaLocalStore; +use super::spus::SpuLocalStore; +use super::replica::ReplicaStore; +use super::SharedSpuConfig; + +#[derive(Debug)] +pub struct GlobalContext { + config: SharedSpuConfig, + spu_localstore: SharedSpuLocalStore, + replica_localstore: SharedReplicaLocalStore, + leaders_state: SharedReplicaLeadersState, + followers_state: SharedFollowersState, + follower_sinks: SharedSinkPool +} + +// ----------------------------------- +// Global Contesxt - Implementation +// ----------------------------------- + +impl GlobalContext where S: ReplicaStorage + Debug { + + + pub fn new_shared_context(spu_config: SpuConfig) -> Arc { + + Arc::new(GlobalContext::new(spu_config)) + } + + + pub fn new(spu_config: SpuConfig) -> Self + { + + GlobalContext { + spu_localstore: SpuLocalStore::new_shared(), + replica_localstore: ReplicaStore::new_shared(), + config: Arc::new(spu_config), + follower_sinks: SinkPool::new_shared(), + leaders_state: ReplicaLeadersState::new_shared(), + followers_state: FollowersState::new_shared() + } + } + + + pub fn spu_localstore_owned(&self) -> SharedSpuLocalStore { + self.spu_localstore.clone() + } + + + /// retrieves local spu id + pub fn local_spu_id(&self) -> SpuId { + self.config.id + } + + pub fn spu_localstore(&self) -> &SpuLocalStore { + &self.spu_localstore + } + + pub fn replica_localstore(&self) -> &ReplicaStore { + &self.replica_localstore + } + + pub fn follower_sinks(&self) -> &SinkPool { + &self.follower_sinks + } + + pub fn followers_sink_owned(&self) -> SharedSinkPool { + self.follower_sinks.clone() + } + + pub fn leaders_state(&self) -> &ReplicaLeadersState { + &self.leaders_state + } + + pub fn leader_state_owned(&self) -> SharedReplicaLeadersState { + self.leaders_state.clone() + } + + pub fn followers_state(&self) -> &FollowersState { + &self.followers_state + } + + pub fn followers_state_owned(&self) -> SharedFollowersState { + self.followers_state.clone() + } + + pub fn config(&self) -> &SpuConfig { + &self.config + } + + pub fn config_owned(&self) -> SharedSpuConfig { + self.config.clone() + } + + +} diff --git a/spu-server/src/core/mod.rs b/spu-server/src/core/mod.rs new file mode 100644 index 0000000000..27cff43a4e --- /dev/null +++ b/spu-server/src/core/mod.rs @@ -0,0 +1,26 @@ + +mod global_context; +mod store; +pub(crate) mod storage; + +pub mod spus; +pub mod 
replica; + +pub use self::global_context::GlobalContext; +pub use self::store::Spec; +pub use self::store::LocalStore; +pub use self::store::SpecChange; + +pub use self::spus::SpuLocalStore; +pub use self::replica::SharedReplicaLocalStore; + +use std::sync::Arc; +use ::storage::FileReplica; +use kf_socket::SinkPool; +use types::SpuId; +use crate::config::SpuConfig; + +pub type SharedGlobalContext = Arc>; +pub type DefaultSharedGlobalContext = SharedGlobalContext; +pub type SharedSpuSinks = Arc>; +pub type SharedSpuConfig = Arc; diff --git a/spu-server/src/core/replica/metadata.rs b/spu-server/src/core/replica/metadata.rs new file mode 100644 index 0000000000..02fecf6a36 --- /dev/null +++ b/spu-server/src/core/replica/metadata.rs @@ -0,0 +1,25 @@ + +use internal_api::messages::Replica; +use metadata::partition::ReplicaKey; + +use crate::core::Spec; +use crate::core::LocalStore; + + +impl Spec for Replica { + + const LABEL: &'static str = "Replica"; + + type Key = ReplicaKey; + + fn key(&self) -> &Self::Key { + &self.id + } + + fn key_owned(&self) -> Self::Key { + self.id.clone() + } +} + +pub type ReplicaStore = LocalStore; + diff --git a/spu-server/src/core/replica/mod.rs b/spu-server/src/core/replica/mod.rs new file mode 100644 index 0000000000..c4c627ca68 --- /dev/null +++ b/spu-server/src/core/replica/mod.rs @@ -0,0 +1,7 @@ +mod metadata; + +pub use self::metadata::ReplicaStore; + +use std::sync::Arc; + +pub type SharedReplicaLocalStore = Arc; \ No newline at end of file diff --git a/spu-server/src/core/spus/metadata.rs b/spu-server/src/core/spus/metadata.rs new file mode 100644 index 0000000000..831f2d036e --- /dev/null +++ b/spu-server/src/core/spus/metadata.rs @@ -0,0 +1,130 @@ +// +// Peer Spus (all spus in the system, received from Sc) +// >>> define what each element of SPU is used for +// + + +use metadata::spu::SpuSpec; +use types::SpuId; + +use crate::core::Spec; +use crate::core::LocalStore; + + +impl Spec for SpuSpec { + + const LABEL: &'static str = "SPU"; + + type Key = SpuId; + + fn key(&self) -> &Self::Key { + &self.id + } + + fn key_owned(&self) -> Self::Key { + self.id + } + + +} + +pub type SpuLocalStore = LocalStore; + + +impl SpuLocalStore { + + + #[cfg(test)] + pub fn indexed_by_id(&self) -> std::collections::BTreeMap { + let mut map: std::collections::BTreeMap = std::collections::BTreeMap::new(); + + for spu in self.inner_store().read().values() { + map.insert(spu.id, spu.clone()); + } + + map + } + + + #[cfg(test)] + pub fn bulk_add(self,spus: Vec) -> Self { + for id in spus { + self.insert(id.into()); + } + self + } + +} + +// ----------------------------------- +// Unit Tests +// ----------------------------------- + +#[cfg(test)] +pub mod test { + use std::collections::BTreeMap; + + use crate::core::SpuLocalStore; + + #[test] + fn test_indexed_by_id() { + + let spus = SpuLocalStore::default() + .bulk_add(vec![ + 5000, + 5001, + 5002, + ]); + + // run test + let indexed_spus = spus.indexed_by_id(); + + // check result + let mut expected_indexed_spus = BTreeMap::default(); + expected_indexed_spus.insert(5000, spus.spec(&5000).unwrap()); + expected_indexed_spus.insert(5001, spus.spec(&5001).unwrap()); + expected_indexed_spus.insert(5002, spus.spec(&5002).unwrap()); + + assert_eq!(indexed_spus, expected_indexed_spus); + } + + #[test] + fn test_peer_spu_routines() { + + let spus = SpuLocalStore::default() + .bulk_add(vec![ + 5000, + 5001, + 5002 + ]); + + let spu_5000 = spus.spec(&5000).unwrap(); + let spu_5001 = spus.spec(&5001).unwrap(); + let spu_5002 = 
spus.spec(&5002).unwrap(); + + // test >> count() + let count = spus.count(); + assert_eq!(count, 3); + + // test >> names() + let names = spus.all_keys(); + assert_eq!(names, vec![5000,5001,5002]); + + // test >> all_spus() + let all_spus = spus.all_values(); + assert_eq!(all_spus.len(), 3); + assert_eq!(all_spus[0], spu_5000); + assert_eq!(all_spus[1], spu_5001); + assert_eq!(all_spus[2], spu_5002); + + + + // test >> delete() + spus.delete(&5001); + let remaining_spus = spus.all_values(); + assert_eq!(remaining_spus.len(), 2); + assert_eq!(remaining_spus[0], spu_5000); + assert_eq!(remaining_spus[1], spu_5002); + } + +} diff --git a/spu-server/src/core/spus/mod.rs b/spu-server/src/core/spus/mod.rs new file mode 100644 index 0000000000..26734c09c4 --- /dev/null +++ b/spu-server/src/core/spus/mod.rs @@ -0,0 +1,9 @@ +mod metadata; + + + +pub use self::metadata::SpuLocalStore; + +use std::sync::Arc; + +pub type SharedSpuLocalStore = Arc; diff --git a/spu-server/src/core/storage/mod.rs b/spu-server/src/core/storage/mod.rs new file mode 100644 index 0000000000..ef57f6731e --- /dev/null +++ b/spu-server/src/core/storage/mod.rs @@ -0,0 +1,21 @@ +use storage::ConfigOption; +use storage::FileReplica; +use storage::StorageError; +use metadata::partition::ReplicaKey; +use types::SpuId; + +fn default_config(spu_id: SpuId, config: &ConfigOption) -> ConfigOption { + let base_dir = config.base_dir.join(format!("spu-logs-{}", spu_id)); + let new_config = config.clone(); + new_config.base_dir(base_dir) +} + +/// Create new replica storage. Each replica is stored with 'spu' prefix +pub(crate) async fn create_replica_storage( + local_spu: SpuId, + replica: &ReplicaKey, + base_config: &ConfigOption, +) -> Result { + let config = default_config(local_spu, base_config); + FileReplica::create(replica.topic.clone(), replica.partition as u32, 0, &config).await +} diff --git a/spu-server/src/core/store.rs b/spu-server/src/core/store.rs new file mode 100644 index 0000000000..102babde51 --- /dev/null +++ b/spu-server/src/core/store.rs @@ -0,0 +1,330 @@ +// +// Peer Spus (all spus in the system, received from Sc) +// >>> define what each element of SPU is used for +// +use std::sync::Arc; +use std::fmt::Display; +use std::fmt::Debug; + +use log::trace; +use log::debug; +use log::error; + +use internal_api::messages::Message; +use internal_api::messages::MsgType; +use kf_protocol::{Decoder, Encoder}; + +use utils::actions::Actions; +use utils::SimpleConcurrentBTreeMap; + +pub trait Spec { + + const LABEL: &'static str; + type Key: Ord + Clone + ToString; + + fn key(&self) -> &Self::Key; + + fn key_owned(&self) -> Self::Key; + +} + +#[derive(Debug,PartialEq,Clone)] +pub enum SpecChange +{ + Add(S), + Mod(S ,S), // new, old + Delete(S) +} + + +#[derive(Debug)] +pub struct LocalStore(SimpleConcurrentBTreeMap) + where S: Spec; + + +// ----------------------------------- +// PeerSpus +// ----------------------------------- + +impl Default for LocalStore + where S: Spec +{ + fn default() -> Self { + Self(SimpleConcurrentBTreeMap::new()) + } +} + +impl ::std::cmp::PartialEq for LocalStore + where S: Spec + PartialEq +{ + fn eq(&self, other: &Self) -> bool { + for (name, local_spu) in self.0.read().iter() { + let other_list = other.0.read(); + let other_spu = match other_list.get(name) { + Some(val) => val, + None => return false, + }; + if local_spu != other_spu { + return false; + } + } + true + } +} + +impl LocalStore + where S: Spec +{ + + + #[allow(unused)] + pub fn inner_store(&self) -> &SimpleConcurrentBTreeMap { + 
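// A minimal sketch of the Spec/LocalStore pattern defined above, using a
// hypothetical `TopicSpec` (illustrative only; not a type in this patch):
//
//     struct TopicSpec { name: String }
//
//     impl Spec for TopicSpec {
//         const LABEL: &'static str = "Topic";
//         type Key = String;
//         fn key(&self) -> &Self::Key { &self.name }
//         fn key_owned(&self) -> Self::Key { self.name.clone() }
//     }
//
//     // insert/delete/contains_key come for free on LocalStore<TopicSpec>;
//     // sync_all/apply_changes additionally require the spec to be
//     // Clone + PartialEq + Debug + Encoder + Decoder.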
&self.0 + } + + pub fn new_shared() -> Arc { + Arc::new(Self::default()) + } + + /// insert new, if there is existing entry, return it + pub fn insert(&self,spec: S) -> Option { + self.0.write().insert(spec.key_owned(),spec) + } + + + pub fn delete(&self, id: &S::Key) -> Option { + self.0.write().remove(id) + } + + #[allow(dead_code)] + pub fn contains_key(&self, key: &S::Key) -> bool { + self.0.read().contains_key(key) + } + + #[allow(dead_code)] + pub fn count(&self) -> i32 { + self.0.read().len() as i32 + } + +} + + +impl LocalStore + where S: Spec + Clone + PartialEq + Debug + Encoder + Decoder , + S::Key : Display +{ + /// Sync with source of truth. + /// Returns diff as Change + pub fn sync_all(&self, source_specs: Vec) -> Actions> { + + let (mut add_cnt, mut mod_cnt, mut del_cnt, mut skip_cnt) = (0, 0, 0, 0); + let mut local_keys = self.all_keys(); + let mut actions = Actions::default(); + + debug!("apply all <{}> {} commands", S::LABEL,source_specs.len()); + + for new_spu in source_specs { + + let id = new_spu.key_owned(); + + if let Some(old_spu) = self.insert(new_spu.clone()) { + if old_spu == new_spu { + trace!("no changes: {}",new_spu.key()); + } else { + actions.push(SpecChange::Mod(new_spu,old_spu)); + mod_cnt += 1; + } + local_keys.retain( |s| *s != id); // remove visited id + } else { + actions.push(SpecChange::Add(new_spu)); + add_cnt += 1; + } + } + + // remove any unseen id + for old_id in local_keys { + if let Some(old_spu) = self.delete(&old_id) { + del_cnt += 1; + actions.push(SpecChange::Delete(old_spu)); + } else { + error!("delete failed during apply all spu"); + skip_cnt += 1; + } + } + + trace!( + "Apply All <{}> Spec changes: [add:{}, mod:{}, del:{}, skip:{}]", + S::LABEL, + add_cnt, + mod_cnt, + del_cnt, + skip_cnt + ); + + actions + } + + + /// apply changes coming from sc which generates spec change actions + pub fn apply_changes(&self, changes: Vec>) -> Actions> { + + let (mut add_cnt, mut mod_cnt, mut del_cnt, mut skip_cnt) = (0, 0, 0, 0); + debug!("apply update <{}> {} requests", S::LABEL,changes.len()); + let mut actions = Actions::default(); + + for change in changes { + + match change.header { + + MsgType::UPDATE => { + let new_spu = change.content; + if let Some(old_spu) = self.insert(new_spu.clone()) { + if old_spu == new_spu { + trace!("no changes: {}",new_spu.key()); + } else { + actions.push(SpecChange::Mod(new_spu,old_spu)); + mod_cnt += 1; + } + } else { + actions.push(SpecChange::Add(new_spu)); + add_cnt += 1; + } + }, + MsgType::DELETE => { + let delete_spu = change.content; + if let Some(old_spu) = self.delete(delete_spu.key()) { + del_cnt += 1; + actions.push(SpecChange::Delete(old_spu)); + } else { + error!("delete failed: {}",delete_spu.key()); + skip_cnt += 1; + } + } + + } + } + + debug!( + "Apply <{}> Spec changes: [add:{}, mod:{}, del:{}, skip:{}]", + S::LABEL, + add_cnt, + mod_cnt, + del_cnt, + skip_cnt + ); + + actions + } + + + + pub fn spec(&self, key: &S::Key) -> Option { + match self.0.read().get(key) { + Some(spu) => Some(spu.clone()), + None => None, + } + } + + + pub fn all_keys(&self) -> Vec { + self.0.read().keys().cloned().collect() + } + + + #[allow(dead_code)] + pub fn all_values(&self) -> Vec { + self.0.read().values().cloned().collect() + } + + +} + +// ----------------------------------- +// Unit Tests +// ----------------------------------- + +#[cfg(test)] +pub mod test { + + use metadata::spu::SpuSpec; + use internal_api::messages::SpuMsg; + + use crate::core::SpuLocalStore; + use crate::core::SpecChange; + + #[test] 
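// sync_all vs apply_changes, in brief: sync_all treats its input as the complete
// source of truth (anything not present gets deleted), while apply_changes only
// applies the explicit UPDATE/DELETE messages it is handed. A compact sketch,
// assuming a store currently holding {5000, 5001}:
//
//     // sync_all(vec![5000.into()])                      -> emits SpecChange::Delete(5001)
//     // apply_changes(vec![SpuMsg::update(5002.into())]) -> emits SpecChange::Add(5002)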
+ fn test_sync_all() { + + let spu_store = SpuLocalStore::default() + .bulk_add(vec![ + 5000, + 5001, + 5003 + ]); + + let source = vec![ + 5000.into(), + 5002.into(), + SpuSpec::new(5001) + .set_custom() + ]; + + // should generate new(5002),mod(5001),del(5003) + let mut actions = spu_store.sync_all(source); + assert_eq!(actions.count(),3); + assert_eq!(actions.pop_front().unwrap(),SpecChange::Add(5002.into())); + assert_eq!(actions.pop_front().unwrap(),SpecChange::Mod( + SpuSpec::new(5001) + .set_custom(), + 5001.into() + )); + assert_eq!(actions.pop_front().unwrap(),SpecChange::Delete(5003.into())); + + assert_eq!(spu_store.count(),3); + assert_eq!(spu_store.spec(&5000).unwrap(),5000.into()); + assert_eq!(spu_store.spec(&5001).unwrap(), + SpuSpec::new(5001) + .set_custom()); + assert_eq!(spu_store.spec(&5002).unwrap(),5002.into()); + + } + + #[test] + fn test_apply_changes() { + + let spu_store = SpuLocalStore::default() + .bulk_add(vec![ + 5000, + 5001, + 5003 + ]); + + + let changes = vec![ + SpuMsg::update(5002.into()), + SpuMsg::update( SpuSpec::new(5001) + .set_custom()), + SpuMsg::delete(5003.into()) + ]; + + // should generate new(5002),mod(5001),del(5003) + let mut actions = spu_store.apply_changes(changes); + assert_eq!(actions.count(),3); + assert_eq!(actions.pop_front().unwrap(),SpecChange::Add(5002.into())); + assert_eq!(actions.pop_front().unwrap(),SpecChange::Mod( + SpuSpec::new(5001) + .set_custom(), + 5001.into() + )); + assert_eq!(actions.pop_front().unwrap(),SpecChange::Delete(5003.into())); + + assert_eq!(spu_store.count(),3); + assert_eq!(spu_store.spec(&5000).unwrap(),5000.into()); + assert_eq!(spu_store.spec(&5001).unwrap(), + SpuSpec::new(5001) + .set_custom()); + assert_eq!(spu_store.spec(&5002).unwrap(),5002.into()); + + } + +} diff --git a/spu-server/src/core/stream/fetch_stream.rs b/spu-server/src/core/stream/fetch_stream.rs new file mode 100644 index 0000000000..237ec4bb62 --- /dev/null +++ b/spu-server/src/core/stream/fetch_stream.rs @@ -0,0 +1,256 @@ +use std::collections::HashMap; +use std::io::Cursor; +use std::io::Error as IoError; +use std::sync::Arc; +use std::sync::RwLock; +use std::time::SystemTime; + +use bytes::BytesMut; +use chashmap::CHashMap; +use chashmap::ReadGuard; +use futures::future::TryFutureExt; +use futures::sink::SinkExt; +use futures::stream::StreamExt; +use log::debug; +use log::error; +use log::trace; + +use types::SpuId; +use future_helper::spawn; +use internal_api::InternalApiRequest; +use internal_api::InternalApiResponse; +use internal_api::InternalKafkaApiEnum; +use kf_protocol::api::RequestMessage; +use kf_protocol::api::ResponseMessage; +use kf_protocol::Decoder; +use kf_protocol::Encoder; + +use crate::KfTcpStreamSplit; +use crate::KfTcpStreamSplitSink; +use crate::KfTcpStreamSplitStream; + +pub enum StreamError { + #[allow(dead_code)] + NoFollower, + IoError(IoError), +} + +impl From for StreamError { + fn from(error: IoError) -> Self { + StreamError::IoError(error) + } +} + +unsafe impl Sync for FollowerInfo {} + +#[derive(Debug)] +pub struct FollowerInfo { + pub spu_id: SpuId, + pub last_time: SystemTime, + pub ack_count: u16, +} + +impl FollowerInfo {} + +// split followers info and sink so there is less lock contention +#[derive(Debug)] +pub struct SpuStreams { + followers_info: CHashMap, + sinks: CHashMap, + followers: RwLock>, +} + +impl SpuStreams { + pub fn new() -> Self { + Self { + followers_info: CHashMap::new(), + sinks: CHashMap::new(), + followers: RwLock::new(HashMap::new()), + } + } + + async fn 
watch_changes( + self: Arc, + spu_id: SpuId, + mut stream: KfTcpStreamSplitStream, + ) -> Result<(), IoError> { + debug!("fetch stream: starting watching changes: {}", spu_id); + while let Some(result) = stream.next().await { + let bytes = result?; + debug!( + "fetch stream: received from spu: {}, ack bytes: {} ", + spu_id, + bytes.len() + ); + { + let follower_info = self.followers_info.get_mut(&spu_id); + debug!("fetch stream: retrieved follower info"); + if let Some(mut follower_info) = follower_info { + follower_info.last_time = SystemTime::now(); + follower_info.ack_count = follower_info.ack_count + 1; + trace!( + "fetch stream: follower: {}, ack: {}", + spu_id, + follower_info.ack_count + ); + + } else { + error!( + "fetch stream: got ack from follower doesn't exist anymore: {}, finishing", + spu_id + ); + return Ok(()); + } + } + + trace!("fetch stream: finishing ack. waiting for next client request"); + } + + debug!("fetch stream: follower stream terminated. finishing"); + + Ok(()) + } + + #[allow(dead_code)] + fn handle_request_from_follower(&self, req_bytes: BytesMut) -> Result<(), IoError> { + let mut src = Cursor::new(&req_bytes); + let req_msg: RequestMessage = + RequestMessage::decode_from(&mut src)?; + + match req_msg.request { + InternalApiRequest::FetchAckRequest(_req) => { + debug!("got ping request"); + } + _ => error!("unreg request: {:#?}", req_msg), + } + + Ok(()) + } + + // add follower stream for spu + pub fn add(self: Arc, spu_id: SpuId, split: KfTcpStreamSplit) { + debug!("adding fetch stream spu: {}", spu_id); + let (sink, stream) = split.as_tuple(); + + let info = FollowerInfo { + spu_id, + last_time: SystemTime::now(), + ack_count: 0, + }; + + let mut follower = self + .followers + .write() + .expect("follower lock must always lock"); + follower.insert(spu_id, true); + self.followers_info.insert(spu_id, info); + self.sinks.insert(spu_id, sink); + + let my_clone = self.clone(); + spawn(my_clone.watch_changes(spu_id, stream).map_err(|err| { + error!("fetch stream: error watching streams: {}", err); + () + })); + } + + pub fn get_followers_info(&self, spu_id: &SpuId) -> Option> { + self.followers_info.get(spu_id) + } + + // send to internal sink + pub async fn send_request( + self: Arc, + spu_id: SpuId, + response: ResponseMessage, + ) -> Result<(), StreamError> { + if let Some(mut sink) = self.sinks.get_mut(&spu_id) { + debug!("fetch stream: sending response to spu: {}", spu_id); + sink.send(response.as_bytes()?).await?; + trace!("fetch stream: sending response to spu finished"); + Ok(()) + } else { + Err(StreamError::NoFollower) + } + } +} + +#[cfg(test)] +mod test { + + use std::io::Error as IoError; + use std::net::SocketAddr; + use std::sync::Arc; + use std::time::Duration; + + use futures::future::FutureExt; + use futures::stream::StreamExt; + use log::debug; + + use types::SpuId; + use future_aio::net::AsyncTcpListener; + use future_aio::net::AsyncTcpStream; + use future_helper::sleep; + use future_helper::test_async; + + use super::SpuStreams; + use crate::KfTcpStreamSplit; + + /// create server and create client stream + async fn create_server(listener: AsyncTcpListener, client_count: u16) -> Result<(), IoError> { + debug!("server: successfully binding. 
waiting for incoming"); + let mut incoming = listener.incoming(); + let mut count = 0; + let mut tcp_streams: Vec = Vec::new(); + while let Some(stream) = incoming.next().await { + debug!("server: got connection from client: {}", count); + let tcp_stream = stream?; + tcp_streams.push(tcp_stream); + yield; + count = count + 1; + if count >= client_count { + break; + } + } + + debug!("server: sleeping for 1 second to give client chances"); + sleep(Duration::from_micros(1000)).await.expect("panic"); + Ok(()) as Result<(), IoError> + } + + #[test_async] + async fn test_stream_add() -> Result<(), IoError> { + //utils::init_logger(); + + let count = 5; + + let bk_stream = Arc::new(SpuStreams::new()); + + // create fake server, anything will do since we only + // care creating tcp stream + let addr = "127.0.0.1:29998".parse::().expect("parse"); + let listener = AsyncTcpListener::bind(&addr)?; + + let server_ft = create_server(listener, count); + + let client_ft = async { + debug!("client: sleep to give server chance to come up"); + for i in 0..count { + debug!("client: trying to connect {}", i); + let tcp_stream = AsyncTcpStream::connect(&addr).await?; + debug!("client: connected, adding to bk stream: {}", i); + let split: KfTcpStreamSplit = tcp_stream.split(); + bk_stream.clone().add(i as SpuId, split); + } + + debug!("client sleeping for second to give server chance to catch up"); + sleep(Duration::from_micros(200)).await.expect("panic"); + + Ok(()) as Result<(), IoError> + }; + + client_ft.join(server_ft).await; + + Ok(()) + } + +} diff --git a/spu-server/src/core/stream/leader_stream.rs b/spu-server/src/core/stream/leader_stream.rs new file mode 100644 index 0000000000..57aee0e42a --- /dev/null +++ b/spu-server/src/core/stream/leader_stream.rs @@ -0,0 +1,135 @@ +/// maintain connection to leader + +use std::io::Error as IoError; +use std::io::Cursor; +use std::sync::Arc; +use std::sync::RwLock; +use std::collections::HashMap; + +use log::error; +use log::debug; +use futures::sink::SinkExt; +use futures::stream::StreamExt; +use futures::future::TryFutureExt; +use chashmap::CHashMap; +use chashmap::WriteGuard; + +use types::SpuId; +use kf_protocol::Encoder; +use kf_protocol::Decoder; +use kf_protocol::api::RequestMessage; +use kf_protocol::api::ResponseMessage; +use internal_api::InternalApiResponse; +use internal_api::InternalApiRequest; +use internal_api::InternalKafkaApiEnum; +use internal_api::FetchAckResponse; +use future_helper::spawn; + +use crate::KfTcpStreamSplit; +use crate::KfTcpStreamSplitSink; +use crate::KfTcpStreamSplitStream; + +pub enum StreamError { + #[allow(dead_code)] + NoFollower, + IoError(IoError) +} + +impl From for StreamError { + fn from(error: IoError) -> Self { + StreamError::IoError(error) + } +} + + + + +// split followers info and sink so there is less lock contention +#[derive(Debug)] +pub struct LeaderStreams { + sinks: CHashMap, + leaders: RwLock> +} + +impl LeaderStreams { + + #[allow(dead_code)] + pub fn new() -> Self { + + Self { + sinks: CHashMap::new(), + leaders: RwLock::new(HashMap::new()) + } + } + + /// watch changes in the stream. 
+    /// watch changes in the stream. in this case, we receive the batch request;
+    /// first we send an ack, then save the batch
+    #[allow(dead_code)]
+    async fn watch_changes(self: Arc<Self>, leader_id: SpuId, mut stream: KfTcpStreamSplitStream) -> Result<(), IoError> {
+
+        debug!("fetch stream: starting watching changes: {}", leader_id);
+        while let Some(result) = stream.next().await {
+
+            let bytes = result?;
+
+            debug!("fetch stream: received from spu: {}, ack bytes: {} ", leader_id, bytes.len());
+            // decode request
+            let mut src = Cursor::new(&bytes);
+            let _request = RequestMessage::<InternalApiRequest<InternalKafkaApiEnum>>::decode_from(&mut src);
+
+            if let Some(sink) = self.sinks.get_mut(&leader_id) {
+
+                debug!("sending ack back to leader: {}", leader_id);
+                if let Err(err) = send_ack(leader_id, sink).await {
+                    error!("error sending ack back: {}", err);
+                }
+
+            } else {
+                error!("sink for leader not found: {}", leader_id);
+            }
+
+        }
+
+        debug!("fetch stream: leader stream terminated. finishing");
+
+        Ok(())
+    }
+
+    // add stream to connect to leader
+    #[allow(dead_code)]
+    pub fn add(self: Arc<Self>, leader_id: SpuId, split: KfTcpStreamSplit) {
+
+        debug!("adding fetch stream spu: {}", leader_id);
+        let (sink, stream) = split.as_tuple();
+
+        let mut leader = self.leaders.write().expect("leader lock must always lock");
+        leader.insert(leader_id, true);
+        self.sinks.insert(leader_id, sink);
+
+        let my_clone = self.clone();
+        spawn(my_clone.watch_changes(leader_id, stream).map_err(|err| {
+            error!("fetch stream: error watching streams: {}", err);
+            ()
+        }));
+
+    }
+
+}
+
+#[allow(dead_code)]
+async fn send_ack(leader_id: SpuId, mut sink: WriteGuard<SpuId, KfTcpStreamSplitSink>) -> Result<(), IoError> {
+
+    let mut ack_message = ResponseMessage::<InternalApiResponse<InternalKafkaApiEnum>>::default();
+    let mut response = FetchAckResponse::default();
+    response.spu_id = leader_id;
+    ack_message.response = InternalApiResponse::FetchAckResponse(response);
+
+    debug!("sending ack back to leader: {}", leader_id);
+    sink.send(ack_message.as_bytes()?).await
+}
diff --git a/spu-server/src/core/stream/mod.rs b/spu-server/src/core/stream/mod.rs
new file mode 100644
index 0000000000..e24201817e
--- /dev/null
+++ b/spu-server/src/core/stream/mod.rs
@@ -0,0 +1,6 @@
+
+mod fetch_stream;
+mod leader_stream;
+
+pub use self::fetch_stream::SpuStreams;
+pub use self::fetch_stream::StreamError;
diff --git a/spu-server/src/error.rs b/spu-server/src/error.rs
new file mode 100644
index 0000000000..dd4376f440
--- /dev/null
+++ b/spu-server/src/error.rs
@@ -0,0 +1,53 @@
+
+use std::fmt;
+
+use futures::channel::mpsc::SendError;
+use types::PartitionError;
+use storage::StorageError;
+use kf_socket::KfSocketError;
+
+#[derive(Debug)]
+pub enum InternalServerError {
+    StorageError(StorageError),
+    PartitionError(PartitionError),
+    SendError(SendError),
+    SocketError(KfSocketError),
+}
+
+impl fmt::Display for InternalServerError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            Self::StorageError(err) => write!(f, "{}", err),
+            Self::PartitionError(err) => write!(f, "{}", err),
+            Self::SendError(err) => write!(f, "{}", err),
+            Self::SocketError(err) => write!(f, "{}", err),
+        }
+    }
+}
+
+impl From<StorageError> for InternalServerError {
+    fn from(error: StorageError) -> Self {
+        InternalServerError::StorageError(error)
+    }
+}
+
+impl From<PartitionError> for InternalServerError {
+    fn from(error: PartitionError) -> Self {
+        InternalServerError::PartitionError(error)
+    }
+}
+
+impl From<SendError> for InternalServerError {
+    fn from(error: SendError) -> Self {
+        InternalServerError::SendError(error)
+    }
+}
+
+impl From<KfSocketError> for InternalServerError {
+    fn from(error: KfSocketError) ->
Self { + InternalServerError::SocketError(error) + } +} diff --git a/spu-server/src/lib.rs b/spu-server/src/lib.rs new file mode 100644 index 0000000000..f2114a454e --- /dev/null +++ b/spu-server/src/lib.rs @@ -0,0 +1,20 @@ +#![feature(generators)] +#![recursion_limit = "256"] + +mod error; +mod start; +mod config; +mod core; +mod services; +mod controllers; + +//#[cfg(test)] +//mod tests; + +use start::main_loop; +use self::error::InternalServerError; + +pub fn start_main() { + utils::init_logger(); + main_loop(); +} diff --git a/spu-server/src/main.rs b/spu-server/src/main.rs new file mode 100644 index 0000000000..f78f7642f2 --- /dev/null +++ b/spu-server/src/main.rs @@ -0,0 +1,5 @@ +use spu_server::start_main; + +fn main() { + start_main(); +} diff --git a/spu-server/src/services/internal/api.rs b/spu-server/src/services/internal/api.rs new file mode 100644 index 0000000000..5e943b3df5 --- /dev/null +++ b/spu-server/src/services/internal/api.rs @@ -0,0 +1,70 @@ + +use std::io::Error as IoError; +use std::convert::TryInto; + +use log::trace; + +use kf_protocol::bytes::Buf; +use kf_protocol::Decoder; +use kf_protocol::derive::Encode; +use kf_protocol::derive::Decode; + + +use kf_protocol::api::KfRequestMessage; +use kf_protocol::api::RequestMessage; +use kf_protocol::api::RequestHeader; + +use super::fetch_stream_request::FetchStreamRequest; + +#[derive(PartialEq, Debug, Encode, Decode, Clone, Copy)] +#[repr(u16)] +pub enum KfSPUPeerApiEnum { + FetchStream = 0 +} + +impl Default for KfSPUPeerApiEnum { + fn default() -> KfSPUPeerApiEnum { + KfSPUPeerApiEnum::FetchStream + } +} + + + + +#[derive(Debug,Encode)] +pub enum SpuPeerRequest { + FetchStream(RequestMessage) +} + + + +impl Default for SpuPeerRequest { + fn default() -> SpuPeerRequest { + SpuPeerRequest::FetchStream(RequestMessage::::default()) + } +} + + + +impl KfRequestMessage for SpuPeerRequest { + + type ApiKey = KfSPUPeerApiEnum; + + fn decode_with_header(src: &mut T, header: RequestHeader) -> Result + where + Self: Default + Sized, + Self::ApiKey: Sized, + T: Buf + { + + trace!("decoding with header: {:#?}",header); + let version = header.api_version(); + match header.api_key().try_into()? 
{ + KfSPUPeerApiEnum::FetchStream => Ok(SpuPeerRequest::FetchStream(RequestMessage::new(header,FetchStreamRequest::decode_from(src, + version)?))) + } + + } + +} + diff --git a/spu-server/src/services/internal/fetch_stream.rs b/spu-server/src/services/internal/fetch_stream.rs new file mode 100644 index 0000000000..7d68dd0cc2 --- /dev/null +++ b/spu-server/src/services/internal/fetch_stream.rs @@ -0,0 +1,33 @@ +use log::debug; + +use kf_protocol::api::RequestMessage; +use kf_socket::KfSocket; +use kf_socket::KfSocketError; + +use crate::core::DefaultSharedGlobalContext; +use crate::controllers::leader_replica::LeaderConnection; +use super::FetchStreamRequest; +use super::FetchStreamResponse; + +pub(crate) async fn handle_fetch_stream_request( + req_msg: RequestMessage, + ctx: DefaultSharedGlobalContext, + mut socket: KfSocket, +) -> Result<(), KfSocketError> { + + let request = &req_msg.request; + let follower_id = request.spu_id; + debug!( + "internal service: respond to fetch stream request, follower: {}", + follower_id + ); + + let response = FetchStreamResponse::new(follower_id); + let res_msg = req_msg.new_response(response); + socket.get_mut_sink().send_response(&res_msg,req_msg.header.api_version()).await?; + + LeaderConnection::handle(ctx,follower_id,socket).await?; + + Ok(()) as Result<(), KfSocketError> +} + diff --git a/spu-server/src/services/internal/fetch_stream_request.rs b/spu-server/src/services/internal/fetch_stream_request.rs new file mode 100644 index 0000000000..fcfcc4e5ed --- /dev/null +++ b/spu-server/src/services/internal/fetch_stream_request.rs @@ -0,0 +1,29 @@ +use kf_protocol::api::Request; +use kf_protocol::derive::Decode; +use kf_protocol::derive::Encode; +use types::SpuId; + +use super::KfSPUPeerApiEnum; + +#[derive(Decode, Encode, Debug, Default)] +pub struct FetchStreamRequest { + pub spu_id: SpuId, + pub min_bytes: i32, + pub max_bytes: i32, +} + +impl Request for FetchStreamRequest { + const API_KEY: u16 = KfSPUPeerApiEnum::FetchStream as u16; + type Response = FetchStreamResponse; +} + +#[derive(Decode, Encode, Default, Debug)] +pub struct FetchStreamResponse { + pub spu_id: SpuId, +} + +impl FetchStreamResponse { + pub fn new(spu_id: SpuId) -> Self { + FetchStreamResponse { spu_id } + } +} diff --git a/spu-server/src/services/internal/mod.rs b/spu-server/src/services/internal/mod.rs new file mode 100644 index 0000000000..2976ceb9b6 --- /dev/null +++ b/spu-server/src/services/internal/mod.rs @@ -0,0 +1,31 @@ +mod api; +mod fetch_stream; +mod service_impl; +mod fetch_stream_request; + +use log::info; +use std::net::SocketAddr; + +use kf_service::KfApiServer; +use service_impl::SpunternalService; + +use crate::core::DefaultSharedGlobalContext; + +pub use self::fetch_stream_request::FetchStreamRequest; +pub use self::fetch_stream_request::FetchStreamResponse; +pub use self::api::KfSPUPeerApiEnum; +pub use self::api::SpuPeerRequest; + +pub(crate) type InternalApiServer = KfApiServer< + SpuPeerRequest, + KfSPUPeerApiEnum, + DefaultSharedGlobalContext, + SpunternalService>; + +// start server +pub fn create_internal_server(addr: SocketAddr, ctx: DefaultSharedGlobalContext) -> InternalApiServer + { + info!("starting SPU: {} at internal service at: {}", ctx.local_spu_id(),addr); + + KfApiServer::new(addr, ctx, SpunternalService::new()) +} diff --git a/spu-server/src/services/internal/service_impl.rs b/spu-server/src/services/internal/service_impl.rs new file mode 100644 index 0000000000..45c8b1360b --- /dev/null +++ 
b/spu-server/src/services/internal/service_impl.rs @@ -0,0 +1,60 @@ +use std::sync::Arc; + +use futures::future::BoxFuture; + +use kf_service::api_loop; +use kf_service::KfService; +use kf_socket::KfSocket; +use kf_socket::KfSocketError; + +use super::SpuPeerRequest; +use super::KfSPUPeerApiEnum; + +use super::fetch_stream::handle_fetch_stream_request; +use crate::core::DefaultSharedGlobalContext; + +pub struct SpunternalService {} + +impl SpunternalService { + pub fn new() -> Self { + Self {} + } + + async fn handle( + self: Arc, + context: DefaultSharedGlobalContext, + socket: KfSocket, + ) -> Result<(), KfSocketError> { + let (sink, mut stream) = socket.split(); + let mut api_stream = stream.api_stream::(); + + api_loop!( + api_stream, + + SpuPeerRequest::FetchStream(request) => { + + drop(api_stream); + let orig_socket: KfSocket = (sink,stream).into(); + handle_fetch_stream_request(request, context, orig_socket).await?; + return Ok(()); + + } + ); + + Ok(()) + } +} + +impl KfService for SpunternalService { + type Context = DefaultSharedGlobalContext; + type Request = SpuPeerRequest; + type ResponseFuture = BoxFuture<'static, Result<(), KfSocketError>>; + + fn respond( + self: Arc, + context: DefaultSharedGlobalContext, + socket: KfSocket, + ) -> Self::ResponseFuture { + Box::pin(self.handle(context, socket)) + } +} diff --git a/spu-server/src/services/mod.rs b/spu-server/src/services/mod.rs new file mode 100644 index 0000000000..d9c34c9311 --- /dev/null +++ b/spu-server/src/services/mod.rs @@ -0,0 +1,6 @@ +pub(crate) mod public; + +pub mod internal; + +pub use self::internal::create_internal_server; +pub use self::public::create_public_server; diff --git a/spu-server/src/services/public/api_versions.rs b/spu-server/src/services/public/api_versions.rs new file mode 100644 index 0000000000..db1b3486d1 --- /dev/null +++ b/spu-server/src/services/public/api_versions.rs @@ -0,0 +1,56 @@ +use std::io::Error; +use log::debug; + +use spu_api::SpuApiKey; +use spu_api::versions::ApiVersionKey; +use spu_api::versions::ApiVersionsRequest; +use spu_api::versions::ApiVersionsResponse; +use spu_api::spus::FlvFetchLocalSpuRequest; +use spu_api::offsets::FlvFetchOffsetsRequest; + +use kf_protocol::api::RequestMessage; +use kf_protocol::api::ResponseMessage; +use kf_protocol::api::Request; +use kf_protocol::message::produce::DefaultKfProduceRequest; +use kf_protocol::message::fetch::DefaultKfFetchRequest; + +pub async fn handle_kf_lookup_version_request( + request: RequestMessage, +) -> Result, Error> { + debug!("generating api response"); + + let mut response = ApiVersionsResponse::default(); + + // Kafka + response + .api_keys + .push(make_version_key( + SpuApiKey::KfProduce, + DefaultKfProduceRequest::MIN_API_VERSION, + DefaultKfProduceRequest::MAX_API_VERSION)); + response + .api_keys + .push(make_version_key(SpuApiKey::KfFetch, + DefaultKfFetchRequest::MIN_API_VERSION, + DefaultKfFetchRequest::MAX_API_VERSION)); + + // Fluvio + response + .api_keys + .push(make_version_key(SpuApiKey::FlvFetchLocalSpu, FlvFetchLocalSpuRequest::DEFAULT_API_VERSION, FlvFetchLocalSpuRequest::DEFAULT_API_VERSION)); + response + .api_keys + .push(make_version_key(SpuApiKey::FlvFetchOffsets, FlvFetchOffsetsRequest::DEFAULT_API_VERSION, FlvFetchOffsetsRequest::DEFAULT_API_VERSION)); + + Ok(request.new_response(response)) +} + +/// Build version key object +fn make_version_key(key: SpuApiKey, min_version: i16, max_version: i16) -> ApiVersionKey { + let api_key = key as i16; + ApiVersionKey { + api_key, + min_version, + 
max_version, + } +} diff --git a/spu-server/src/services/public/fetch_handler.rs b/spu-server/src/services/public/fetch_handler.rs new file mode 100644 index 0000000000..50f0042eb0 --- /dev/null +++ b/spu-server/src/services/public/fetch_handler.rs @@ -0,0 +1,59 @@ +use log::trace; + +use kf_socket::KfSink; +use kf_socket::KfSocketError; +use kf_protocol::api::RequestMessage; +use metadata::partition::ReplicaKey; +use kf_socket::FileFetchResponse; +use kf_socket::KfFileFetchRequest; +use kf_socket::FilePartitionResponse; +use kf_socket::FileTopicResponse; + +use crate::core::DefaultSharedGlobalContext; + + +pub async fn handle_fetch_request( + request: RequestMessage, + ctx: DefaultSharedGlobalContext, + sink: &mut KfSink, +) -> Result<(), KfSocketError> { + let (header, fetch_request) = request.get_header_request(); + + let mut fetch_response = FileFetchResponse::default(); + + for topic_request in &fetch_request.topics { + let topic = &topic_request.name; + + let mut topic_response = FileTopicResponse::default(); + topic_response.name = topic.clone(); + + for partition_req in &topic_request.fetch_partitions { + let partition = &partition_req.partition_index; + let fetch_offset = partition_req.fetch_offset; + let rep_id = ReplicaKey::new(topic.clone(), *partition); + let mut partition_response = FilePartitionResponse::default(); + partition_response.partition_index = *partition; + + ctx.leaders_state().read_records( + &rep_id, + fetch_offset, + fetch_request.isolation_level.clone(), + &mut partition_response, + ) + .await; + + topic_response.partitions.push(partition_response); + } + + fetch_response.topics.push(topic_response); + } + + let response = + RequestMessage::::response_with_header(&header, fetch_response); + trace!("sending back file fetch response: {:#?}",response); + sink.encode_file_slices(&response, header.api_version()) + .await?; + trace!("finish sending fetch response"); + + Ok(()) +} diff --git a/spu-server/src/services/public/local_spu_request.rs b/spu-server/src/services/public/local_spu_request.rs new file mode 100644 index 0000000000..a72c9b5b9b --- /dev/null +++ b/spu-server/src/services/public/local_spu_request.rs @@ -0,0 +1,35 @@ +use std::io::Error as IoError; + +use log::trace; + +use kf_protocol::api::RequestMessage; +use kf_protocol::api::ResponseMessage; + +use spu_api::spus::FlvFetchLocalSpuRequest; +use spu_api::spus::FlvFetchLocalSpuResponse; +use spu_api::spus::EndPointMetadata; + +use crate::core::DefaultSharedGlobalContext; + +pub async fn handle_spu_request( + req_msg: RequestMessage, + ctx: DefaultSharedGlobalContext, +) -> Result, IoError> { + let mut response = FlvFetchLocalSpuResponse::default(); + let config = ctx.config(); + + response.id = config.id(); + response.managed = !config.is_custom(); + response.public_ep = EndPointMetadata { + host: config.public_server_addr().host.clone(), + port: config.public_server_addr().port, + }; + response.private_ep = EndPointMetadata { + host: config.private_server_addr().host.clone(), + port: config.private_server_addr().port, + }; + response.rack = config.rack().clone(); + + trace!("fetch local spu res {:#?}", response); + Ok(req_msg.new_response(response)) +} diff --git a/spu-server/src/services/public/mod.rs b/spu-server/src/services/public/mod.rs new file mode 100644 index 0000000000..97a076637f --- /dev/null +++ b/spu-server/src/services/public/mod.rs @@ -0,0 +1,30 @@ +mod api_versions; +mod service_impl; +mod produce_handler; +mod fetch_handler; +mod local_spu_request; +mod offset_request; + +use 
log::info; +use std::net::SocketAddr; + +use kf_service::KfApiServer; +use service_impl::PublicService; +use spu_api::PublicRequest; +use spu_api::SpuApiKey; + +use crate::core::DefaultSharedGlobalContext; + +pub(crate) type PublicApiServer = KfApiServer< + PublicRequest, + SpuApiKey, + DefaultSharedGlobalContext, + PublicService>; + +// start server +pub fn create_public_server(addr: SocketAddr, ctx: DefaultSharedGlobalContext) -> PublicApiServer +{ + info!("starting SPU: {} at public service at: {}", ctx.local_spu_id(),addr); + + KfApiServer::new(addr, ctx, PublicService::new()) +} diff --git a/spu-server/src/services/public/offset_request.rs b/spu-server/src/services/public/offset_request.rs new file mode 100644 index 0000000000..b7ea73e9da --- /dev/null +++ b/spu-server/src/services/public/offset_request.rs @@ -0,0 +1,55 @@ +use std::io::Error as IoError; + +use log::trace; + +use kf_protocol::api::RequestMessage; +use kf_protocol::api::ResponseMessage; +use spu_api::offsets::FlvFetchOffsetsRequest; +use spu_api::offsets::FetchOffsetTopicResponse; +use spu_api::offsets::FlvFetchOffsetsResponse; +use spu_api::offsets::FetchOffsetPartitionResponse; +use metadata::partition::ReplicaKey; +use kf_protocol::api::FlvErrorCode; +use storage::ReplicaStorage; + +use crate::core::DefaultSharedGlobalContext; + +pub async fn handle_offset_request( + req_msg: RequestMessage, + ctx: DefaultSharedGlobalContext, +) -> Result, IoError> { + let request = req_msg.request(); + trace!("handling flv fetch request: {:#?}", request); + + let mut response = FlvFetchOffsetsResponse::default(); + + for topic_request in &request.topics { + let topic = &topic_request.name; + + let mut topic_response = FetchOffsetTopicResponse::default(); + topic_response.name = topic.clone(); + + for partition_req in &topic_request.partitions { + let partition = &partition_req.partition_index; + let mut partition_response = FetchOffsetPartitionResponse::default(); + partition_response.partition_index = *partition; + let rep_id = ReplicaKey::new(topic.clone(), *partition); + if let Some(replica) = ctx.leaders_state().get_replica(&rep_id) { + trace!("offset fetch request for replica found: {}", rep_id); + let storage = replica.storage(); + partition_response.error_code = FlvErrorCode::None; + partition_response.start_offset = storage.get_log_start_offset(); + partition_response.last_stable_offset = storage.get_hw(); + } else { + trace!("offset fetch request is not found: {}", rep_id); + partition_response.error_code = FlvErrorCode::PartitionNotLeader; + } + + topic_response.partitions.push(partition_response); + } + + response.topics.push(topic_response); + } + + Ok(req_msg.new_response(response)) +} diff --git a/spu-server/src/services/public/produce_handler.rs b/spu-server/src/services/public/produce_handler.rs new file mode 100644 index 0000000000..ded79e178a --- /dev/null +++ b/spu-server/src/services/public/produce_handler.rs @@ -0,0 +1,76 @@ +use std::io::Error; + +use log::warn; +use log::trace; +use log::error; + +use kf_protocol::api::ErrorCode; +use kf_protocol::message::produce::DefaultKfProduceRequest; +use kf_protocol::message::produce::KfProduceResponse; +use kf_protocol::message::produce::TopicProduceResponse; +use kf_protocol::message::produce::PartitionProduceResponse; +use kf_protocol::api::RequestMessage; +use kf_protocol::api::ResponseMessage; +use metadata::partition::ReplicaKey; + +use crate::core::DefaultSharedGlobalContext; + + +pub async fn handle_produce_request( + request: RequestMessage, + ctx: 
+    DefaultSharedGlobalContext,
+) -> Result<ResponseMessage<KfProduceResponse>, Error> {
+    let (header, produce_request) = request.get_header_request();
+    trace!("handling produce request: {:#?}", produce_request);
+
+    let mut response = KfProduceResponse::default();
+
+    //let ack = produce_request.acks;
+
+    for topic_request in produce_request.topics {
+        let topic = &topic_request.name;
+        trace!("handling produce request for topic: {}", topic);
+
+        let mut topic_response = TopicProduceResponse::default();
+        topic_response.name = topic.to_owned();
+
+        for partition_request in topic_request.partitions {
+            let rep_id = ReplicaKey::new(topic.clone(), partition_request.partition_index);
+
+            trace!("handling produce request for replica: {}", rep_id);
+
+            let mut partition_response = PartitionProduceResponse::default();
+            partition_response.partition_index = rep_id.partition;
+
+            match ctx.leaders_state().send_records(
+                &rep_id,
+                partition_request.records,
+                true,
+            )
+            .await
+            {
+                Ok(found_flag) => {
+                    if found_flag {
+                        trace!("records were successfully processed for: {}", rep_id);
+                        partition_response.error_code = ErrorCode::None;
+                    } else {
+                        warn!("no replica found: {}", rep_id);
+                        partition_response.error_code = ErrorCode::NotLeaderForPartition;
+                    }
+                }
+                Err(err) => {
+                    error!("error: {:#?} writing to replica: {}", err, rep_id);
+                    partition_response.error_code = ErrorCode::KafkaStorageError;
+                }
+            }
+
+            topic_response.partitions.push(partition_response);
+        }
+
+        response.responses.push(topic_response);
+    }
+
+    trace!("produce request completed");
+
+    Ok(RequestMessage::<DefaultKfProduceRequest>::response_with_header(&header, response))
+}
diff --git a/spu-server/src/services/public/service_impl.rs b/spu-server/src/services/public/service_impl.rs
new file mode 100644
index 0000000000..5d4b741a37
--- /dev/null
+++ b/spu-server/src/services/public/service_impl.rs
@@ -0,0 +1,92 @@
+
+use std::sync::Arc;
+
+use futures::future::BoxFuture;
+use futures::future::FutureExt;
+
+use kf_socket::KfSocket;
+use kf_socket::KfSocketError;
+use kf_service::call_service;
+use kf_service::KfService;
+use kf_service::api_loop;
+use spu_api::SpuApiKey;
+use spu_api::PublicRequest;
+
+use crate::core::DefaultSharedGlobalContext;
+use super::api_versions::handle_kf_lookup_version_request;
+use super::produce_handler::handle_produce_request;
+use super::fetch_handler::handle_fetch_request;
+use super::local_spu_request::handle_spu_request;
+use super::offset_request::handle_offset_request;
+
+pub struct PublicService {
+}
+
+impl PublicService {
+
+    pub fn new() -> Self {
+        PublicService {}
+    }
+
+    async fn handle(self: Arc<Self>, context: DefaultSharedGlobalContext, socket: KfSocket) -> Result<(), KfSocketError> {
+
+        let (mut sink, mut stream) = socket.split();
+        let mut api_stream = stream.api_stream::<PublicRequest, SpuApiKey>();
+
+        api_loop!(
+            api_stream,
+
+            // Mixed
+            PublicRequest::ApiVersionsRequest(request) => call_service!(
+                request,
+                handle_kf_lookup_version_request(request),
+                sink,
+                "kf api version handler"
+            ),
+
+            // Kafka
+            PublicRequest::KfProduceRequest(request) => call_service!(
+                request,
+                handle_produce_request(request, context.clone()),
+                sink,
+                "kf produce request handler"
+            ),
+            PublicRequest::KfFileFetchRequest(request) => handle_fetch_request(request, context.clone(), &mut sink).await?,
+
+            // Fluvio
+            PublicRequest::FlvFetchLocalSpuRequest(request) => call_service!(
+                request,
+                handle_spu_request(request, context.clone()),
+                sink,
+                "handling local spu request"
+            ),
+            PublicRequest::FlvFetchOffsetsRequest(request) => call_service!(
+                request,
handle_offset_request(request,context.clone()), + sink, + "handling offset fetch request" + ) + ); + + Ok(()) + + + } +} + + +impl KfService for PublicService { + type Context = DefaultSharedGlobalContext; + type Request = PublicRequest; + type ResponseFuture = BoxFuture<'static,Result<(),KfSocketError>>; + + fn respond( + self: Arc, + context: DefaultSharedGlobalContext, + socket: KfSocket + ) -> Self::ResponseFuture { + + self.handle(context,socket).boxed() + } + +} diff --git a/spu-server/src/start.rs b/spu-server/src/start.rs new file mode 100644 index 0000000000..7a872892e6 --- /dev/null +++ b/spu-server/src/start.rs @@ -0,0 +1,72 @@ +use log::info; +use log::debug; + +use future_helper::run; +use storage::FileReplica; + +use crate::config::process_spu_cli_or_exit; +use crate::config::SpuConfig; +use crate::services::create_internal_server; +use crate::services::create_public_server; +use crate::services::internal::InternalApiServer; +use crate::services::public::PublicApiServer; +use crate::core::DefaultSharedGlobalContext; +use crate::core::GlobalContext; +use crate::controllers::sc::ScDispatcher; + +type FileReplicaContext = GlobalContext; + +pub fn main_loop() { + // parse configuration (program exits on error) + let spu_config = process_spu_cli_or_exit(); + + info!( + "starting {}-spu services (id:{})", + spu_config.type_label(), + spu_config.id + ); + + debug!("spu config: {:#?}",spu_config); + + run(async { + let (_ctx, internal_server, public_server) = create_services(spu_config, true, true); + + let _public_shutdown = internal_server.unwrap().run(); + let _private_shutdown = public_server.unwrap().run(); + }); +} + +/// create server and spin up services, but don't run server +pub fn create_services( + local_spu: SpuConfig, + internal: bool, + public: bool, +) -> ( + DefaultSharedGlobalContext, + Option, + Option, +) { + let ctx = FileReplicaContext::new_shared_context(local_spu); + + let public_ep_addr = ctx.config().public_socket_addr().clone(); + let private_ep_addr = ctx.config().private_socket_addr().clone(); + + let public_server = if public { + Some(create_public_server(public_ep_addr, ctx.clone())) + } else { + None + }; + + let internal_server = if internal { + Some(create_internal_server(private_ep_addr, ctx.clone())) + } else { + None + }; + + + let sc_dispatcher = ScDispatcher::new(ctx.clone()); + + sc_dispatcher.run(); + + (ctx, internal_server, public_server) +} diff --git a/spu-server/src/tests/fixture/generator.rs b/spu-server/src/tests/fixture/generator.rs new file mode 100644 index 0000000000..0ccb0ba963 --- /dev/null +++ b/spu-server/src/tests/fixture/generator.rs @@ -0,0 +1,123 @@ +use std::io::Error as IoError; +use std::path::PathBuf; +use std::path::Path; +use std::env::temp_dir; +use std::convert::TryInto; +use std::sync::Arc; + +use metadata::spu::Endpoint as MetadatEndPoint; + + +use metadata::spu::SpuSpec; +use utils::fixture::ensure_clean_dir; +use types::socket_helpers::EndPoint; + +use crate::core::LocalSpu; +use crate::config::SpuConfig; +use crate::core::GlobalContext; +use crate::core::DefaultSharedGlobalContext; +use crate::core::Receivers; +use crate::services::internal::InternalApiServer; +use crate::start::create_services; +use super::mock_sc::ScGlobalContext; +use super::mock_sc::SharedScContext; +use super::SpuTestRunner; +use super::SpuTest; +use super::mock_sc::MockScServer; + +#[derive(Default)] +pub struct TestGenerator { + + base_id: i32, + base_port: u16, + base_dir: PathBuf +} + + +impl TestGenerator { + + pub fn set_base_id(mut 
self,id: i32) -> Self { + self.base_id = id; + self + } + + pub fn set_base_port(mut self,port: u16) -> Self { + self.base_port = port; + self + } + + pub fn set_base_dir

(mut self,dir: P) -> Self where P: AsRef { + self.base_dir = temp_dir().join(dir); + self + } + + pub fn sc_endpoint(&self) -> EndPoint { + EndPoint::local_end_point(self.base_port) + } + + + pub fn create_spu_spec(&self, spu_index: u16) -> SpuSpec { + + let port = spu_index * 2 + self.base_port + 1; + + SpuSpec { + id: self.base_id + spu_index as i32, + public_endpoint: MetadatEndPoint { + port, + ..Default::default() + }, + private_endpoint: MetadatEndPoint { + port: port + 1, + ..Default::default() + }, + ..Default::default() + } + } + + pub fn init(self) -> Self { + ensure_clean_dir(&self.base_dir); + self + } + + + fn convert_to_spu(&self,spu: &SpuSpec) -> Result { + + let mut config: SpuConfig = spu.try_into()?; + config.log.base_dir = self.base_dir.clone(); + config.sc_retry_ms = 10; + config.sc_endpoint = EndPoint::local_end_point(self.base_port); + Ok(config.into()) + } + + + /// create server with and start controller + pub fn create_spu_server(&self, spu: &SpuSpec) -> Result<(InternalApiServer, DefaultSharedGlobalContext), IoError> { + + let local_spu = self.convert_to_spu(spu)?; + let (ctx,internal_server,public_server) = create_services(local_spu,true,true); + let _shutdown = public_server.unwrap().run(); + Ok((internal_server.unwrap(),ctx)) + } + + pub fn create_global_context(&self,spu: &SpuSpec) -> Result<(DefaultSharedGlobalContext,Receivers),IoError> { + let local_spu: LocalSpu = self.convert_to_spu(spu)?; + Ok(GlobalContext::new_shared_context(local_spu)) + } + + + + /// create mock sc server which only run internal services. + pub fn create_sc_server(&self,test_runner: Arc>) -> (SharedScContext,MockScServer) + where T: SpuTest + Sync + Send + 'static + { + + + let sc_contxt = ScGlobalContext::new_shared_context(); + let local_endpoint = EndPoint::local_end_point(self.base_port); + let server = sc_contxt.clone().create_server(local_endpoint.addr, test_runner); + (sc_contxt,server) + } + + +} + diff --git a/spu-server/src/tests/fixture/mock_sc.rs b/spu-server/src/tests/fixture/mock_sc.rs new file mode 100644 index 0000000000..cfb2b755b0 --- /dev/null +++ b/spu-server/src/tests/fixture/mock_sc.rs @@ -0,0 +1,110 @@ + + +use std::net::SocketAddr; +use std::sync::Arc; + + +use log::info; +use log::debug; +use futures::future::BoxFuture; +use futures::future::FutureExt; + + +use internal_api::InternalScKey; +use internal_api::InternalScRequest; +use internal_api::RegisterSpuResponse; +use kf_socket::KfSocket; +use kf_socket::KfSocketError; +use kf_service::KfApiServer; +use kf_service::KfService; +use kf_service::wait_for_request; + + +use super::SpuTest; +use super::SpuTestRunner; + + +pub type SharedScContext = Arc; + +pub(crate) type MockScServer = KfApiServer< + InternalScRequest, + InternalScKey, + SharedScContext, + MockInternalService>; + + +#[derive(Debug)] +pub struct ScGlobalContext { + +} + +impl ScGlobalContext { + + pub fn new_shared_context() -> SharedScContext { + Arc::new(ScGlobalContext{}) + } + + pub fn create_server(self: Arc,addr: SocketAddr,test_runner: Arc>) -> MockScServer + where T: SpuTest + Sync + Send + 'static { + + info!("starting mock SC service at: {:#?}", addr); + + KfApiServer::new(addr, self.clone(), MockInternalService::new(test_runner)) + } +} + +unsafe impl Sync for MockInternalService{} + +pub struct MockInternalService(Arc>); + + + +impl MockInternalService where T: SpuTest + Sync + Send+ 'static { + pub fn new(test_runner: Arc>) -> Self { + Self(test_runner) + } + + async fn handle( + self: Arc, + _context: SharedScContext, + socket: 
+        KfSocket,
+    ) -> Result<(), KfSocketError> {
+
+        let (mut sink, mut stream) = socket.split();
+        let mut api_stream = stream.api_stream::<InternalScRequest, InternalScKey>();
+
+        // wait for spu registration
+        let spu_id = wait_for_request!(api_stream,
+            InternalScRequest::RegisterSpuRequest(req_msg) => {
+
+                let spu_id = req_msg.request.spu();
+                debug!("registration req from spu: {}", spu_id);
+                let response = req_msg.new_response(RegisterSpuResponse {});
+                sink.send_response(&response, req_msg.header.api_version()).await?;
+                spu_id
+
+            }
+        );
+
+        self.0.send_metadata_to_spu(&mut sink, spu_id).await.expect("send metadata should work");
+
+        Ok(())
+
+    }
+}
+
+impl<T> KfService for MockInternalService<T> where T: SpuTest + Sync + Send + 'static {
+    type Context = SharedScContext;
+    type Request = InternalScRequest;
+    type ResponseFuture = BoxFuture<'static, Result<(), KfSocketError>>;
+
+    fn respond(
+        self: Arc<Self>,
+        context: SharedScContext,
+        socket: KfSocket,
+    ) -> Self::ResponseFuture {
+        self.handle(context, socket).boxed()
+    }
+}
diff --git a/spu-server/src/tests/fixture/mod.rs b/spu-server/src/tests/fixture/mod.rs
new file mode 100644
index 0000000000..bb39771bfe
--- /dev/null
+++ b/spu-server/src/tests/fixture/mod.rs
@@ -0,0 +1,34 @@
+mod generator;
+mod spu_client;
+mod mock_sc;
+mod test_runner;
+
+pub(crate) use generator::TestGenerator;
+pub(crate) use test_runner::SpuTestRunner;
+pub(crate) use spu_client::SpuServer;
+
+use std::sync::Arc;
+
+use futures::Future;
+
+use kf_socket::KfSocketError;
+use metadata::partition::ReplicaKey;
+
+/// Customize System Test
+pub trait SpuTest: Sized {
+    type ResponseFuture: Send + Future<Output = Result<(), KfSocketError>>;
+
+    /// environment configuration
+    fn env_configuration(&self) -> TestGenerator;
+
+    /// number of followers
+    fn followers(&self) -> usize;
+    /// replicas; empty by default
+    fn replicas(&self) -> Vec<ReplicaKey> {
+        vec![]
+    }
+
+    /// main entry point
+    fn main_test(&self, runner: Arc<SpuTestRunner<Self>>) -> Self::ResponseFuture;
+}
diff --git a/spu-server/src/tests/fixture/spu_client.rs b/spu-server/src/tests/fixture/spu_client.rs
new file mode 100644
index 0000000000..19de7e055b
--- /dev/null
+++ b/spu-server/src/tests/fixture/spu_client.rs
@@ -0,0 +1,79 @@
+use std::convert::TryInto;
+use std::net::SocketAddr;
+
+use log::debug;
+use futures::channel::mpsc::Sender;
+
+use kf_socket::KfSocketError;
+use kf_socket::KfSocket;
+use kf_protocol::api::Request;
+use kf_protocol::api::RequestMessage;
+use kf_protocol::api::ResponseMessage;
+use types::SpuId;
+use metadata::spu::SpuSpec;
+
+use super::mock_sc::SharedScContext;
+
+pub struct SpuServer(SpuSpec);
+
+impl SpuServer {
+
+    pub fn new(spec: SpuSpec) -> Self {
+        Self(spec)
+    }
+
+    pub fn spec(&self) -> &SpuSpec {
+        &self.0
+    }
+
+    pub fn id(&self) -> SpuId {
+        self.0.id
+    }
+
+    #[allow(dead_code)]
+    pub async fn send_to_internal_server<'a, R>(&'a self, req_msg: &'a RequestMessage<R>) -> Result<(), KfSocketError>
+    where
+        R: Request,
+    {
+        debug!(
+            "client: trying to connect to private endpoint: {:#?}",
+            self.spec().private_endpoint
+        );
+        let socket: SocketAddr = (&self.spec().private_endpoint).try_into()?;
+        let mut socket = KfSocket::connect(&socket).await?;
+        debug!("connected to internal endpoint {:#?}", self.spec().private_endpoint);
+        let res_msg = socket.send(&req_msg).await?;
+        debug!("response: {:#?}", res_msg);
+        Ok(())
+    }
+
+    pub async fn send_to_public_server<'a, R>(&'a self, req_msg: &'a RequestMessage<R>) -> Result<ResponseMessage<R::Response>, KfSocketError>
+    where
+        R: Request,
+    {
+        debug!(
+            "client: trying to connect to public endpoint: {:#?}",
+            self.0.public_endpoint
+        );
+        let socket: SocketAddr = (&self.spec().public_endpoint).try_into()?;
+        let mut socket = KfSocket::connect(&socket).await?;
+        debug!("connected to public end point {:#?}", self.spec().public_endpoint);
+        let res_msg = socket.send(&req_msg).await?;
+        debug!("response: {:#?}", res_msg);
+        Ok(res_msg)
+    }
+
+}
+
+impl From<SpuSpec> for SpuServer {
+    fn from(spec: SpuSpec) -> Self {
+        Self::new(spec)
+    }
+}
+
+struct ScServerCtx {
+    ctx: SharedScContext,
+    sender: Sender<bool>
+}
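+// How a test plugs into this harness (sketch; `MyTest` is hypothetical and
+// mirrors the real suites under src/tests/suites; assumes BoxFuture and
+// FutureExt are in scope):
+//
+//     struct MyTest {}
+//
+//     impl SpuTest for MyTest {
+//         type ResponseFuture = BoxFuture<'static, Result<(), KfSocketError>>;
+//
+//         fn env_configuration(&self) -> TestGenerator {
+//             TestGenerator::default().set_base_id(7000).set_base_port(9700).init()
+//         }
+//
+//         fn followers(&self) -> usize { 1 }
+//
+//         fn main_test(&self, runner: Arc<SpuTestRunner<Self>>) -> Self::ResponseFuture {
+//             async move { /* drive requests via runner.leader() */ Ok(()) }.boxed()
+//         }
+//     }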
diff --git a/spu-server/src/tests/fixture/test_runner.rs b/spu-server/src/tests/fixture/test_runner.rs
new file mode 100644
index 0000000000..e0c57dbe13
--- /dev/null
+++ b/spu-server/src/tests/fixture/test_runner.rs
@@ -0,0 +1,284 @@
+use std::time::Duration;
+use std::sync::Arc;
+use std::sync::RwLock;
+
+use log::debug;
+use futures::SinkExt;
+use futures::future::join3;
+use futures::future::join_all;
+use futures::channel::mpsc::channel;
+use futures::channel::mpsc::Sender;
+
+use future_helper::sleep;
+use kf_socket::KfSocketError;
+use kf_socket::KfSink;
+use kf_protocol::api::Offset;
+use kf_protocol::api::RequestMessage;
+use kf_protocol::message::produce::DefaultKfProduceRequest;
+use kf_protocol::message::produce::DefaultKfPartitionRequest;
+use kf_protocol::message::produce::DefaultKfTopicRequest;
+use kf_protocol::message::fetch::DefaultKfFetchRequest;
+use kf_protocol::message::fetch::FetchPartition;
+use kf_protocol::message::fetch::KfFetchRequest;
+use kf_protocol::message::fetch::FetchableTopic;
+use kf_protocol::api::DefaultBatch;
+use kf_protocol::api::DefaultRecord;
+use internal_api::messages::UpdateAllSpusMsg;
+use internal_api::messages::UpdateAllSpusContent;
+use internal_api::messages::Replica;
+use internal_api::UpdateSpuRequest;
+use metadata::partition::ReplicaKey;
+use types::SpuId;
+use metadata::spu::SpuSpec;
+
+use crate::core::DefaultSharedGlobalContext;
+use super::mock_sc::SharedScContext;
+use super::SpuTest;
+use super::SpuServer;
+
+struct ScServerCtx {
+    ctx: SharedScContext,
+    sender: Sender<bool>
+}
+
+pub struct SpuTestRunner<T> {
+    client_id: String,
+    spu_server_specs: Vec<SpuServer>,
+    spu_server_ctx: Vec<DefaultSharedGlobalContext>,
+    spu_senders: Vec<Sender<bool>>,
+    sc_ctx: RwLock<Option<ScServerCtx>>,
+    test: T
+}
+
+impl<T> SpuTestRunner<T> where T: SpuTest + Send + Sync + 'static {
+
+    pub async fn run(client_id: String, test: T) -> Result<(), KfSocketError> {
+
+        debug!("starting test harness");
+        let generator = test.env_configuration();
+
+        let mut spu_server_ctx = vec![];
+        let mut server_futures = vec![];
+        let mut spu_senders = vec![];
+        let mut spu_server_specs = vec![];
+
+        for i in 0..test.followers() + 1 {
+            let spu_spec = generator.create_spu_spec(i as u16);
+            let (sender, receiver) = channel::<bool>(1);
+            let (server, ctx) = generator.create_spu_server(&spu_spec)?;
+            server_futures.push(server.run_shutdown(receiver));
+            spu_senders.push(sender);
+            spu_server_ctx.push(ctx);
+            spu_server_specs.push(spu_spec.into());
+        }
+
+        let runner = SpuTestRunner {
+            client_id,
+            spu_server_specs,
+            spu_server_ctx,
+            spu_senders,
+            sc_ctx: RwLock::new(None),
+            test
+        };
+
+        let arc_runner = Arc::new(runner);
+
+        let (sender, receiver) = channel::<bool>(1);
+        let (sc_server_ctx, sc_server) = generator.create_sc_server(arc_runner.clone());
+
+        arc_runner.set_sc_ctx(ScServerCtx { ctx: sc_server_ctx, sender });
+
+        join3(
+            arc_runner.run_test(),
+            join_all(server_futures),
+            sc_server.run_shutdown(receiver)
+        )
+        .await;
+
+        Ok(())
+    }
+
+    async fn run_test(self: Arc<Self>) {
+
+        // wait until controller starts up
+        sleep(Duration::from_millis(50)).await.expect("panic");
+
+        debug!("starting custom test logic");
+        self.test().main_test(self.clone()).await.expect("test should run");
+        self.terminate_server().await;
+    }
+
+    fn test(&self) -> &T {
+        &self.test
+    }
+
+    // terminating server
+    async fn terminate_server(&self) {
+
+        // terminate servers
+        for i in 0..self.spu_server_specs.len() {
+            let server = &self.spu_server_specs[i];
+            let mut sender = self.spu_sender(i);
+
+            debug!("terminating server: {}", server.id());
+            sender
+                .send(true)
+                .await
+                .expect("spu shutdown should work");
+        }
+
+        // terminate sc
+        if let Some(mut sc_sender) = self.sc_sender() {
+            debug!("terminating sc");
+            sc_sender
+                .send(true)
+                .await
+                .expect("sc shutdown should work");
+        }
+
+    }
+
+    fn set_sc_ctx(&self, ctx: ScServerCtx) {
+        let mut lock = self.sc_ctx.write().unwrap();
+        *lock = Some(ctx);
+    }
+
+    fn spu_sender(&self, spu: usize) -> Sender<bool> {
+        self.spu_senders[spu].clone()
+    }
+
+    fn sc_sender(&self) -> Option<Sender<bool>> {
+        let lock = self.sc_ctx.read().unwrap();
+        lock.as_ref().map(|ctx| ctx.sender.clone())
+    }
+
+    pub fn leader(&self) -> &SpuServer {
+        &self.spu_server_specs[0]
+    }
+
+    pub fn leader_spec(&self) -> &SpuSpec {
+        self.leader().spec()
+    }
+
+    pub fn followers_count(&self) -> usize {
+        self.spu_server_specs.len() - 1
+    }
+
+    pub fn follower_spec(&self, index: usize) -> &SpuSpec {
+        self.spu_server_specs[index + 1].spec()
+    }
+
+    pub fn leader_gtx(&self) -> DefaultSharedGlobalContext {
+        self.spu_server_ctx[0].clone()
+    }
+
+    pub fn follower_gtx(&self, index: usize) -> DefaultSharedGlobalContext {
+        self.spu_server_ctx[index + 1].clone()
+    }
+
+    pub fn spu_metadata(&self) -> UpdateAllSpusContent {
+
+        let mut spu_metadata = UpdateAllSpusContent::default();
+
+        for server in &self.spu_server_specs {
+            spu_metadata.mut_add_spu_content(server.spec());
+        }
+
+        spu_metadata
+    }
+
+    pub fn replica_ids(&self) -> Vec<SpuId> {
+        self.spu_server_specs.iter().map(|follower| follower.spec().id).collect()
+    }
+
+    pub fn replica_metadata(&self, replica: &ReplicaKey) -> Replica {
+
+        let leader_id = self.leader_spec().id;
+
+        Replica::new(
+            replica.clone(),
+            leader_id,
+            self.replica_ids()
+        )
+    }
+
+    pub async fn send_metadata_to_spu<'a>(&'a self, sink: &'a mut KfSink, target_spu: SpuId) -> Result<(), KfSocketError> {
+
+        let mut spu_metadata = self.spu_metadata();
+
+        for replica in self.test.replicas() {
+            spu_metadata.add_replica_by_ref(self.replica_metadata(&replica));
+        }
+
+        for server in &self.spu_server_specs {
+            let spu_id = server.spec().id;
+            if spu_id == target_spu {
+                let spu_req_msg = RequestMessage::new_request(UpdateSpuRequest::encode_request(
+                    UpdateAllSpusMsg::with_content(spu_id, spu_metadata.clone()),
+                ))
+                .set_client_id(self.client_id.clone());
+                debug!("sending spu metadata to spu: {}", spu_id);
+                sink.send_request(&spu_req_msg).await?;
+            }
+        }
+
+        Ok(())
+    }
+
+    /// create sample message
+    pub fn create_producer_msg<S>(&self, msg: S, topic: S, partition: i32) -> RequestMessage<DefaultKfProduceRequest>
+    where S: Into<String>
+    {
+        let msg_string: String = msg.into();
+        let record: DefaultRecord = msg_string.into();
+        let mut batch = DefaultBatch::default();
+        batch.records.push(record);
+
+        let mut topic_request = DefaultKfTopicRequest::default();
+        topic_request.name = topic.into();
+        let mut partition_request = DefaultKfPartitionRequest::default();
+        partition_request.partition_index = partition;
+        partition_request.records.batches.push(batch);
+        topic_request.partitions.push(partition_request);
+        let mut req = DefaultKfProduceRequest::default();
+        req.topics.push(topic_request);
+
+        RequestMessage::new_request(req).set_client_id(self.client_id.clone())
+    }
+
+    pub fn create_fetch_request<S>(&self, offset: Offset, topic: S, partition: i32) -> RequestMessage<DefaultKfFetchRequest>
+    where S: Into<String>
+    {
+        let mut request: DefaultKfFetchRequest = KfFetchRequest::default();
+        let mut part_request = FetchPartition::default();
+        part_request.partition_index = partition;
+        part_request.fetch_offset = offset;
+        let mut topic_request = FetchableTopic::default();
+        topic_request.name = topic.into();
+        topic_request.fetch_partitions.push(part_request);
+
+        request.topics.push(topic_request);
+
+        RequestMessage::new_request(request).set_client_id("test_client")
+    }
+
+}
diff --git a/spu-server/src/tests/mod.rs b/spu-server/src/tests/mod.rs
new file mode 100644
index 0000000000..1f26546436
--- /dev/null
+++ b/spu-server/src/tests/mod.rs
@@ -0,0 +1,2 @@
+pub(crate) mod fixture;
+mod suites;
\ No newline at end of file
diff --git a/spu-server/src/tests/suites/mod.rs b/spu-server/src/tests/suites/mod.rs
new file mode 100644
index 0000000000..2ecd811936
--- /dev/null
+++ b/spu-server/src/tests/suites/mod.rs
@@ -0,0 +1,3 @@
+mod test_fetch;
+mod test_replication;
+mod test_offsets;
diff --git a/spu-server/src/tests/suites/test_fetch.rs b/spu-server/src/tests/suites/test_fetch.rs
new file mode 100644
index 0000000000..a6cff2a693
--- /dev/null
+++ b/spu-server/src/tests/suites/test_fetch.rs
@@ -0,0 +1,104 @@
+use std::sync::Arc;
+
+use log::debug;
+use futures::future::BoxFuture;
+use futures::FutureExt;
+
+use future_helper::test_async;
+use kf_socket::KfSocketError;
+use metadata::partition::ReplicaKey;
+use kf_protocol::api::DefaultRecord;
+
+use crate::tests::fixture::TestGenerator;
+use crate::tests::fixture::SpuTest;
+use crate::tests::fixture::SpuTestRunner;
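+// Typical driver sequence inside a test body (sketch, based on the helpers in
+// test_runner.rs; offsets assume an empty log):
+//
+//     let produce = runner.create_producer_msg("message", "topic1", 0);
+//     let resp = runner.leader().send_to_public_server(&produce).await?;
+//     assert_eq!(resp.response.responses[0].partitions[0].base_offset, 0);
+//
+//     let fetch = runner.create_fetch_request(0, "topic1", 0);
+//     let fetched = runner.leader().send_to_public_server(&fetch).await?;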
+const TOPIC_ID: &str = "topic1";
+const PARTITION_ID: i32 = 0;
+
+fn test_repl_id() -> ReplicaKey {
+    ReplicaKey::new("topic1", 0)
+}
+
+struct SimpleFetchTest {}
+
+async fn test_fetch(runner: Arc<SpuTestRunner<SimpleFetchTest>>) -> Result<(), KfSocketError> {
+    let _replica = test_repl_id();
+    // runner.send_metadata_to_all(&replica).await.expect("send metadata");
+
+    let produce_req_msg = runner.create_producer_msg("message", TOPIC_ID, PARTITION_ID);
+    let produce_resp = runner
+        .leader()
+        .send_to_public_server(&produce_req_msg)
+        .await?;
+    let response = produce_resp.response;
+    let topic_responses = response.responses;
+    assert_eq!(topic_responses.len(), 1);
+    let topic_response = &topic_responses[0];
+    assert_eq!(topic_response.name, TOPIC_ID);
+    let partition_responses = &topic_response.partitions;
+    assert_eq!(partition_responses.len(), 1);
+    assert_eq!(partition_responses[0].base_offset, 0);
+
+    // fetch the record back and verify it
+    let fetch_req_msg = runner.create_fetch_request(0, TOPIC_ID, PARTITION_ID);
+    let fetch_response = runner
+        .leader()
+        .send_to_public_server(&fetch_req_msg)
+        .await?;
+    debug!("fetch response: {:#?}", fetch_response);
+    let response = fetch_response.response;
+    assert_eq!(response.topics.len(), 1);
+    let topic_response = &response.topics[0];
+    assert_eq!(topic_response.name, TOPIC_ID);
+    assert_eq!(topic_response.partitions.len(), 1);
+    let partition_response = &topic_response.partitions[0];
+    assert_eq!(partition_response.high_watermark, 1);
+    let batches = &partition_response.records.batches;
+    assert_eq!(batches.len(), 1);
+    let batch = &batches[0];
+    assert_eq!(batch.records.len(), 1);
+    let record = &batch.records[0];
+    let test_record: DefaultRecord = "message".to_owned().into();
+    assert_eq!(
+        record.value.inner_value_ref(),
+        test_record.value.inner_value_ref()
+    );
+
+    Ok(())
+}
+
+impl SpuTest for SimpleFetchTest {
+    type ResponseFuture = BoxFuture<'static, Result<(), KfSocketError>>;
+
+    fn env_configuration(&self) -> TestGenerator {
+        TestGenerator::default()
+            .set_base_id(6000)
+            .set_base_port(9800)
+            .set_base_dir("fetch_test")
+            .init()
+    }
+
+    fn followers(&self) -> usize {
+        0
+    }
+
+    fn replicas(&self) -> Vec<ReplicaKey> {
+        vec![test_repl_id()]
+    }
+
+    fn main_test(&self, runner: Arc<SpuTestRunner<Self>>) -> Self::ResponseFuture {
+        async move { test_fetch(runner).await }.boxed()
+    }
+}
+
+#[test_async]
+async fn simple_fetch_test() -> Result<(), KfSocketError> {
+    let test = SimpleFetchTest {};
+    SpuTestRunner::run("fetch test".to_owned(), test)
+        .await
+        .expect("test runner should not fail");
+    Ok(())
+}
diff --git a/spu-server/src/tests/suites/test_offsets.rs b/spu-server/src/tests/suites/test_offsets.rs
new file mode 100644
index 0000000000..122e38ea4b
--- /dev/null
+++ b/spu-server/src/tests/suites/test_offsets.rs
@@ -0,0 +1,117 @@
+use std::sync::Arc;
+
+use futures::future::BoxFuture;
+use futures::FutureExt;
+
+use future_helper::test_async;
+use kf_socket::KfSocketError;
+use metadata::partition::ReplicaKey;
+use kf_protocol::api::RequestMessage;
+use spu_api::offsets::FlvFetchOffsetsRequest;
+use spu_api::offsets::FetchOffsetTopic;
+use spu_api::offsets::FetchOffsetPartition;
+use kf_protocol::api::FlvErrorCode;
+
+use crate::tests::fixture::TestGenerator;
+use crate::tests::fixture::SpuTest;
+use crate::tests::fixture::SpuTestRunner;
+
+const TOPIC_ID: &str = "topic1";
+const PARTITION_ID: i32 = 0;
+
+fn test_repl_id() -> ReplicaKey {
+    ReplicaKey::new("topic1", 0)
+}
+
+struct OffsetsFetchTest {}
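+// Expected offset bookkeeping in this suite: with an empty replica,
+// start_offset = 0 and last_stable_offset = 0; after producing a single
+// record, the leader's high watermark advances, so last_stable_offset = 1
+// while start_offset stays 0. The assertions below check exactly that
+// transition, plus the PartitionNotLeader error for an unknown topic.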
+async fn test_fetch(runner: Arc<SpuTestRunner<OffsetsFetchTest>>) -> Result<(), KfSocketError> {
+    // verify invalid request
+    let mut request = FlvFetchOffsetsRequest::default();
+    let mut topic = FetchOffsetTopic::default();
+    topic.name = "dummy".into();
+    let mut partition = FetchOffsetPartition::default();
+    partition.partition_index = PARTITION_ID;
+    topic.partitions.push(partition);
+    request.topics.push(topic);
+    let req_msg = RequestMessage::new_request(request).set_client_id("offset fetch tester");
+    let produce_resp = runner
+        .leader()
+        .send_to_public_server(&req_msg)
+        .await
+        .expect("offset fetch");
+    let topic_responses = produce_resp.response.topics;
+    assert_eq!(topic_responses.len(), 1);
+    let topic_response = &topic_responses[0];
+    assert_eq!(topic_response.name, "dummy");
+    let partition_responses = &topic_response.partitions;
+    assert_eq!(partition_responses.len(), 1);
+    let partition = &partition_responses[0];
+    assert_eq!(partition.error_code, FlvErrorCode::PartitionNotLeader);
+
+    let produce_req_msg = runner.create_producer_msg("message", TOPIC_ID, PARTITION_ID);
+    let _response = runner
+        .leader()
+        .send_to_public_server(&produce_req_msg)
+        .await
+        .expect("producer must not fail");
+
+    let mut request = FlvFetchOffsetsRequest::default();
+    let mut topic = FetchOffsetTopic::default();
+    topic.name = TOPIC_ID.into();
+    let mut partition = FetchOffsetPartition::default();
+    partition.partition_index = PARTITION_ID;
+    topic.partitions.push(partition);
+    request.topics.push(topic);
+    let req_msg = RequestMessage::new_request(request).set_client_id("offset fetch tester");
+    let produce_resp = runner
+        .leader()
+        .send_to_public_server(&req_msg)
+        .await
+        .expect("offset fetch");
+    let topic_responses = produce_resp.response.topics;
+    assert_eq!(topic_responses.len(), 1);
+    let topic_response = &topic_responses[0];
+    assert_eq!(topic_response.name, TOPIC_ID);
+    let partition_responses = &topic_response.partitions;
+    assert_eq!(partition_responses.len(), 1);
+    let partition = &partition_responses[0];
+    assert_eq!(partition.error_code, FlvErrorCode::None);
+    assert_eq!(partition.last_stable_offset, 1);
+    assert_eq!(partition.start_offset, 0);
+
+    Ok(())
+}
+
+impl SpuTest for OffsetsFetchTest {
+    type ResponseFuture = BoxFuture<'static, Result<(), KfSocketError>>;
+
+    fn env_configuration(&self) -> TestGenerator {
+        TestGenerator::default()
+            .set_base_id(6100)
+            .set_base_port(9900)
+            .set_base_dir("offset_fetch_test")
+            .init()
+    }
+
+    fn followers(&self) -> usize {
+        0
+    }
+
+    fn replicas(&self) -> Vec<ReplicaKey> {
+        vec![test_repl_id()]
+    }
+
+    fn main_test(&self, runner: Arc<SpuTestRunner<Self>>) -> Self::ResponseFuture {
+        async move { test_fetch(runner).await }.boxed()
+    }
+}
+
+#[test_async]
+async fn flv_offset_fetch_test() -> Result<(), KfSocketError> {
+    let test = OffsetsFetchTest {};
+    SpuTestRunner::run("offset fetch test".to_owned(), test)
+        .await
+        .expect("test runner should not fail");
+    Ok(())
+}
diff --git a/spu-server/src/tests/suites/test_replication.rs b/spu-server/src/tests/suites/test_replication.rs
new file mode 100644
index 0000000000..63c9d074ac
--- /dev/null
+++ b/spu-server/src/tests/suites/test_replication.rs
@@ -0,0 +1,163 @@
+use std::time::Duration;
+use std::sync::Arc;
+
+use log::debug;
+use futures::future::BoxFuture;
+use futures::FutureExt;
+
+use future_helper::test_async;
+use future_helper::sleep;
+use kf_socket::KfSocketError;
+use metadata::partition::ReplicaKey;
+use storage::ReplicaStorage;
+
+use crate::tests::fixture::TestGenerator;
+use crate::tests::fixture::SpuTest;
+use crate::tests::fixture::SpuTestRunner;
+const TOPIC_ID: &str = "topic1";
+const PARTITION_ID: i32 = 0;
+
+fn test_repl_id() -> ReplicaKey {
+    ReplicaKey::new("topic1", 0)
+}
+
+struct FollowReplicationTest {
+    replicas: usize,
+    base: u16,
+}
+
+impl FollowReplicationTest {
+    fn new(replicas: usize, base: u16) -> Self {
+        Self { replicas, base }
+    }
+}
+
+// simple replication to a single follower
+async fn inner_test(
+    runner: Arc<SpuTestRunner<FollowReplicationTest>>,
+) -> Result<(), KfSocketError> {
+    let leader_spu = runner.leader_spec();
+    let leader_gtx = runner.leader_gtx();
+    let _replica = test_repl_id();
+
+    // wait until all followers have synced up
+    sleep(Duration::from_millis(150)).await.expect("panic");
+
+    // verify leader has created replica and received fetch stream from follower
+    let leaders_state = leader_gtx.leaders_state();
+    assert!(leader_gtx.spus().spu(&leader_spu.name()).is_some());
+    assert!(leaders_state.get_replica(&test_repl_id()).is_some());
+
+    // check it has established socket sinks to all followers
+    for i in 0..runner.followers_count() {
+        let follower_id = runner.follower_spec(i).id;
+        assert!(leader_gtx.follower_sinks().get_sink(&follower_id).is_some());
+    }
+    // verify that each follower has created a connection controller and a follower replica
+    for i in 0..runner.followers_count() {
+        let follower_gtx = runner.follower_gtx(i);
+        let followers_state = follower_gtx.followers_state();
+        assert!(followers_state.mailbox(&leader_spu.id).is_some());
+        assert!(followers_state.has_controller(&leader_spu.id));
+        assert!(followers_state.get_replica(&test_repl_id()).is_some());
+
+        let follower_replica = followers_state
+            .get_replica(&test_repl_id())
+            .expect("replica should exist");
+        assert_eq!(follower_replica.storage().get_end_offset(), 0);
+        drop(follower_replica); // unlock so the rest of the test can proceed
+    }
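+    // Worked expectation for the rest of this test: one produced record lands
+    // at offset 0, so the leader's end offset becomes 1; once every follower
+    // acks, the leader advances the high watermark to 1, and the followers
+    // converge to end_offset = 1, high_watermark = 1, which is exactly what
+    // the assertions below check.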
+    // send records to leader; this will propagate the replica to the followers
+
+    let produce_req_msg = runner.create_producer_msg("message", TOPIC_ID, PARTITION_ID);
+    let produce_resp = runner
+        .leader()
+        .send_to_public_server(&produce_req_msg)
+        .await?;
+    let response = produce_resp.response;
+    let topic_responses = response.responses;
+    assert_eq!(topic_responses.len(), 1);
+    let topic_response = &topic_responses[0];
+    assert_eq!(topic_response.name, TOPIC_ID);
+    let partition_responses = &topic_response.partitions;
+    assert_eq!(partition_responses.len(), 1);
+    assert_eq!(partition_responses[0].base_offset, 0);
+
+    debug!("sleep for 150ms to finish replication");
+    sleep(Duration::from_millis(150)).await.expect("panic");
+
+    for i in 0..runner.followers_count() {
+        let follower_gtx = runner.follower_gtx(i);
+        let followers_state = follower_gtx.followers_state();
+        // verify that the follower replica has received the records from the leader
+        let follower_replica = followers_state
+            .get_replica(&test_repl_id())
+            .expect("follower replica should exist");
+        assert_eq!(follower_replica.storage().get_end_offset(), 1);
+        assert_eq!(follower_replica.storage().get_high_watermark(), 1);
+
+        drop(follower_replica);
+    }
+
+    // verify that the leader has updated its own offsets
+    let leader_replica = leaders_state
+        .get_replica(&test_repl_id())
+        .expect("leader replica should exist");
+    assert_eq!(leader_replica.storage().get_end_offset(), 1);
+    assert_eq!(leader_replica.storage().get_high_watermark(), 1);
+
+    // verify that the leader has updated its follower offsets and high watermark
+    for i in 0..runner.followers_count() {
+        let follower_id = runner.follower_spec(i).id;
+        let follower_info_slot = leader_replica
+            .followers(&follower_id)
+            .expect("followers info");
+        let follower_info = follower_info_slot.expect("value");
+        assert_eq!(follower_info.end_offset(), 1);
+        assert_eq!(follower_info.high_watermark(), 1);
+    }
+    drop(leader_replica);
+
+    Ok(())
+}
+
+impl SpuTest for FollowReplicationTest {
+    type ResponseFuture = BoxFuture<'static, Result<(), KfSocketError>>;
+
+    fn env_configuration(&self) -> TestGenerator {
+        TestGenerator::default()
+            .set_base_id(self.base as i32)
+            .set_base_port(self.base)
+            .set_base_dir(format!("replication_base_{}", self.base))
+            .init()
+    }
+
+    fn followers(&self) -> usize {
+        self.replicas
+    }
+
+    fn replicas(&self) -> Vec<ReplicaKey> {
+        vec![test_repl_id()]
+    }
+
+    fn main_test(&self, runner: Arc<SpuTestRunner<Self>>) -> Self::ResponseFuture {
+        async move { inner_test(runner).await }.boxed()
+    }
+}
+
+#[test_async]
+async fn follower_replication_test_2() -> Result<(), KfSocketError> {
+    // Todo: fix the intermittent failures (over 50%)
+    // SpuTestRunner::run("replication with 2 followers".to_owned(), FollowReplicationTest::new(2, 6000)).await.expect("test runner should not fail");
+
+    Ok(())
+}
+
+#[test_async]
+async fn follower_replication_test_3() -> Result<(), KfSocketError> {
+    // Todo: fix the intermittent failures (over 50%)
+    // SpuTestRunner::run("replication with 3 followers".to_owned(), FollowReplicationTest::new(3, 6100)).await.expect("test runner should not fail");
+    Ok(())
+}
diff --git a/spu-server/test-data/config/spu_invalid.toml b/spu-server/test-data/config/spu_invalid.toml
new file mode 100644
index 0000000000..7fd6e81011
--- /dev/null
+++ b/spu-server/test-data/config/spu_invalid.toml
@@ -0,0 +1,5 @@
+unknown = "value"
+
+
+
+
diff --git a/spu-server/test-data/config/spu_server.toml b/spu-server/test-data/config/spu_server.toml
new file mode 100644
index 0000000000..1041fb9fdb
--- /dev/null
+++ b/spu-server/test-data/config/spu_server.toml
b/spu-server/test-data/config/spu_server.toml @@ -0,0 +1,33 @@ +version = "1.0" + +[spu] +id = 5050 +rack = "rack-1" + +[servers] + +[servers.public] +host = "127.0.0.1" +port = 5555 + +[servers.private] +host = "127.0.0.1" +port = 5556 + +[controller] +host = "127.0.0.1" +port = 5554 +retry_timeout_ms = 2000 + +[configurations] + +[configurations.replication] +min_in_sync_replicas = 3 + +[configurations.log] +base_dir = "/tmp/data_streams" +size = "2Gi" +index_max_bytes = 888888 +index_max_interval_bytes = 2222 +segment_max_bytes = 9999999 + diff --git a/spu-server/test-data/config/spu_server_small.toml b/spu-server/test-data/config/spu_server_small.toml new file mode 100644 index 0000000000..3fc37b358f --- /dev/null +++ b/spu-server/test-data/config/spu_server_small.toml @@ -0,0 +1,9 @@ +version = "1.0" + +[spu] +id = 12 +token_name = "121212" + +[controller] +host = "1.1.1.1" +port = 2323 diff --git a/spu-server/test-data/config/token_secret_bad b/spu-server/test-data/config/token_secret_bad new file mode 100644 index 0000000000..49dbdaaae1 --- /dev/null +++ b/spu-server/test-data/config/token_secret_bad @@ -0,0 +1 @@ +abwqr-?h*&Hafasd \ No newline at end of file diff --git a/spu-server/test-data/config/token_secret_good b/spu-server/test-data/config/token_secret_good new file mode 100644 index 0000000000..3bfb1ed2f1 --- /dev/null +++ b/spu-server/test-data/config/token_secret_good @@ -0,0 +1 @@ +abcdefgh12345678 \ No newline at end of file diff --git a/storage/Cargo.toml b/storage/Cargo.toml new file mode 100644 index 0000000000..9c3e559129 --- /dev/null +++ b/storage/Cargo.toml @@ -0,0 +1,37 @@ +[package] +edition = "2018" +name = "storage" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] + +[[bin]] +name = "storage-cli" +path = "src/bin/cli.rs" +doc = false +required-features = ["cli"] + + +[dependencies] +log = "0.4.6" +libc = "0.2.58" +bytes = "0.4.12" +futures-preview = { version = "0.3.0-alpha.13" } +pin-utils = "0.1.0-alpha.4" +structopt = { version = "0.2.14", optional = true} +serde = { version ="1.0.82", features = ['derive'] } +future-aio = { path = "../future-aio"} +kf-protocol = { path = "../kf-protocol"} +kf-socket = { path = "../kf-socket"} +future-helper = { path = "../future-helper" } +utils = { path= "../utils", optional = false } +types = { path= "../types"} + + +[dev-dependencies] +future-helper = { path = "../future-helper", features=["fixture"]} +utils = { path = "../utils", features = ["fixture"]} + + + +[features] +cli = ["structopt"] \ No newline at end of file diff --git a/storage/rust-toolchain b/storage/rust-toolchain new file mode 120000 index 0000000000..9327ba4034 --- /dev/null +++ b/storage/rust-toolchain @@ -0,0 +1 @@ +../rust-toolchain \ No newline at end of file diff --git a/storage/src/batch.rs b/storage/src/batch.rs new file mode 100644 index 0000000000..7b7ddfb4fb --- /dev/null +++ b/storage/src/batch.rs @@ -0,0 +1,325 @@ +use std::io::Error as IoError; +use std::io::Cursor; +use std::io::ErrorKind; +use std::io::SeekFrom; +use std::fmt::Debug; +use std::marker::PhantomData; +use std::pin::Pin; +use std::task::Context; +use std::task::Poll; + + +use log::trace; +use log::debug; +use futures::Future; +use futures::Stream; +use pin_utils::pin_mut; + +use kf_protocol::api::Batch; +use kf_protocol::api::BatchRecords; +use kf_protocol::api::DefaultBatchRecords; +use kf_protocol::api::BATCH_PREAMBLE_SIZE; +use kf_protocol::api::BATCH_HEADER_SIZE; +use kf_protocol::api::Size; +use kf_protocol::api::Offset; + +use future_aio::fs::AsyncFile; +use 
crate::StorageError; + +const BATCH_FILE_HEADER_SIZE: usize = BATCH_PREAMBLE_SIZE + BATCH_HEADER_SIZE; + +pub type DefaultFileBatchStream = FileBatchStream; + +/// hold information about position of batch in the file +pub struct FileBatchPos where R: BatchRecords { + inner: Batch, + pos: Size +} + +impl FileBatchPos where R: BatchRecords { + + fn new(inner: Batch,pos: Size) -> Self { + FileBatchPos{ + inner, + pos + } + } + + pub fn get_batch(&self) -> &Batch { + &self.inner + } + + pub fn get_pos(&self) -> Size { + self.pos + } + + pub fn get_base_offset(&self) -> Offset { + self.inner.get_base_offset() + } + + pub fn get_last_offset(&self) -> Offset { + self.inner.get_last_offset() + } + + /// batch length (without preamble) + pub fn len(&self) -> Size { + self.inner.batch_len as Size + } + + /// total batch length including preamble + pub fn total_len(&self) -> Size { + self.len() + BATCH_PREAMBLE_SIZE as Size + } + + pub fn records_remainder_bytes(&self,remainder: usize) -> usize { + self.inner.records.remainder_bytes(remainder) + } + + /// decode next batch from file + pub(crate) async fn from(file: &mut AsyncFile,pos: Size) -> Result>,IoError> { + + let mut bytes = vec![0u8; BATCH_FILE_HEADER_SIZE]; + let read_len = file.read(&mut bytes).await?; + trace!("file batch: read preamble and header {} bytes out of {}", read_len, BATCH_FILE_HEADER_SIZE); + + if read_len == 0 { + trace!("no more bytes,there are no more batches"); + return Ok(None); + } + + if read_len < BATCH_FILE_HEADER_SIZE { + return Err(IoError::new( + ErrorKind::UnexpectedEof, + "not enought for header", + )) + } + + let mut cursor = Cursor::new(bytes); + let mut batch = Batch::default(); + batch.decode_from_file_buf(&mut cursor,0)?; + let mut file_batch = FileBatchPos::new(batch,pos); + + let remainder = file_batch.len() as usize - BATCH_HEADER_SIZE as usize; + trace!( + "file batch: offset: {}, len: {}, total: {}, remainder: {}, pos: {}", + file_batch.get_batch().get_last_offset_delta(), + file_batch.len(), + file_batch.total_len(), + remainder, + pos + ); + + + if file_batch.records_remainder_bytes(remainder) > 0 { + file_batch.read_records(file,remainder).await? 
+ } else { + file_batch.seek_to_next_batch(file,remainder).await?; + } + + Ok(Some(file_batch)) + + + } + + /// decode the records + async fn read_records<'a>(&'a mut self, file: &'a mut AsyncFile,remainder: usize) -> Result<(),IoError> { + + let mut bytes = vec![0u8; remainder]; + let read_len = file.read(&mut bytes).await?; + trace!("file batch: read records {} bytes out of {}", read_len, remainder); + + if read_len < remainder { + return Err(IoError::new( + ErrorKind::UnexpectedEof, + "not enought for records", + )) + } + + let mut cursor = Cursor::new(bytes); + self.inner.records.decode(&mut cursor,0)?; + + Ok(()) + } + + async fn seek_to_next_batch<'a>(&'a self, file: &'a mut AsyncFile,remainder: usize ) -> Result<(), IoError> { + + + if remainder > 0 { + trace!( + "file batch skipping: content {} bytes", + remainder + ); + let seek_position = file.seek(SeekFrom::Current(remainder as Offset)).await?; + trace!("file batch new position: {}", seek_position); + } + + Ok(()) + } + + +} + + +// stream to iterate batch +pub struct FileBatchStream where R: Default + Debug{ + pos: Size, + invalid: Option, + file: AsyncFile, + data: PhantomData + +} + +impl FileBatchStream where R: Default + Debug { + + #[allow(dead_code)] + pub fn new(file: AsyncFile) -> FileBatchStream { + + trace!("opening batch stream on: {}",file); + FileBatchStream { + pos: 0, + file: file.into(), + invalid: None, + data: PhantomData + } + } + + + + + #[allow(dead_code)] + pub async fn new_with_pos(mut file: AsyncFile,pos: Size) -> Result,StorageError> { + + trace!("opening batch stream at: {}",pos); + let seek_position = file.seek(SeekFrom::Start(pos as u64)).await?; + if seek_position != pos as u64{ + return Err(IoError::new( + ErrorKind::UnexpectedEof, + "not enought for position", + ).into()) + } + Ok(FileBatchStream { + pos, + file: file.into(), + invalid: None, + data: PhantomData + }) + } + + /// check if it is invalid + pub fn invalid(self) -> Option { + self.invalid + } + +} + + + +impl FileBatchStream where R: BatchRecords { + + + + // same as next + pub(crate) async fn inner_next(&mut self) -> Option> { + + match FileBatchPos::from(&mut self.file,self.pos).await { + Ok(batch_res) => { + if let Some(ref batch) = batch_res { + self.pos = self.pos + batch.total_len() as Size; + } + batch_res + }, + Err(err) => { + debug!("error getting batch: {}",err); + self.invalid = Some(err); + None + } + } + } + +} + + +impl Stream for FileBatchStream { + + type Item = FileBatchPos; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + + let ft = self.inner_next(); + pin_mut!(ft); + ft.poll(cx) + + } + +} + + +#[cfg(test)] +mod tests { + + use std::env::temp_dir; + use std::path::PathBuf; + + use futures::sink::SinkExt; + use futures::stream::StreamExt; + + use future_helper::test_async; + + use crate::ConfigOption; + use crate::StorageError; + use crate::segment::MutableSegment; + use crate::fixture::create_batch; + use crate::fixture::create_batch_with_producer; + use crate::fixture::ensure_new_dir; + + + fn default_option(base_dir: PathBuf) -> ConfigOption { + ConfigOption { + base_dir, + segment_max_bytes: 1000, + index_max_bytes: 1000, + ..Default::default() + } + } + + #[test_async] + async fn test_decode_batch_stream() -> Result<(),StorageError> { + + let test_dir = temp_dir().join("batch-stream"); + ensure_new_dir(&test_dir)?; + + let option = default_option(test_dir.clone()); + + let mut seg_sink = MutableSegment::create(300, &option).await?; + + seg_sink.send(create_batch()).await?; + 
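
Aside: the `poll_next` above (and the near-identical impl in `batch_header.rs` later in this diff) shows a recurring pattern in this codebase: implementing `Stream` by rebuilding the future of an async method on every poll and polling it once. Below is a minimal sketch of the same pattern with an invented `Counter` type (not from this diff); note it is only correct because the async method completes without ever returning `Pending`, since the future is rebuilt from scratch on each `poll_next` call and any partial state would be lost.

```rust
use std::pin::Pin;
use std::task::{Context, Poll};

use futures::{Future, Stream};
use pin_utils::pin_mut;

struct Counter {
    next: u32,
    max: u32,
}

impl Counter {
    // stands in for inner_next(): an async fn that never awaits anything pending
    async fn inner_next(&mut self) -> Option<u32> {
        if self.next < self.max {
            self.next += 1;
            Some(self.next)
        } else {
            None
        }
    }
}

impl Stream for Counter {
    type Item = u32;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<u32>> {
        // build the future on the stack, pin it, and poll it exactly once;
        // Counter is Unpin, so Pin<&mut Self> derefs to &mut Self here
        let ft = self.inner_next();
        pin_mut!(ft);
        ft.poll(cx)
    }
}
```
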
seg_sink.send(create_batch_with_producer(25,2)).await?; + + + let mut stream = seg_sink.open_default_batch_stream().await.expect("open full batch stream"); + + let batch1 = stream.next().await.expect("batch"); + assert_eq!(batch1.get_batch().get_base_offset(),300); + assert_eq!(batch1.get_batch().get_header().producer_id,12); + assert_eq!(batch1.get_batch().records.len(),2); + assert_eq!(batch1.get_pos(),0); + assert_eq!(batch1.get_batch().records[0].get_offset_delta(),0); + assert_eq!(batch1.get_batch().records[0].value.inner_value_ref(),&Some(vec![10,20])); + assert_eq!(batch1.get_batch().records[1].get_offset_delta(),1); + + let batch2 = stream.next().await.expect("batch"); + assert_eq!(batch2.get_batch().get_base_offset(),302); + assert_eq!(batch2.get_batch().get_header().producer_id,25); + assert_eq!(batch2.get_batch().records.len(),2); + assert_eq!(batch2.get_pos(),79); + assert_eq!(batch2.get_batch().records[0].get_offset_delta(),0); + assert!((stream.next().await).is_none()); + + + Ok(()) + + } + + + +} \ No newline at end of file diff --git a/storage/src/batch_header.rs b/storage/src/batch_header.rs new file mode 100644 index 0000000000..a4c0a41890 --- /dev/null +++ b/storage/src/batch_header.rs @@ -0,0 +1,166 @@ + +use std::io::Error as IoError; +use std::task::Context; +use std::task::Poll; + +use futures::Future; +use futures::Stream; +use std::pin::Pin; + +use pin_utils::pin_mut; + +use kf_protocol::Version; +use kf_protocol::bytes::Buf; +use kf_protocol::bytes::BufMut; +use kf_protocol::api::BatchRecords; +use kf_protocol::Decoder; +use kf_protocol::Encoder; + +use crate::batch::FileBatchStream; +use crate::batch::FileBatchPos; + +pub type BatchHeaderStream = FileBatchStream; + +pub type BatchHeaderPos = FileBatchPos; + + +#[derive(Default,Debug)] +pub struct FileEmptyRecords { + +} + +impl BatchRecords for FileEmptyRecords { + + fn remainder_bytes(&self,_remainder: usize ) -> usize { + 0 + } +} + +// nothing to decode for header +impl Decoder for FileEmptyRecords { + + fn decode(&mut self, _src: &mut T,_version: Version) -> Result<(), IoError> where T: Buf, + { + Ok(()) + } +} + + +// nothing to do decode for header +impl Encoder for FileEmptyRecords { + + fn write_size(&self,_versio: Version) -> usize { + 0 + } + + fn encode(&self, _dest: &mut T,_version: Version) -> Result<(), IoError> where T: BufMut + { + Ok(()) + } +} + + +/// need to create separate implemention of batch stream +/// for specific implemetnation due to problem with compiler +impl Stream for FileBatchStream { + + type Item = BatchHeaderPos; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + + let ft = self.inner_next(); + pin_mut!(ft); + ft.poll(cx) + + } + +} + + + +#[cfg(test)] +mod tests { + + use std::env::temp_dir; + + use futures::sink::SinkExt; + use futures::stream::StreamExt; + + use future_aio::fs::AsyncFile; + use future_helper::test_async; + + use crate::fixture::create_batch; + use crate::fixture::create_batch_with_producer; + use crate::fixture::ensure_clean_file; + use crate::mut_records::MutFileRecords; + use crate::ConfigOption; + use crate::StorageError; + + use super::BatchHeaderStream; + use super::BatchHeaderPos; + + const TEST_FILE_NAME: &str = "00000000000000000200.log"; // for offset 200 + + fn default_option() -> ConfigOption { + ConfigOption { + base_dir: temp_dir(), + segment_max_bytes: 1000, + ..Default::default() + } + } + + #[test_async] + async fn test_decode_batch_header_simple() -> Result<(), StorageError> { + let test_file = 
temp_dir().join(TEST_FILE_NAME); + ensure_clean_file(&test_file); + + let options = default_option(); + + let mut msg_sink = MutFileRecords::create(200, &options).await.expect("create sink"); + + msg_sink.send(create_batch()).await.expect("send batch"); + + let mut file = AsyncFile::open(&test_file).await.expect("open test file"); + + let batch_res = BatchHeaderPos::from(&mut file, 0).await.expect("open header"); + + if let Some(batch) = batch_res { + let header = batch.get_batch().get_header(); + assert_eq!(header.producer_id, 12); + } else { + assert!(false, "batch not found"); + } + + Ok(()) + } + + const TEST_FILE_NAME2: &str = "00000000000000000201.log"; // for offset 201 + + #[test_async] + async fn test_decode_batch_header_multiple() -> Result<(), StorageError> { + let test_file = temp_dir().join(TEST_FILE_NAME2); + ensure_clean_file(&test_file); + + let options = default_option(); + + let mut msg_sink = MutFileRecords::create(201, &options).await?; + + msg_sink.send(create_batch()).await?; + msg_sink.send(create_batch_with_producer(25, 2)).await?; + + let file = AsyncFile::open(&test_file).await?; + + let mut stream = BatchHeaderStream::new(file); + + let batch_pos1 = stream.next().await.expect("batch"); + assert_eq!(batch_pos1.get_batch().get_header().producer_id, 12); + assert_eq!(batch_pos1.get_pos(), 0); + let batch_pos2 = stream.next().await.expect("batch"); + assert_eq!(batch_pos2.get_batch().get_header().producer_id, 25); + assert_eq!(batch_pos2.get_pos(), 79); // 2 records + assert!((stream.next().await).is_none()); + + Ok(()) + } + +} diff --git a/storage/src/bin/cli.rs b/storage/src/bin/cli.rs new file mode 100644 index 0000000000..b833b3846c --- /dev/null +++ b/storage/src/bin/cli.rs @@ -0,0 +1,153 @@ +// Storage CLI +#![feature(async_await)] + +use std::path::PathBuf; +use std::io::Error as IoError; + +use structopt::StructOpt; +use futures::stream::StreamExt; + + +use future_aio::fs::AsyncFile; +use future_helper::run_block_on; + +use storage::DefaultFileBatchStream; +use storage::LogIndex; +use storage::StorageError; +use storage::OffsetPosition; + +#[derive(Debug,StructOpt)] +#[structopt(name="storage",about="Fluvio Storage CLI")] +enum Main{ + #[structopt(name="log")] + Log(LogOpt), + #[structopt(name="index")] + Index(IndexOpt) +} + + +fn main() { + utils::init_logger(); + + let opt = Main::from_args(); + + let res = match opt { + Main::Log(opt) => dump_log(opt), + Main::Index(opt) => dump_index(opt) + }; + + if let Err(err) = res { + println!("error occurred: {:#?}",err) + } + +} + + + +#[derive(Debug,StructOpt)] +pub(crate) struct LogOpt{ + #[structopt(parse(from_os_str))] + file_name: PathBuf +} + + +async fn print_logs(path: PathBuf) -> Result<(),IoError> { + + let file = AsyncFile::open(path).await?; + + let mut batch_stream = DefaultFileBatchStream::new(file); + + // println!("base offset: {}",batch_stream.get_base_offset()); + + while let Some(file_batch) = batch_stream.next().await { + + // let batch_base_offset = batch.get_base_offset(); + let batch = file_batch.get_batch(); + //let header = batch.get_header(); + // let offset_delta = header.last_offset_delta; + + println!("batch offset: {}, len: {}, pos: {}", batch.get_base_offset(),file_batch.len(),file_batch.get_pos()); + + for record in &batch.records { + + println!("record offset: {}",record.get_offset_delta()); + } + + } + + Ok(()) + +} + + + +pub(crate) fn dump_log(opt: LogOpt) -> Result<(), IoError> { + + let file_path = opt.file_name; + + println!("dumping batch: {:#?}",file_path); + + let ft = 
print_logs(file_path); + let result = run_block_on(ft); + if let Err(err) = result { + println!("error in async: {:#?}",err) + }; + + Ok(()) + +} + + + +#[derive(Debug,StructOpt)] +pub(crate) struct IndexOpt{ + #[structopt(parse(from_os_str))] + file_name: PathBuf +} + +pub(crate) fn dump_index(opt: IndexOpt) -> Result<(), IoError> { + + let file_path = opt.file_name; + + println!("dumping index: {:#?}",file_path); + + let ft = print_index(file_path); + let result = run_block_on(ft); + if let Err(err) = result { + println!("error in async: {:#?}",err) + }; + + Ok(()) + +} + +const MAX: u32 = 100; + +async fn print_index(path: PathBuf) -> Result<(),StorageError> { + + let log = LogIndex::open_from_path(path).await?; + + println!("has {} bytes",log.len()); + let entries = log.len() / 8; // len() is in bytes; each entry is two u32s (8 bytes) + let mut count: u32 = 0; + let mut display: u32 = 0; + for i in 0..entries { + let (offset,pos) = log[i as usize].to_be(); + if offset > 0 && pos > 0 { + count = count + 1; + if count < MAX { + println!("i: {} offset: {} pos: {}",i,offset,pos); + display = display + 1; + } + } + } + + if count > MAX { + println!("there were {} entries; only {} were displayed",count,display); + } else { + println!("there were {} entries:",count); + } + + Ok(()) + +} diff --git a/storage/src/checkpoint.rs b/storage/src/checkpoint.rs new file mode 100644 index 0000000000..5f17f0be56 --- /dev/null +++ b/storage/src/checkpoint.rs @@ -0,0 +1,181 @@ +use std::fmt::Display; +use std::io::Cursor; +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::io::SeekFrom; + +use bytes::Buf; +use bytes::BufMut; +use futures::io::AsyncReadExt; +use futures::io::AsyncWriteExt; +use log::debug; +use log::trace; + +use future_aio::fs::AsyncFile; + +use crate::ConfigOption; + +pub trait ReadToBuf: Sized { + fn read_from<B>(buf: &mut B) -> Self + where + B: Buf; + + fn write_to<B>(&mut self, buf: &mut B) + where + B: BufMut; +} + +impl ReadToBuf for u64 { + fn read_from<B>(buf: &mut B) -> Self + where + B: Buf, + { + buf.get_u64_be() + } + + fn write_to<B>(&mut self, buf: &mut B) + where + B: BufMut, + { + buf.put_u64_be(*self); + } +} + +impl ReadToBuf for i64 { + fn read_from<B>(buf: &mut B) -> Self + where + B: Buf, + { + buf.get_i64_be() + } + + fn write_to<B>(&mut self, buf: &mut B) + where + B: BufMut, + { + buf.put_i64_be(*self); + } +} + +#[allow(dead_code)] +#[derive(Debug)] +pub struct CheckPoint<T> { + option: ConfigOption, + offset: T, + file: AsyncFile, +} + +impl<T> CheckPoint<T> +where + T: Display + ReadToBuf + Clone + Sized + 'static, +{ + + pub async fn create<'a>( + option: &'a ConfigOption, + name: &'a str, + initial_offset: T, + ) -> Result<Self, IoError> { + let checkpoint_path = option.base_dir.join(name); + + match AsyncFile::get_metadata(&checkpoint_path).await { + Ok(_) => { + trace!("checkpoint {:#?} exists, reading", checkpoint_path); + let file = AsyncFile::open_read_write(&checkpoint_path).await?; + let mut checkpoint = CheckPoint { + option: option.to_owned(), + file, + offset: initial_offset.clone(), + }; + checkpoint.read().await?; + Ok(checkpoint) + } + Err(_) => { + debug!( + "no existing checkpoint {:#?}, creating", + checkpoint_path + ); + let file = AsyncFile::open_read_write(&checkpoint_path).await?; + trace!("file created: {}",file); + let mut checkpoint = CheckPoint { + option: option.to_owned(), + file, + offset: initial_offset.clone(), + }; + checkpoint.write(initial_offset.clone()).await?; + Ok(checkpoint) + } + } + } + + pub fn get_offset(&self) -> &T { + &self.offset + } + + /// read contents of the checkpoint file + async fn read(&mut self) -> 
Result<(), IoError> { + self.file.seek(SeekFrom::Start(0)).await?; + let mut contents = Vec::new(); + self.file.read_to_end(&mut contents).await.expect("reading to end"); + + if contents.len() != 8 { + return Err(IoError::new( + ErrorKind::InvalidData, + format!("there should be exact 8 bytes but {} bytes available ",contents.len()), + )); + } + + let mut buf = Cursor::new(contents); + self.offset = ReadToBuf::read_from(&mut buf); + Ok(()) + } + + pub(crate) async fn write(&mut self, pos: T) -> Result<(), IoError> { + debug!("writing checkpoint: {}", pos); + self.file.seek(SeekFrom::Start(0)).await?; + let mut contents = Vec::new(); + self.offset = pos; + self.offset.write_to(&mut contents); + self.file.write_all(&contents).await?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + + use std::env::temp_dir; + use std::io::Error as IoError; + + use future_helper::test_async; + + use super::CheckPoint; + use crate::fixture::ensure_clean_file; + use crate::ConfigOption; + + #[test_async] + async fn checkpoint_test() -> Result<(), IoError> { + let test_file = temp_dir().join("test.chk"); + ensure_clean_file(&test_file); + + + let option = ConfigOption { + base_dir: temp_dir(), + ..Default::default() + }; + let mut ck: CheckPoint = CheckPoint::create(&option, "test.chk", 0).await.expect("create"); + let _ = ck.read().await.expect("do initial read"); + assert_eq!(*ck.get_offset(), 0); + ck.write(10).await.expect("first write"); + ck.write(40).await.expect("2nd write"); + + drop(ck); + + + let mut ck2: CheckPoint = CheckPoint::create(&option, "test.chk", 0).await.expect("restore"); + ck2.read().await?; + assert_eq!(*ck2.get_offset(), 40); + ck2.write(20).await.expect("write aft er reading should work"); + + Ok(()) + } +} diff --git a/storage/src/config.rs b/storage/src/config.rs new file mode 100644 index 0000000000..723dd91464 --- /dev/null +++ b/storage/src/config.rs @@ -0,0 +1,87 @@ +use std::default::Default; +use std::path::PathBuf; +use std::path::Path; +use std::fmt; + +use serde::Deserialize; + +use types::defaults::SPU_LOG_INDEX_MAX_BYTES; +use types::defaults::SPU_LOG_INDEX_MAX_INTERVAL_BYTES; +use types::defaults::SPU_LOG_SEGMENT_MAX_BYTES; + +use kf_protocol::api::Size; + +// common option +#[derive(Debug, Clone, PartialEq, Deserialize)] +pub struct ConfigOption { + #[serde(default = "default_base_dir")] + pub base_dir: PathBuf, + #[serde(default = "default_index_max_bytes")] + pub index_max_bytes: Size, + #[serde(default = "default_index_max_interval_bytes")] + pub index_max_interval_bytes: Size, + #[serde(default = "default_segment_max_bytes")] + pub segment_max_bytes: Size, +} + + +impl fmt::Display for ConfigOption { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f,"storage config at: {:#?}",self.base_dir) + } +} + + + +fn default_base_dir() -> PathBuf { + Path::new("/tmp").to_path_buf() +} + +fn default_index_max_bytes() -> Size { + SPU_LOG_INDEX_MAX_BYTES +} + +fn default_index_max_interval_bytes() -> Size { + SPU_LOG_INDEX_MAX_INTERVAL_BYTES +} + +fn default_segment_max_bytes() -> Size { + SPU_LOG_SEGMENT_MAX_BYTES +} + +impl ConfigOption { + pub fn new(base_dir: PathBuf, index_max_bytes: u32, index_max_interval_bytes: u32, segment_max_bytes: u32) -> Self { + ConfigOption { + base_dir, + index_max_bytes, + index_max_interval_bytes, + segment_max_bytes + } + } + + pub fn base_dir(mut self, dir: PathBuf) -> Self { + self.base_dir = dir; + self + } + + pub fn index_max_bytes(mut self, bytes: Size) -> Self { + self.index_max_bytes = bytes; + self + } + + pub fn 
segment_max_bytes(mut self, bytes: Size) -> Self { + self.segment_max_bytes = bytes; + self + } +} + +impl Default for ConfigOption { + fn default() -> Self { + ConfigOption { + base_dir: default_base_dir(), + index_max_bytes: default_index_max_bytes(), + index_max_interval_bytes: default_index_max_interval_bytes(), + segment_max_bytes: default_segment_max_bytes(), + } + } +} diff --git a/storage/src/error.rs b/storage/src/error.rs new file mode 100644 index 0000000000..90d948feef --- /dev/null +++ b/storage/src/error.rs @@ -0,0 +1,76 @@ + +use std::io::Error as IoError; +use std::fmt; + +use kf_protocol::api::DefaultBatch; +use future_aio::fs::FileSinkError; +use future_aio::SendFileError; +use kf_socket::KfSocketError; + +use crate::util::OffsetError; +use crate::validator::LogValidationError; + + +#[derive(Debug)] +pub enum StorageError { + IoError(IoError), + NoRoom(DefaultBatch), + OffsetError(OffsetError), + LogValidationError(LogValidationError), + SendFileError(SendFileError), + SocketError(KfSocketError) +} + + +impl fmt::Display for StorageError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::IoError(err) => write!(f, "{}",err), + Self::NoRoom(batch) => write!(f, "No room {:#?}", batch), + Self::OffsetError(err) => write!(f,"{}",err), + Self::LogValidationError(err) => write!(f,"{}",err), + Self::SocketError(err) => write!(f,"{}",err), + Self::SendFileError(err) => write!(f,"{}",err), + } + } +} + + +impl From for StorageError { + fn from(error: IoError) -> Self { + StorageError::IoError(error) + } +} + +impl From for StorageError { + fn from(error: FileSinkError) -> Self { + match error { + FileSinkError::IoError(err) => StorageError::IoError(err), + FileSinkError::MaxLenReached => panic!("no auto conversion for file sink error"), + } + } +} + +impl From for StorageError { + fn from(error: OffsetError) -> Self { + StorageError::OffsetError(error) + } +} + +impl From for StorageError { + fn from(error: LogValidationError) -> Self { + StorageError::LogValidationError(error) + } +} + +impl From for StorageError { + fn from(error: SendFileError) -> Self { + StorageError::SendFileError(error) + } +} + +impl From for StorageError { + fn from(error: KfSocketError) -> Self { + StorageError::SocketError(error) + } +} diff --git a/storage/src/fixture.rs b/storage/src/fixture.rs new file mode 100644 index 0000000000..cae17e5dfd --- /dev/null +++ b/storage/src/fixture.rs @@ -0,0 +1,106 @@ +use log::info; +use std::fs::File; +use std::io; +use std::io::Read; +use std::path::Path; +use std::env::temp_dir; + +use kf_protocol::api::DefaultRecord; +use kf_protocol::api::DefaultBatch; +use kf_protocol::api::Size; + +use crate::ConfigOption; + +pub use utils::fixture::ensure_clean_dir; +pub use utils::fixture::ensure_new_dir; +pub use utils::fixture::ensure_clean_file; + + +pub fn create_batch() -> DefaultBatch { + create_batch_with_producer(12,2) +} + +/// create batches with produce and records count +pub fn create_batch_with_producer(producer: i64,records: u16) -> DefaultBatch { + let mut batches = DefaultBatch::default(); + let header = batches.get_mut_header(); + header.magic = 2; + header.producer_id = producer; + header.producer_epoch = -1; + + for _ in 0..records { + let mut record = DefaultRecord::default(); + let bytes: Vec = vec![10, 20]; + record.value = Some(bytes).into(); + batches.add_record(record); + } + + batches +} + + +pub fn read_bytes_from_file
<P>
(path: P) -> Result, io::Error> where P: AsRef { + let file_path = path.as_ref(); + info!("test file: {}", file_path.display()); + let mut f = File::open(file_path)?; + let mut buffer = Vec::new(); + f.read_to_end(&mut buffer)?; + Ok(buffer) +} + +pub fn default_option(index_max_interval_bytes: Size ) -> ConfigOption { + + ConfigOption { + segment_max_bytes: 100, + index_max_interval_bytes, + base_dir: temp_dir(), + index_max_bytes: 1000, + ..Default::default() + } +} + + +mod pin_tests { + + use std::pin::Pin; + use pin_utils::pin_mut; + use pin_utils::unsafe_unpinned; + + // impl Unpin for Counter{} + + struct Counter { + total: u16 + } + + impl Counter { + + unsafe_unpinned!(total: u16); + + fn get_total(self: Pin<&mut Self>) -> u16 { + self.total + } + + fn update_total(mut self: Pin<&mut Self>, val: u16) { + *self.as_mut().total() = val; + } + } + + + #[test] + fn test_read_pin() { + let counter = Counter { total: 20}; + pin_mut!(counter); // works with future that requires unpin + assert_eq!(counter.get_total(),20); + + } + + #[test] + fn test_write_pin() { + let counter = Counter { total: 20}; + pin_mut!(counter); // works with future that requires unpin + counter.as_mut().update_total(30); + assert_eq!(counter.get_total(),30); + } + + +} \ No newline at end of file diff --git a/storage/src/index.rs b/storage/src/index.rs new file mode 100644 index 0000000000..70d6b8f01c --- /dev/null +++ b/storage/src/index.rs @@ -0,0 +1,303 @@ +use std::ffi::OsStr; +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::mem::size_of; +use std::mem::transmute; +use std::ops::Deref; +use std::path::Path; +use std::slice; + +use libc::c_void; +use log::debug; +use log::trace; +use pin_utils::unsafe_unpinned; + +use future_aio::fs::MemoryMappedFile; +use kf_protocol::api::Offset; +use kf_protocol::api::Size; + +use crate::util::generate_file_name; +use crate::util::log_path_get_offset; +use crate::validator::LogValidationError; +use crate::ConfigOption; +use crate::StorageError; + +/// size of the memory mapped isze +const INDEX_ENTRY_SIZE: Size = (size_of::() * 2) as Size; + +pub const EXTENSION: &str = "index"; + +pub(crate) trait Index { + + fn find_offset(&self, relative_offset: Size) -> Option<(Size,Size)>; + + fn len(&self) -> Size; + + fn entries(&self) -> Size { + self.len() / INDEX_ENTRY_SIZE + } + +} + +pub trait OffsetPosition: Sized { + /// convert to be endian + fn to_be(self) -> Self; + + fn offset(&self) -> Size; + + fn position(&self) -> Size; +} + + +impl OffsetPosition for (Size,Size) { + fn to_be(self) -> Self { + let (offset,pos) = self; + (offset.to_be(),pos.to_be()) + } + + #[inline(always)] + fn offset(&self) -> Size { + self.0.to_be() + } + + #[inline(always)] + fn position(&self) -> Size { + self.1.to_be() + } + +} + + + + +/// Segment index +/// +/// Maps offset into file position (Seek) +/// +/// It is backed by memory mapped file +/// +/// For active segment, index can grow +/// For non active, it is fixed + +// implement index file +pub struct LogIndex { + #[allow(dead_code)] + mmap: MemoryMappedFile, + ptr: *mut c_void, + len: Size, +} + +// const MEM_SIZE: u64 = 1024 * 1024 * 10; //10 MBs + +unsafe impl Send for LogIndex {} + +unsafe impl Sync for LogIndex {} + +impl LogIndex { + unsafe_unpinned!(mmap: MemoryMappedFile); + + pub async fn open_from_offset( + base_offset: Offset, + option: &ConfigOption, + ) -> Result { + let index_file_path = generate_file_name(&option.base_dir, base_offset, EXTENSION); + + debug!("opening index mm at: {:#?}", index_file_path); 
+ // make sure it is log file + let (mut m_file, file) = MemoryMappedFile::open( + index_file_path, + INDEX_ENTRY_SIZE as u64 + ).await?; + + let len = (file.metadata().await?).len(); + + trace!("opening memory mapped file with len : {}", len); + + if len > std::u32::MAX as u64 { + return Err(IoError::new( + ErrorKind::InvalidData, + "index file should not exceed u32", + )); + } + let b_slices: &[u8] = m_file.get_mem_file(); + let ptr = unsafe { transmute::<*const u8, *mut c_void>(b_slices.as_ptr()) }; + + Ok(LogIndex { + mmap: m_file, + ptr, + len: len as Size, + }) + } + + pub async fn open_from_path
<P>
(path: P) -> Result + where + P: AsRef, + { + let path_ref = path.as_ref(); + let base_offset = log_path_get_offset(path_ref)?; + if path_ref.extension() != Some(OsStr::new(EXTENSION)) { + return Err(StorageError::LogValidationError( + LogValidationError::InvalidExtension, + )); + } + + let mut option = ConfigOption::default(); + option.base_dir = path_ref.parent().unwrap().to_path_buf(); + + LogIndex::open_from_offset(base_offset, &option).await.map_err(|err| err.into()) + } + + + + #[inline] + pub fn ptr(&self) -> *const (Size, Size) { + self.ptr as *const (Size, Size) + } + +} + +impl Index for LogIndex { + + fn find_offset(&self, offset: Size) -> Option<(Size, Size)> { + lookup_entry(self, offset).map(|idx| self[idx]) + } + + fn len(&self) -> Size { + self.len + } + +} + +impl Deref for LogIndex { + type Target = [(Size, Size)]; + + #[inline] + fn deref(&self) -> &[(Size, Size)] { + unsafe { slice::from_raw_parts(self.ptr(), (self.len() / INDEX_ENTRY_SIZE) as usize) } + } +} + +/// find the index of the offset that matches +pub(crate) fn lookup_entry(offsets: &[(Size, Size)], offset: Size) -> Option { + + let first_entry = offsets[0]; + if offset < first_entry.offset() { + trace!("offset: {} is less than: first: {}", offset, first_entry.offset()); + return None; + } + + match offsets.binary_search_by(|entry| entry.offset().cmp(&offset)) { + Ok(idx) => Some(idx), + Err(idx) => Some(idx - 1), + } +} + +#[cfg(test)] +mod tests { + + use futures::sink::SinkExt; + use std::env::temp_dir; + use std::io::Error as IoError; + + use crate::fixture::ensure_clean_file; + use future_helper::test_async; + + use super::lookup_entry; + use super::LogIndex; + use crate::mut_index::MutLogIndex; + use crate::ConfigOption; + use super::OffsetPosition; + + const TEST_FILE: &str = "00000000000000000921.index"; + + #[test] + fn test_index_search() { + utils::init_logger(); + + // offset increase by 4000 + let indexes = [ + (3, 10).to_be(), + (7, 350).to_be(), + (9, 400).to_be(), + (13, 600).to_be(), + (15, 8000).to_be(), + (21, 12000).to_be(), + ]; + + assert!(lookup_entry(&indexes, 1).is_none()); + + assert_eq!(lookup_entry(&indexes, 3), Some(0)); + assert_eq!(lookup_entry(&indexes, 10), Some(2)); // (9,400) + assert_eq!(lookup_entry(&indexes, 14), Some(3)); // (13,600) + assert_eq!(lookup_entry(&indexes, 50), Some(5)); // (21,12000) max + } + + fn default_option() -> ConfigOption { + ConfigOption { + segment_max_bytes: 1000, + base_dir: temp_dir(), + index_max_bytes: 1000, + index_max_interval_bytes: 0, + ..Default::default() + } + } + + #[test_async] + async fn test_index_read_offset() -> Result<(), IoError> { + let option = default_option(); + let test_file = option.base_dir.join(TEST_FILE); + ensure_clean_file(&test_file); + + let mut mut_index = MutLogIndex::create(921, &option).await?; + + + + mut_index.send((5, 16, 70)).await?; + mut_index.send((10, 100, 70)).await?; + + + mut_index.shrink().await?; + + + let log_index = LogIndex::open_from_offset(921, &option).await?; + + let offset1 = log_index[0]; + assert_eq!(offset1.offset(), 5); + assert_eq!(offset1.position(), 16); + + let offset2 = log_index[1]; + assert_eq!(offset2.offset(), 10); + assert_eq!(offset2.position(), 100); + + + Ok(()) + } + + /* this is compound test which is not needed. 
+ const TEST_FILE3: &str = "00000000000000000922.index"; + + #[test_async] + async fn test_index_read_findoffset() -> Result<(), IoError> { + let option = default_option(); + let test_file = option.base_dir.join(TEST_FILE3); + ensure_clean_file(&test_file); + + let mut mut_index = MutLogIndex::create(922, &option).await?; + + mut_index.send((100, 16, 70)).await?; + mut_index.send((500, 200, 70)).await?; + mut_index.send((800, 100, 70)).await?; + mut_index.send((1000, 200, 70)).await?; + + mut_index.shrink().await?; + + + let log_index = LogIndex::open_from_offset(922, &option).await?; + assert_eq!(log_index.find_offset(600), Ok(1)); + assert_eq!(log_index.find_offset(2000), Ok(3)); + + Ok(()) + } + */ + +} diff --git a/storage/src/lib.rs b/storage/src/lib.rs new file mode 100644 index 0000000000..de285a4bbb --- /dev/null +++ b/storage/src/lib.rs @@ -0,0 +1,88 @@ +#[cfg(test)] +mod fixture; + +mod batch; +mod batch_header; +mod checkpoint; +mod error; +mod records; +mod index; +mod mut_records; +mod mut_index; +mod range_map; +mod replica; +mod segment; +mod util; +mod validator; +mod config; + +pub use crate::config::ConfigOption; +pub use crate::batch::DefaultFileBatchStream; +pub use crate::batch_header::BatchHeaderPos; +pub use crate::batch_header::BatchHeaderStream; +pub use crate::error::StorageError; +pub use crate::records::FileRecordsSlice; +pub use crate::index::LogIndex; +pub use crate::index::OffsetPosition; +pub use crate::replica::FileReplica; +pub(crate) use crate::segment::SegmentSlice; + + +use kf_protocol::api::ErrorCode; +use kf_protocol::api::Offset; +use future_aio::fs::AsyncFileSlice; +use kf_socket::FilePartitionResponse; + +pub trait Captures<'a> {} +impl<'a, T: ?Sized> Captures<'a> for T {} + +/// output from storage is represented as slice +pub trait SlicePartitionResponse { + + fn set_hw(&mut self, offset: i64); + + fn set_last_stable_offset(&mut self,offset: i64); + + fn set_log_start_offset(&mut self,offset: i64); + + fn set_slice(&mut self, slice: AsyncFileSlice); + + fn set_error_code(&mut self,error: ErrorCode); + +} + + +impl SlicePartitionResponse for FilePartitionResponse { + + fn set_hw(&mut self, offset: i64) { + self.high_watermark = offset; + } + + fn set_last_stable_offset(&mut self,offset: i64) { + self.last_stable_offset = offset; + } + + fn set_log_start_offset(&mut self, offset: i64) { + self.log_start_offset = offset; + } + + fn set_slice(&mut self, slice: AsyncFileSlice) { + self.records = slice.into(); + } + + fn set_error_code(&mut self, error: ErrorCode) { + self.error_code = error; + } + +} + + +pub trait ReplicaStorage { + + /// committed offset + fn get_hw(&self) -> Offset; + + /// offset mark that beggining of uncommitted + fn get_leo(&self) -> Offset; + +} \ No newline at end of file diff --git a/storage/src/mut_index.rs b/storage/src/mut_index.rs new file mode 100644 index 0000000000..287da4922b --- /dev/null +++ b/storage/src/mut_index.rs @@ -0,0 +1,366 @@ +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::mem::size_of; +use std::mem::transmute; +use std::ops::Deref; +use std::ops::DerefMut; +use std::slice; +use std::pin::Pin; +use std::task::Context; +use std::task::Poll; + +use futures::future::Future; +use futures::sink::Sink; +use libc::c_void; +use log::debug; +use log::trace; +use log::error; +use pin_utils::pin_mut; +use pin_utils::unsafe_unpinned; + + +use future_aio::fs::AsyncFile; +use future_aio::fs::MemoryMappedMutFile; +use kf_protocol::api::Offset; +use kf_protocol::api::Size; + +use 
crate::util::generate_file_name; +use crate::util::log_path_get_offset; +use crate::util::OffsetError; +use crate::ConfigOption; +use crate::index::lookup_entry; +use crate::index::Index; +use crate::index::OffsetPosition; + +/// size of the memory mapped isze +const INDEX_ENTRY_SIZE: Size = (size_of::() * 2) as Size; + +pub const EXTENSION: &str = "index"; + +unsafe impl Sync for MutLogIndex {} + +/// Segment index +/// +/// Maps offset into file position (Seek) +/// +/// It is backed by memory mapped file +/// +/// For active segment, index can grow +/// For non active, it is fixed + +// implement index file +pub struct MutLogIndex { + mmap: MemoryMappedMutFile, + file: AsyncFile, + bytes_delta: Size, + pos: Size, + option: ConfigOption, + ptr: *mut c_void, +} + +// const MEM_SIZE: u64 = 1024 * 1024 * 10; //10 MBs + +unsafe impl Send for MutLogIndex {} + +impl MutLogIndex { + unsafe_unpinned!(mmap: MemoryMappedMutFile); + unsafe_unpinned!(pos: Size); + unsafe_unpinned!(bytes_delta: Size); + + pub async fn create(base_offset: Offset, option: &ConfigOption) -> Result { + let index_file_path = generate_file_name(&option.base_dir, base_offset, EXTENSION); + + // create new memory file and + if option.index_max_bytes == 0 { + return Err(IoError::new(ErrorKind::InvalidInput, "index max bytes must be greater than 0")); + } + + debug!("creating index mm at: {:#?}", index_file_path); + let (mut m_file, file) = MemoryMappedMutFile::create( + &index_file_path, + option.index_max_bytes as u64 + ).await?; + + let b_slices: &[u8] = m_file.get_mut_mem_file(); + let ptr = unsafe { transmute::<*const u8, *mut c_void>(b_slices.as_ptr()) }; + + Ok(MutLogIndex { + mmap: m_file, + file, + pos: 0, + bytes_delta: 0, + option: option.to_owned(), + ptr, + }) + } + + pub async fn open(base_offset: Offset, option: &ConfigOption) -> Result { + let index_file_path = generate_file_name(&option.base_dir, base_offset, EXTENSION); + + // create new memory file and + if option.index_max_bytes == 0 { + return Err(IoError::new(ErrorKind::InvalidInput, "invalid API")); + } + + + // make sure it is log file + let (mut m_file, file) = MemoryMappedMutFile::create( + &index_file_path, + option.index_max_bytes as u64 + ).await?; + + let b_slices: &[u8] = m_file.get_mut_mem_file(); + let ptr = unsafe { transmute::<*const u8, *mut c_void>(b_slices.as_ptr()) }; + + trace!("opening mut index at: {:#?}, pos: {}", index_file_path,0); + + let mut index = MutLogIndex { + mmap: m_file, + file, + pos: 0, + bytes_delta: 0, + option: option.to_owned(), + ptr, + }; + + index.update_pos()?; + + Ok(index) + } + + // shrink index file to last know position + + pub async fn shrink(&mut self) -> Result<(), IoError> { + let len = (self.pos * INDEX_ENTRY_SIZE) as u64; + debug!("shrinking index: {:#?} to {} bytes", self.file, len); + let file = &mut self.file; + pin_mut!(file); + file.set_len(len).await + } + + + #[inline] + pub fn ptr(&self) -> *const (Size, Size) { + self.ptr as *const (Size, Size) + } + + pub fn mut_ptr(&mut self) -> *mut (Size, Size) { + self.ptr as *mut (Size, Size) + } + + + #[allow(dead_code)] + pub fn get_base_offset(&self) -> Result { + log_path_get_offset(&self.file.get_path()) + } + + /// recalculate the + fn update_pos(&mut self) -> Result<(),IoError> { + + let entries = self.entries(); + trace!("updating position with: {}",entries); + + for i in 0..entries { + if self[i as usize].position() == 0 { + trace!("set positioning: {}",i); + self.pos = i; + return Ok(()) + } + } + + Err(IoError::new(ErrorKind::InvalidData, 
"empty slot was not found")) + + } + +} + +impl Index for MutLogIndex { + + /// find offset indexes using relative offset + fn find_offset(&self, relative_offset: Size) -> Option<(Size,Size)> { + trace!("try to find relative offset: {} index: {}", relative_offset,self.pos); + + if self.pos == 0 { + trace!("no entries, returning none"); + return None + } + let (lower, _) = self.split_at(self.pos as usize); + lookup_entry(lower, relative_offset).map( |idx| self[idx]) + } + + fn len(&self) -> Size { + self.option.index_max_bytes + } + +} + +impl Deref for MutLogIndex { + type Target = [(Size, Size)]; + + #[inline] + fn deref(&self) -> &[(Size, Size)] { + unsafe { slice::from_raw_parts(self.ptr(), (self.len() / INDEX_ENTRY_SIZE) as usize) } + } +} + +impl DerefMut for MutLogIndex { + #[inline] + fn deref_mut(&mut self) -> &mut [(Size, Size)] { + unsafe { + slice::from_raw_parts_mut(self.mut_ptr(), (self.len() / INDEX_ENTRY_SIZE) as usize) + } + } +} + +/// Sink with item (offset, position, batch size) +impl Sink<(Size,Size,Size)> for MutLogIndex { + + type Error = IoError; + + fn poll_ready(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { + Poll::Ready(Ok(())) // mem file is always ready + } + + fn start_send(mut self: Pin<&mut Self>, item: (Size,Size,Size)) -> Result<(), Self::Error> { + + trace!("index start: {:#?}",item); + let batch_size = item.2; + + let bytes_delta = self.bytes_delta; + if bytes_delta < self.option.index_max_interval_bytes { + trace!("index writing skipped accumulated bytes {} less than max less interval: {}",bytes_delta,self.option.index_max_interval_bytes); + *self.as_mut().bytes_delta() = bytes_delta + batch_size; + trace!("index updated accumulated bytes: {}",self.bytes_delta); + return Ok(()); + } + + let pos = self.pos as usize; + let max_entries = self.entries(); + *self.as_mut().pos() = (pos + 1) as Size; + *self.as_mut().bytes_delta() = 0; + let this = unsafe { Pin::get_unchecked_mut(self) }; + + if pos < max_entries as usize { + this[pos] = (item.0,item.1).to_be(); + trace!("index successfully written: {:#?} at: {}", item,pos); + } else { + error!("index position: {} is greater than max entries: {}, ignorring",pos,max_entries); + } + + Ok(()) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let ft = self.mmap().flush_ft(); + pin_mut!(ft); + ft.poll(cx) + } + + fn poll_close(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } +} + + +#[cfg(test)] +mod tests { + + use futures::sink::SinkExt; + use std::fs::File; + use std::io::Error as IoError; + use std::io::Read; + + use future_helper::test_async; + + use super::MutLogIndex; + use crate::index::Index; + use crate::fixture::default_option; + use crate::fixture::ensure_clean_file; + use crate::index::OffsetPosition; + + const TEST_FILE: &str = "00000000000000000121.index"; + + + + #[test_async] + async fn test_index_write() -> Result<(), IoError> { + let option = default_option(50); + let test_file = option.base_dir.join(TEST_FILE); + ensure_clean_file(&test_file); + + let mut index_sink = MutLogIndex::create(121, &option).await?; + + index_sink.send((5, 200,70)).await?; // this will be ignored + index_sink.send((10, 100,70)).await?; // this will be written since batch size 70 is greater than 50 + + assert_eq!(index_sink.pos,1); + + + let mut f = File::open(&test_file)?; + let mut buffer = vec![0; 32]; + f.read(&mut buffer)?; + + // ensure offset,position are stored in the big endian format + assert_eq!(buffer[0], 0); + assert_eq!(buffer[1], 0); + 
assert_eq!(buffer[2], 0); + assert_eq!(buffer[3], 10); + assert_eq!(buffer[4], 0); + assert_eq!(buffer[5], 0); + assert_eq!(buffer[6], 0); + assert_eq!(buffer[7], 100); + + drop(index_sink); + + // open same file + + let index_sink = MutLogIndex::open(121, &option).await?; + assert_eq!(index_sink.pos,1); + + + Ok(()) + } + + const TEST_FILE2: &str = "00000000000000000122.index"; + + #[test_async] + async fn test_index_shrink() -> Result<(), IoError> { + let option = default_option(0); + let test_file = option.base_dir.join(TEST_FILE2); + ensure_clean_file(&test_file); + + let mut index_sink = MutLogIndex::create(122, &option).await?; + + index_sink.send((5, 16,70)).await?; + + index_sink.shrink().await?; + + let f = File::open(&test_file)?; + let m = f.metadata()?; + assert_eq!(m.len(), 8); + + Ok(()) + } + + + const TEST_FILE3: &str = "00000000000000000123.index"; + + #[test_async] + async fn test_mut_index_findoffset() -> Result<(), IoError> { + let option = default_option(0); + let test_file = option.base_dir.join(TEST_FILE3); + ensure_clean_file(&test_file); + + let mut index_sink = MutLogIndex::create(123, &option).await?; + + index_sink.send((100, 16,70)).await?; + index_sink.send((500, 200,70)).await?; + index_sink.send((800, 100,70)).await?; + index_sink.send((1000, 200,70)).await?; + + assert_eq!(index_sink.find_offset(600).map(|p| p.to_be()), Some((500,200))); + assert_eq!(index_sink.find_offset(2000).map(|p| p.to_be()), Some((1000,200))); + Ok(()) + } + +} diff --git a/storage/src/mut_records.rs b/storage/src/mut_records.rs new file mode 100644 index 0000000000..7a542b9cb7 --- /dev/null +++ b/storage/src/mut_records.rs @@ -0,0 +1,218 @@ +use std::pin::Pin; +use std::io::Error as IoError; +use std::task::Context; +use std::task::Poll; + +use futures::sink::Sink; +use futures::ready; +use log::debug; +use log::trace; +use pin_utils::pin_mut; +use pin_utils::unsafe_pinned; + +use future_aio::fs::FileSink; +use future_aio::fs::AsyncFile; +use future_aio::fs::AsyncFileSlice; +use future_aio::fs::FileSinkError; +use future_aio::fs::FileSinkOption; +use kf_protocol::api::DefaultBatch; +use kf_protocol::api::Offset; +use kf_protocol::api::Size; +use kf_protocol::Encoder; + +use crate::util::generate_file_name; +use crate::validator::validate; +use crate::validator::LogValidationError; +use crate::ConfigOption; +use crate::StorageError; +use crate::records::FileRecords; + +pub const MESSAGE_LOG_EXTENSION: &'static str = "log"; + + +/// Can append new batch to file +pub struct MutFileRecords { + base_offset: Offset, + item_last_offset_delta: Size, + f_sink: FileSink>, +} + +impl MutFileRecords { + unsafe_pinned!(f_sink: FileSink>); + + pub async fn create( + base_offset: Offset, + option: &ConfigOption, + ) -> Result { + let sink_option = FileSinkOption { + max_len: Some(option.segment_max_bytes as u64), + }; + let log_path = generate_file_name(&option.base_dir, base_offset, MESSAGE_LOG_EXTENSION); + debug!("creating log at: {}", log_path.display()); + let f_sink = FileSink::open_append(&log_path, sink_option).await?; + Ok(MutFileRecords { + base_offset, + f_sink, + item_last_offset_delta: 0, + }) + } + + pub async fn open( + base_offset: Offset, + option: &ConfigOption, + ) -> Result { + let log_path = generate_file_name(&option.base_dir, base_offset, MESSAGE_LOG_EXTENSION); + trace!("opening commit log at: {}", log_path.display()); + + let sink_option = FileSinkOption { + max_len: Some(option.segment_max_bytes as u64), + }; + + let f_sink = FileSink::open_append(log_path, 
sink_option).await?; + Ok(MutFileRecords { + base_offset, + f_sink, + item_last_offset_delta: 0 + }) + } + + pub fn get_base_offset(&self) -> Offset { + self.base_offset + } + + pub async fn validate(&mut self) -> Result { + validate(self.f_sink.get_mut_writer()).await + } + + pub fn get_pos(&self) -> Size { + self.f_sink.get_current_len() as Size + } + + pub fn get_pending_batch_len(&self) -> Size { + self.f_sink.get_pending_len() as Size + } + + + pub fn get_item_last_offset_delta(&self) -> Size { + self.item_last_offset_delta + } + + + + +} + +impl FileRecords for MutFileRecords { + + fn get_base_offset(&self) -> Offset { + self.base_offset + } + + fn get_file(&self) -> &AsyncFile { + &self.f_sink.get_writer() + } + + + fn as_file_slice(&self, start: Size) -> Result { + self.f_sink.slice_from(start as u64, self.f_sink.get_current_len() - start as u64) + } + + + fn as_file_slice_from_to(&self, start: Size, len: Size) -> Result { + self.f_sink.slice_from(start as u64, len as u64) + } + +} + +impl Unpin for MutFileRecords {} + +impl Sink for MutFileRecords { + type Error = StorageError; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + self.f_sink().poll_ready(cx).map_err(|err| err.into()) + } + + fn start_send(mut self: Pin<&mut Self>, item: DefaultBatch) -> Result<(), Self::Error> { + debug!("writing batch {:#?}", item.get_header()); + self.item_last_offset_delta = item.get_last_offset_delta(); + let mut buffer: Vec = vec![]; + item.encode(&mut buffer,0)?; + let sink = &mut self.as_mut().f_sink(); + pin_mut!(sink); + trace!("writing {} bytes", buffer.len()); + match sink.start_send(buffer) { + Ok(_) => Ok(()), + Err(err) => match err { + FileSinkError::MaxLenReached => Err(StorageError::NoRoom(item)), + _ => Err(err.into()), + }, + } + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let f_sink = self.as_mut().f_sink(); + let flush_poll: Poll> = + f_sink.poll_flush(cx).map_err(|err| err.into()); + ready!(flush_poll)?; + debug!("flushed log with pos: {}", self.get_pos()); + Poll::Ready(Ok(())) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + self.f_sink().poll_close(cx).map_err(|err| err.into()) + } +} + +#[cfg(test)] +mod tests { + + use futures::sink::SinkExt; + use std::env::temp_dir; + use std::io::Cursor; + + use future_helper::test_async; + use kf_protocol::api::DefaultBatch; + use kf_protocol::Decoder; + + use super::MutFileRecords; + use super::StorageError; + use crate::fixture::create_batch; + use crate::fixture::ensure_clean_file; + use crate::fixture::read_bytes_from_file; + use crate::ConfigOption; + + const TEST_FILE_NAME: &str = "00000000000000000100.log"; // for offset 100 + + #[test_async] + async fn test_sink_msg() -> Result<(), StorageError> { + let test_file = temp_dir().join(TEST_FILE_NAME); + ensure_clean_file(&test_file); + + let options = ConfigOption { + base_dir: temp_dir(), + segment_max_bytes: 1000, + ..Default::default() + }; + let mut msg_sink = MutFileRecords::create(100, &options).await?; + + msg_sink.send(create_batch()).await?; + + let bytes = read_bytes_from_file(&test_file)?; + assert_eq!(bytes.len(), 79, "sould be 70 bytes"); + + let batch = DefaultBatch::decode_from(&mut Cursor::new(bytes),0)?; + assert_eq!(batch.get_header().magic, 2, "check magic"); + assert_eq!(batch.records.len(), 2); + + msg_sink.send(create_batch()).await?; + let bytes = read_bytes_from_file(&test_file)?; + assert_eq!(bytes.len(), 158, "sould be 158 bytes"); + + let old_msg_sink = 
MutFileRecords::open(100, &options).await?; + assert_eq!(old_msg_sink.get_base_offset(), 100); + + Ok(()) + } + +} diff --git a/storage/src/range_map.rs b/storage/src/range_map.rs new file mode 100644 index 0000000000..7ba56f98a0 --- /dev/null +++ b/storage/src/range_map.rs @@ -0,0 +1,240 @@ +use std::cmp::max; +use std::cmp::min; +use std::collections::BTreeMap; +use std::ops::Bound::Excluded; +use std::ops::Bound::Included; +use std::ffi::OsStr; + +use log::debug; +use log::trace; +use log::error; + +use kf_protocol::api::Offset; + +use crate::segment::ReadSegment; +use crate::StorageError; +use crate::ConfigOption; +use crate::util::log_path_get_offset; + +#[derive(Debug)] +pub(crate) struct SegmentList { + segments: BTreeMap, + max_base_offset: Offset, // maximum number of offset for all segments + min_base_offset: Offset +} + +impl SegmentList { + + pub fn new() -> Self { + SegmentList { + segments: BTreeMap::new(), + max_base_offset: 0, + min_base_offset: -1 + } + } + + // load segments + pub async fn from_dir(option: &ConfigOption) -> Result<(SegmentList,Option),StorageError> + { + + let dirs = option.base_dir.read_dir()?; + debug!("reading segments at: {:#?}",dirs); + let files: Vec<_> = dirs.into_iter().filter_map( |entry| entry.ok()).collect(); + + let mut offsets: Vec = vec![]; + for entry in files { + if let Ok(metadata) = entry.metadata() { + + if metadata.is_file() { + let path = entry.path(); + trace!("scanning file: {:#?}",path); + + if path.extension() == Some(OsStr::new("log")) { + + if let Ok(offset) = log_path_get_offset(&path) { + trace!("detected valid log: {}",offset); + offsets.push(offset); + /* + match Segment::open(offset,option).await { + Ok(segment) => segments.add_segment(segment), + Err(err) => error!("error opening segment: {:#?}",err) + } + } else { + debug!("not log, skipping: {:#?}",path); + */ + } + } + } + } + } + + offsets.sort(); + + let last_offset = offsets.pop(); + let mut segments = Self::new(); + + for offset in offsets { + match ReadSegment::open_for_read(offset,option).await { + Ok(segment) => segments.add_segment(segment), + Err(err) => error!("error opening segment: {:#?}",err) + } + } + + Ok((segments,last_offset)) + + } + + #[allow(dead_code)] + pub fn len(&self) -> usize { + self.segments.len() + } + + #[allow(dead_code)] + pub fn max_offset(&self) -> Offset { + self.max_base_offset + } + + pub fn min_offset(&self) -> Offset { + self.min_base_offset + } + + pub fn add_segment(&mut self, segment: ReadSegment) { + let base_offset = segment.get_base_offset(); + debug!("inserting segment base: {}", base_offset); + self.max_base_offset = max(self.max_base_offset,base_offset); + self.min_base_offset = if self.min_base_offset < 0 { base_offset} else { min(self.min_base_offset,base_offset) }; + &self.segments.insert(segment.get_base_offset(), segment); + } + + #[allow(dead_code)] + pub fn get_segment(&self, offset: Offset) -> Option<&ReadSegment> { + self.segments.get(&offset) + } + + + #[allow(dead_code)] + pub fn find_segment(&self, offset: Offset) -> Option<(&Offset, &ReadSegment)> { + (&self.segments) + .range((Excluded(offset - self.max_base_offset), Included(offset))) + .next_back() + } +} + + +#[cfg(test)] +mod tests { + + use std::env::temp_dir; + use std::path::PathBuf; + + use futures::sink::SinkExt; + + use future_helper::test_async; + use kf_protocol::api::Offset; + + use crate::fixture::ensure_new_dir; + use super::SegmentList; + use crate::StorageError; + use crate::segment::MutableSegment; + use crate::segment::ReadSegment; + use 
+
+#[cfg(test)]
+mod tests {
+
+    use std::env::temp_dir;
+    use std::path::PathBuf;
+
+    use futures::sink::SinkExt;
+
+    use future_helper::test_async;
+    use kf_protocol::api::Offset;
+
+    use crate::fixture::ensure_new_dir;
+    use super::SegmentList;
+    use crate::StorageError;
+    use crate::segment::MutableSegment;
+    use crate::segment::ReadSegment;
+    use crate::ConfigOption;
+    use crate::fixture::create_batch;
+
+    const TEST_SEGMENT_DIR: &str = "segmentlist-test";
+
+    async fn create_segment(option: &ConfigOption, start: Offset, _offsets: Offset) -> Result<ReadSegment, StorageError> {
+        let mut mut_segment = MutableSegment::create(start,option).await?;
+        mut_segment.send(create_batch()).await?;
+//      mut_segment.set_end_offset(offsets);
+        let segment = mut_segment.convert_to_segment().await?;
+        Ok(segment)
+    }
+
+    fn default_option(base_dir: PathBuf) -> ConfigOption {
+        ConfigOption {
+            segment_max_bytes: 100,
+            base_dir,
+            index_max_bytes: 1000,
+            index_max_interval_bytes: 0,
+            ..Default::default()
+        }
+    }
+
+    #[test_async]
+    async fn test_find_segment() -> Result<(),StorageError> {
+
+        let rep_dir = temp_dir().join(TEST_SEGMENT_DIR);
+        ensure_new_dir(&rep_dir)?;
+
+        let mut list = SegmentList::new();
+
+        let option = default_option(rep_dir);
+
+        list.add_segment(create_segment(&option,0, 500).await?);
+        list.add_segment(create_segment(&option,500, 2000).await?);
+        list.add_segment(create_segment(&option,2000, 1000).await?);
+        list.add_segment(create_segment(&option,3000, 2000).await?);
+
+        let index = list.find_segment(1500);
+
+        assert!(index.is_some());
+        let (pos, _) = index.unwrap();
+        assert_eq!(*pos, 500);
+
+        Ok(())
+    }
+
+    const TEST_READ_DIR: &str = "segmentlist-read-many";
+
+    #[test_async]
+    async fn test_segment_read_many() -> Result<(),StorageError> {
+
+        let rep_dir = temp_dir().join(TEST_READ_DIR);
+        ensure_new_dir(&rep_dir)?;
+
+        let option = default_option(rep_dir);
+
+        create_segment(&option,10, 500).await?;
+        create_segment(&option,500, 2000).await?;
+        create_segment(&option,2000, 1000).await?;
+        create_segment(&option,3000, 2000).await?;
+
+        let (segments,last_offset_res) = SegmentList::from_dir(&option).await?;
+
+        assert_eq!(segments.len(),3); // 10, 500, 2000
+        assert_eq!(segments.max_offset(),2000);
+        assert_eq!(segments.min_offset(),10);
+        let segment1 = segments.get_segment(10).expect("should have segment at 10");
+        assert_eq!(segment1.get_base_offset(),10);
+        let last_offset = last_offset_res.expect("last segment should be there");
+        assert_eq!(last_offset,3000);
+        let segment2 = segments.get_segment(500).expect("should have segment at 500");
+        assert_eq!(segment2.get_base_offset(),500);
+
+        Ok(())
+    }
+
+    const TEST_EMPTY_DIR: &str = "segmentlist-read-empty";
+
+    #[test_async]
+    async fn test_segment_read_empty() -> Result<(),StorageError> {
+
+        let rep_dir = temp_dir().join(TEST_EMPTY_DIR);
+        ensure_new_dir(&rep_dir)?;
+
+        let option = default_option(rep_dir);
+
+        let (segments,last_segment) = SegmentList::from_dir(&option).await?;
+
+        assert_eq!(segments.len(),0);
+        assert!(last_segment.is_none());
+        Ok(())
+    }
+
+}
diff --git a/storage/src/records.rs b/storage/src/records.rs new file mode 100644 index 0000000000..3b7c955bf9 --- /dev/null +++ b/storage/src/records.rs @@ -0,0 +1,101 @@
+
+use std::io::Error as IoError;
+use std::io::ErrorKind;
+
+use log::debug;
+use future_aio::fs::AsyncFile;
+use future_aio::fs::AsyncFileSlice;
+
+use kf_protocol::api::Offset;
+use kf_protocol::api::Size;
+
+use crate::util::generate_file_name;
+use crate::validator::validate;
+use crate::validator::LogValidationError;
+use crate::ConfigOption;
+use crate::StorageError;
+
+pub(crate) const MESSAGE_LOG_EXTENSION: &'static str = "log";
+
+/// Records stored in the file
+pub(crate) trait FileRecords {
+
+    fn get_base_offset(&self) -> Offset;
+
+    fn get_file(&self) -> &AsyncFile;
+
+    /// file slice from the given position to the end
+    fn as_file_slice(&self, start: Size) -> Result<AsyncFileSlice, IoError>;
+
+    fn as_file_slice_from_to(&self, start: Size, len: Size) -> Result<AsyncFileSlice, IoError>;
+
+}
+
+pub struct FileRecordsSlice {
+    base_offset: Offset,
+    file: AsyncFile,
+    len: u64
+}
+
+impl FileRecordsSlice {
+
+    pub async fn open(
+        base_offset: Offset,
+        option: &ConfigOption,
+    ) -> Result<FileRecordsSlice, StorageError> {
+        let log_path = generate_file_name(&option.base_dir, base_offset, MESSAGE_LOG_EXTENSION);
+        debug!("opening commit log at: {}", log_path.display());
+
+        let file = AsyncFile::open(log_path).await?;
+
+        let metadata = file.metadata().await?;
+        let len = metadata.len();
+
+        Ok(FileRecordsSlice {
+            base_offset,
+            file,
+            len
+        })
+    }
+
+    pub fn get_base_offset(&self) -> Offset {
+        self.base_offset
+    }
+
+    #[allow(dead_code)]
+    pub async fn validate(&mut self) -> Result<Offset, LogValidationError> {
+        validate(&mut self.file).await
+    }
+}
+
+impl FileRecords for FileRecordsSlice {
+
+    fn get_base_offset(&self) -> Offset {
+        self.base_offset
+    }
+
+    fn get_file(&self) -> &AsyncFile {
+        &self.file
+    }
+
+    fn as_file_slice(&self, start_pos: Size) -> Result<AsyncFileSlice, IoError> {
+        Ok(self.file.raw_slice(start_pos as u64, self.len - start_pos as u64))
+    }
+
+    fn as_file_slice_from_to(&self, start: Size, len: Size) -> Result<AsyncFileSlice, IoError> {
+        if len as u64 > self.len {
+            Err(IoError::new(ErrorKind::UnexpectedEof, "requested len is larger than the file len"))
+        } else {
+            Ok(self.file.raw_slice(start as u64, len as u64))
+        }
+    }
+
+}
+
+// message log doesn't have circular structure
+impl Unpin for FileRecordsSlice {}
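+
+// rollover flow implemented by the Sink impl below: start_send parks a batch
+// in ReplicateState::Rollover when the active segment reports NoRoom;
+// poll_flush then shrinks the old index, opens a new segment at the saved end
+// offset, moves the old segment into prev_segments, and replays the parked
+// batch into the fresh active segment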
diff --git a/storage/src/replica.rs b/storage/src/replica.rs new file mode 100644 index 0000000000..fd68ed249f --- /dev/null +++ b/storage/src/replica.rs @@ -0,0 +1,729 @@
+use std::io::Error as IoError;
+use std::mem;
+use std::pin::Pin;
+use std::task::Context;
+use std::task::Poll;
+
+use futures::future::Future;
+use futures::Sink;
+use futures::SinkExt;
+use log::debug;
+use log::trace;
+use log::error;
+use pin_utils::pin_mut;
+use pin_utils::unsafe_pinned;
+use pin_utils::unsafe_unpinned;
+
+use future_aio::fs::create_dir_all;
+use kf_protocol::api::ErrorCode;
+use kf_protocol::api::DefaultBatch;
+use kf_protocol::api::Offset;
+use kf_protocol::api::Size;
+use kf_protocol::api::DefaultRecords;
+use kf_protocol::api::Isolation;
+
+use crate::checkpoint::CheckPoint;
+use crate::range_map::SegmentList;
+use crate::segment::MutableSegment;
+use crate::ConfigOption;
+use crate::SegmentSlice;
+use crate::StorageError;
+use crate::SlicePartitionResponse;
+use crate::ReplicaStorage;
+
+/// always evaluate the expression and return it
+/// this is useful for tracing a value
+macro_rules! f_trace {
+    ($target:expr,$value:expr) => {{
+        let res = $value;
+        trace!($target, res);
+        res
+    }};
+}
+
+// Based on Sink Buffer:
+// it holds an incoming batch in the Rollover state until a new active segment is ready to accept it
+#[derive(Debug)]
+enum ReplicateState {
+    Active,
+    Rollover(DefaultBatch, Offset),
+}
+
+impl ReplicateState {
+    fn roll_over_item(self) -> DefaultBatch {
+        match self {
+            ReplicateState::Rollover(item, _) => item,
+            _ => panic!("should only be called for rollover state"),
+        }
+    }
+}
+
+/// Sink for a replica. A replica is the lowest-level public API for the commit log.
+/// Internally it is stored as a list of segments. Each segment contains a finite set of record batches.
+///
+#[derive(Debug)]
+pub struct FileReplica {
+    #[allow(dead_code)]
+    last_base_offset: Offset,
+    #[allow(dead_code)]
+    partition: Size,
+    option: ConfigOption,
+    active_segment: MutableSegment,
+    state: ReplicateState,
+    prev_segments: SegmentList,
+    commit_checkpoint: CheckPoint<Offset>,
+}
+
+impl Unpin for FileReplica {}
+
+impl ReplicaStorage for FileReplica {
+
+    fn get_hw(&self) -> Offset {
+        *self.commit_checkpoint.get_offset()
+    }
+
+    /// offset that marks the beginning of uncommitted records
+    fn get_leo(&self) -> Offset {
+        self.active_segment.get_end_offset()
+    }
+
+}
+
+impl FileReplica {
+    unsafe_pinned!(active_segment: MutableSegment);
+    unsafe_unpinned!(prev_segments: SegmentList);
+
+    /// Construct a new replica with the specified topic and partition.
+    /// It can start at an arbitrary offset, but a normal replica usually starts at 0.
+    ///
+    /// A replica is the minimum unit of logs that can be replicated.
+    /// It is identified by the unique pair (topic, partition).
+    ///
+    /// The replica creates its own directory under the base directory.
+    /// The directory name encodes the unique replica id, which is the combination
+    /// of topic and partition.
+    ///
+    /// If the directory already exists, the existing logs are loaded and
+    /// validated to ensure they are safe to use, since they may be corrupted.
+    pub async fn create(
+        topic: S,
+        partition: Size,
+        base_offset: Offset,
+        option: &ConfigOption,
+    ) -> Result<FileReplica, StorageError>
+    where
+        S: AsRef<str> + Send + 'static,
+    {
+        let replica_dir = option.base_dir.join(replica_dir_name(topic, partition));
+
+        debug!("creating rep dir: {}", replica_dir.display());
+        create_dir_all(&replica_dir).await?; // ensure dir_name exists
+
+        let mut rep_option = option.clone();
+        rep_option.base_dir = replica_dir;
+        // create active segment
+
+        let (segments, last_offset_res) = SegmentList::from_dir(&rep_option).await?;
+
+        let active_segment = if let Some(last_offset) = last_offset_res {
+            trace!("last segment found, validating offsets: {}", last_offset);
+            let mut last_segment =
+                MutableSegment::open_for_write(last_offset, &rep_option).await?;
+            last_segment.validate().await?;
+            trace!(
+                "segment validated with last offset: {}",
+                last_segment.get_end_offset()
+            );
+            last_segment
+        } else {
+            debug!("no segment found, creating new one");
+            MutableSegment::create(base_offset, &rep_option).await?
+        };
+
+        let last_base_offset = active_segment.get_base_offset();
+
+        let commit_checkpoint: CheckPoint<Offset> = CheckPoint::create(
+            &rep_option,
+            "replication.chk",
+            last_base_offset
+        ).await?;
+
+        Ok(FileReplica {
+            option: rep_option,
+            last_base_offset,
+            partition,
+            active_segment,
+            state: ReplicateState::Active,
+            prev_segments: segments,
+            commit_checkpoint,
+        })
+    }
+
+    /// update committed offset (highwatermark)
+    pub async fn update_high_watermark(&mut self, offset: Offset) -> Result<(), IoError> {
+        let old_offset = self.get_hw();
+        if old_offset == offset {
+            trace!("new high watermark: {} is same as existing one, skipping",offset);
+            Ok(())
+        } else {
+            trace!("updating to new highwatermark: {} old: {}",old_offset,offset);
+            self.commit_checkpoint.write(offset).await
+        }
+    }
+
+    /// update high watermark to the end offset
+    pub async fn update_high_watermark_to_end(&mut self) -> Result<(),IoError> {
+        self.update_high_watermark(self.get_leo()).await
+    }
+
+    /// earliest offset
+    pub fn get_log_start_offset(&self) -> Offset {
+        let min_base_offset = self.prev_segments.min_offset();
+        if min_base_offset < 0 {
+            self.active_segment.get_base_offset()
+        } else {
+            min_base_offset
+        }
+    }
+
+    /// find the segment that contains the offset;
+    /// it could be the active segment, which can be written to,
+    /// or a read-only segment
+    pub(crate) fn find_segment(&self, offset: Offset) -> Option<SegmentSlice> {
+
+        trace!("finding segment for: {}",offset);
+        if offset >= self.active_segment.get_base_offset() {
+            trace!("active segment found for: {}", offset);
+            Some(self.active_segment.to_segment_slice())
+        } else {
+            trace!("offset is before active, searching prev segment");
+            self.prev_segments
+                .find_segment(offset)
+                .map(|(_, segment)| segment.to_segment_slice())
+        }
+    }
+
+    /// write records to this replica, updating the high watermark if required
+    pub async fn send_records(&mut self, records: DefaultRecords, update_highwatermark: bool) -> Result<(),StorageError> {
+
+        for batch in records.batches {
+            self.send(batch).await?;
+        }
+
+        if update_highwatermark {
+            self.update_high_watermark_to_end().await?;
+        }
+
+        Ok(())
+    }
+
+    /// read uncommitted records (between the high watermark and the end offset) into a file response
+    pub async fn read_uncommitted_records
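+
+    // the high watermark is durably tracked in "replication.chk": on restart,
+    // create() reloads it through the checkpoint, so get_hw() survives a
+    // process restart, while get_leo() is recovered by validating the last
+    // segment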
<P>
(&self, response: &mut P) where P: SlicePartitionResponse{ + self.read_records(self.get_hw(),None,response).await + } + + /// read committed records + pub async fn read_committed_records
<P>
(&self, start_offset: Offset, response: &mut P) where P: SlicePartitionResponse { + self.read_records(start_offset,Some(self.get_hw()),response).await + } + + + pub async fn read_records_with_isolation
<P>
(
+        &self,
+        offset: Offset,
+        isolation: Isolation,
+        partition_response: &mut P,
+    ) where
+        P: SlicePartitionResponse,
+    {
+        match isolation {
+            Isolation::ReadCommitted => {
+                self
+                    .read_committed_records(offset, partition_response)
+                    .await
+            }
+            Isolation::ReadUncommitted => {
+                self
+                    .read_records(offset, None, partition_response)
+                    .await
+            }
+        }
+    }
+
+    /// read records
+    /// * `start_offset`: start offset
+    /// * `max_offset`: max offset (exclusive)
+    /// * `response`: output
+    pub async fn read_records
<P>
(&self, start_offset: Offset, max_offset: Option<Offset>, response: &mut P) where P: SlicePartitionResponse {
+
+        trace!("read records to response from: {} max: {:#?}",start_offset,max_offset);
+
+        let highwatermark = self.get_hw();
+        response.set_hw(highwatermark);
+        response.set_last_stable_offset(highwatermark);
+        response.set_log_start_offset(self.get_log_start_offset());
+
+        match self.find_segment(start_offset) {
+            Some(segment) => {
+
+                let slice =
+                    match segment {
+                        SegmentSlice::MutableSegment(segment) => {
+                            // optimization
+                            if start_offset == self.get_leo() {
+                                trace!("start offset is same as end offset, skipping");
+                                return
+                            } else {
+                                debug!("active segment with baseoffset: {} found for offset: {}",segment.get_base_offset(),start_offset);
+                                segment.records_slice(start_offset,max_offset).await
+                            }
+                        },
+                        SegmentSlice::Segment(segment) => {
+                            debug!("read segment with baseoffset: {} found for offset: {}",segment.get_base_offset(),start_offset);
+                            segment.records_slice(start_offset,max_offset).await
+                        }
+                    };
+
+                match slice {
+                    Ok(slice) => {
+                        match slice {
+                            Some(slice) => {
+                                debug!("retrieved slice: {:#?}",slice);
+                                response.set_slice(slice);
+                            },
+                            None => {
+                                debug!("records not found for: {}",start_offset);
+                                response.set_error_code(ErrorCode::OffsetOutOfRange);
+                            }
+                        }
+                    },
+                    Err(err) => {
+                        response.set_error_code(ErrorCode::UnknownServerError);
+                        error!("error fetch: {:#?}",err);
+                    }
+                }
+
+            },
+            None => {
+                response.set_error_code(ErrorCode::OffsetOutOfRange);
+                debug!("segment not found for offset: {}",start_offset);
+            }
+        }
+    }
+
+}
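+
+// a small illustrative helper (sketch only, unused by the code above): the
+// exclusive upper bound each isolation mode allows a reader to see, mirroring
+// the read paths above (hw = high watermark, leo = log end offset)
+#[allow(dead_code)]
+fn read_upper_bound(isolation: Isolation, hw: Offset, leo: Offset) -> Offset {
+    match isolation {
+        // committed reads stop at the replicated high watermark
+        Isolation::ReadCommitted => hw,
+        // uncommitted reads may go all the way to the local log end offset
+        Isolation::ReadUncommitted => leo,
+    }
+}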
+
+impl Sink<DefaultBatch> for FileReplica {
+
+    type Error = StorageError;
+
+    fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
+        debug!("start polling ready");
+        match self.as_ref().state {
+            ReplicateState::Active => {
+                trace!("in active state. polling segment for ready status");
+                f_trace!(
+                    "segment status: {:#?}",
+                    self.active_segment().poll_ready(cx)
+                )
+            }
+            _ => panic!("should not poll when in roll over state"),
+        }
+    }
+
+    fn start_send(mut self: Pin<&mut Self>, item: DefaultBatch) -> Result<(), Self::Error> {
+        debug!("start_send");
+        match self.as_ref().state {
+            ReplicateState::Active => {
+                let active_segment = &mut self.as_mut().active_segment;
+                let offset = active_segment.get_end_offset();
+                match self.as_mut().active_segment().start_send(item) {
+                    Err(err) => match err {
+                        StorageError::NoRoom(item) => {
+                            mem::replace(&mut self.state, ReplicateState::Rollover(item, offset));
+                            Ok(())
+                        }
+                        _ => Err(err),
+                    },
+                    _ => Ok(()),
+                }
+            }
+            _ => panic!("do not send start to roll over"),
+        }
+    }
+
+    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
+        debug!("flushing");
+        // get unpinned to self, you can't do multiple mut self
+        match self.as_ref().state {
+            ReplicateState::Active => self.as_mut().active_segment().poll_flush(cx),
+            ReplicateState::Rollover(_, last_offset) => {
+                let poll_result = {
+                    trace!("rolling over active segment");
+                    match self.as_mut().active_segment().poll_roll_over(cx) {
+                        Poll::Ready(Ok(_)) => {
+                            let roll_over_ft = async {
+                                trace!("segment creation complete. switching as active");
+                                let option = &self.as_ref().option;
+                                let new_segment =
+                                    MutableSegment::create(last_offset, option).await?;
+                                let old_mut_segment =
+                                    mem::replace(&mut self.as_mut().active_segment, new_segment);
+                                let old_segment = old_mut_segment.as_segment().await?;
+                                self.as_mut().prev_segments().add_segment(old_segment);
+                                let roll_over =
+                                    mem::replace(&mut self.state, ReplicateState::Active);
+                                Ok(roll_over.roll_over_item()) as Result<DefaultBatch, StorageError>
+                            };
+                            pin_mut!(roll_over_ft);
+                            roll_over_ft.poll(cx)
+                        }
+                        Poll::Pending => return Poll::Pending,
+                        Poll::Ready(Err(err)) => return Poll::Ready(Err(err.into())),
+                    }
+                };
+
+                match poll_result {
+                    Poll::Ready(Ok(batch)) => {
+                        let pin_active_segment = self.as_mut().active_segment();
+                        match pin_active_segment.start_send(batch) {
+                            Ok(_) => {
+                                trace!("now flushing");
+                                let pin_active_segment = self.as_mut().active_segment();
+                                pin_active_segment.poll_flush(cx)
+                            }
+                            Err(err) => return Poll::Ready(Err(err)),
+                        }
+                    }
+                    Poll::Pending => return Poll::Pending,
+                    Poll::Ready(Err(err)) => return Poll::Ready(Err(err)),
+                }
+            }
+        }
+    }
+
+    fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
+        debug!("closing");
+        match self.state {
+            ReplicateState::Active => self.active_segment().poll_close(cx),
+            _ => panic!("do not close on roll over"),
+        }
+    }
+}
+
+// generate replication folder name
+fn replica_dir_name<S: AsRef<str>>(topic_name: S, partition_index: Size) -> String {
+    format!("{}-{}", topic_name.as_ref(), partition_index)
+}
+
+#[cfg(test)]
+mod tests {
+
+    use futures::sink::SinkExt;
+    use log::debug;
+    use std::env::temp_dir;
+    use std::fs;
+    use std::fs::metadata;
+    use std::io::Cursor;
+
+    use future_helper::test_async;
+    use kf_protocol::api::DefaultBatch;
+    use kf_protocol::api::Offset;
+    use kf_protocol::Decoder;
+    use kf_protocol::Encoder;
+    use kf_protocol::api::ErrorCode;
+    use kf_socket::FilePartitionResponse;
+    use kf_protocol::api::DefaultRecords;
+
+    use super::FileReplica;
+    use crate::fixture::create_batch;
+    use crate::fixture::ensure_clean_dir;
+    use crate::fixture::read_bytes_from_file;
+    use crate::ConfigOption;
+    use crate::StorageError;
+    use crate::SegmentSlice;
+    use crate::ReplicaStorage;
+
+    const TEST_SEG_NAME: &str = "00000000000000000020.log";
+    const TEST_SE2_NAME: &str = "00000000000000000022.log";
+    const TEST_SEG_IDX: &str = "00000000000000000020.index";
+    const TEST_SEG2_IDX: &str = "00000000000000000022.index";
+    const START_OFFSET: Offset = 20;
+
+    fn base_option(dir: &str) -> ConfigOption {
+
+        let base_dir = temp_dir().join(dir);
+        ensure_clean_dir(&base_dir);
+        ConfigOption {
+            segment_max_bytes: 10000,
+            base_dir,
+            index_max_interval_bytes: 1000,
+            index_max_bytes: 1000,
+            ..Default::default()
+        }
+    }
+
+    fn rollover_option(dir: &str) -> ConfigOption {
+
+        let base_dir = temp_dir().join(dir);
+        ensure_clean_dir(&base_dir);
+        ConfigOption {
+            segment_max_bytes: 100,
+            base_dir,
+            index_max_bytes: 1000,
+            index_max_interval_bytes: 0,
+            ..Default::default()
+        }
+    }
+
+    #[test_async]
+    async fn test_replica_simple() -> Result<(), StorageError> {
+        let option = base_option("testsimple");
+        let mut replica = FileReplica::create("test", 0, START_OFFSET, &option).await.expect("test replica");
+
+        assert_eq!(replica.get_log_start_offset(),START_OFFSET);
+        replica.send(create_batch()).await?;
+        replica.update_high_watermark(10).await?;
+
+        let test_file = option.base_dir.join("test-0").join(TEST_SEG_NAME);
+        debug!("using test file: {:#?}",test_file);
+        let bytes = read_bytes_from_file(&test_file)?;
+
+        let
batch = DefaultBatch::decode_from(&mut Cursor::new(bytes),0)?; + assert_eq!(batch.get_header().magic, 2, "check magic"); + assert_eq!(batch.get_base_offset(), START_OFFSET); + assert_eq!(batch.get_header().last_offset_delta, 1); + assert_eq!(batch.records.len(), 2); + + // find segment + let segment = replica.find_segment(0); + assert!(segment.is_none()); + + let segment = replica.find_segment(20); + match segment.unwrap() { + SegmentSlice::MutableSegment(_) => assert!(true,"should be active segment"), + SegmentSlice::Segment(_) => assert!(false,"cannot be inactive") + } + + + let segment = replica.find_segment(21); + assert!(segment.is_some()); + match segment.unwrap() { + SegmentSlice::MutableSegment(_) => assert!(true,"should be active segment"), + SegmentSlice::Segment(_) => assert!(false,"cannot be inactive") + } + + replica.send(create_batch()).await?; + + let segment = replica.find_segment(30); + assert!(segment.is_some()); + match segment.unwrap() { + SegmentSlice::MutableSegment(_) => assert!(true,"should be active segment"), + SegmentSlice::Segment(_) => assert!(false,"cannot be inactive") + } + + + Ok(()) + } + + const TEST_UNCOMMIT_DIR: &str = "testuncommitted"; + + #[test_async] + async fn test_uncommited_fetch() -> Result<(), StorageError> { + + let option = base_option(TEST_UNCOMMIT_DIR); + + let mut replica = FileReplica::create("test", 0, 0, &option).await.expect("test replica"); + + let mut empty_response = FilePartitionResponse::default(); + replica.read_uncommitted_records(&mut empty_response).await; + assert_eq!(empty_response.records.len(),0); + assert_eq!(empty_response.error_code,ErrorCode::None); + + + let batch = create_batch(); + let batch_len = batch.write_size(0); + debug!("batch len: {}",batch_len); + replica.send(batch).await?; + + let mut partition_response = FilePartitionResponse::default(); + replica.read_uncommitted_records(&mut partition_response).await; + assert_eq!(partition_response.records.len(),batch_len); + + replica.update_high_watermark(2).await?; // first batch + assert_eq!(replica.get_hw(),2); + + let batch = create_batch(); + let batch_len = batch.write_size(0); + replica.send(batch).await?; + + let mut partition_response = FilePartitionResponse::default(); + replica.read_uncommitted_records(&mut partition_response).await; + debug!("partiton response: {:#?}",partition_response); + assert_eq!(partition_response.records.len(),batch_len); + + replica.send(create_batch()).await?; + let mut partition_response = FilePartitionResponse::default(); + replica.read_uncommitted_records(&mut partition_response).await; + assert_eq!(partition_response.records.len(),batch_len*2); + + Ok(()) + } + + const TEST_OFFSET_DIR: &str = "testoffset"; + + #[test_async] + async fn test_replica_end_offset() -> Result<(), StorageError> { + + let option = base_option(TEST_OFFSET_DIR); + + let mut rep_sink = FileReplica::create("test", 0, START_OFFSET, &option).await.expect("test replica"); + rep_sink.send(create_batch()).await?; + rep_sink.send(create_batch()).await?; + drop(rep_sink); + + // open replica + let replica2 = FileReplica::create("test", 0, START_OFFSET, &option).await.expect("test replica"); + assert_eq!(replica2.get_leo(), START_OFFSET + 4); + + Ok(()) + } + + const TEST_REPLICA_DIR: &str = "test_replica"; + + // you can show log by: RUST_LOG=commit_log=debug cargo test roll_over + + #[test_async] + async fn test_rep_log_roll_over() -> Result<(), StorageError> { + + let option = rollover_option(TEST_REPLICA_DIR); + + let mut replica = + 
FileReplica::create("test", 1, START_OFFSET, &option).await.expect("create rep"); + + // first batch + debug!(">>>> sending first batch"); + let batches = create_batch(); + replica.send(batches).await?; + + // second batch + debug!(">>>> sending second batch. this should rollover"); + let batches = create_batch(); + replica.send(batches).await?; + debug!("finish sending next batch"); + + assert_eq!(replica.get_log_start_offset(),START_OFFSET); + let replica_dir = &option.base_dir.join("test-1"); + let dir_contents = fs::read_dir(&replica_dir)?; + assert_eq!(dir_contents.count(), 5, "should be 5 files"); + + + let seg2_file = replica_dir.join(TEST_SE2_NAME); + let bytes = read_bytes_from_file(&seg2_file)?; + + let batch = DefaultBatch::decode_from(&mut Cursor::new(bytes),0)?; + assert_eq!(batch.get_header().magic, 2, "check magic"); + assert_eq!(batch.records.len(), 2); + assert_eq!(batch.get_base_offset(), 22); + + + let metadata_res = metadata(replica_dir.join(TEST_SEG2_IDX)); + assert!(metadata_res.is_ok()); + let metadata2 = metadata_res.unwrap(); + assert_eq!(metadata2.len(), 1000); + + + let seg1_metadata = metadata(replica_dir.join(TEST_SEG_IDX))?; + assert_eq!(seg1_metadata.len(), 8); + + + Ok(()) + } + + + const TEST_COMMIT_DIR: &str = "testcommit"; + + #[test_async] + async fn test_replica_commit() -> Result<(), StorageError> { + + let option = base_option(TEST_COMMIT_DIR); + let mut replica = FileReplica::create("test", 0, 0, &option).await.expect("test replica"); + + let records = DefaultRecords::default() + .add(create_batch()); + + replica.send_records(records,true).await?; + + // record contains 2 batch + assert_eq!(replica.get_hw(),2); + + drop(replica); + + // restore replica + let replica = FileReplica::create("test", 0, 0, &option).await.expect("test replica"); + assert_eq!(replica.get_hw(),2); + + Ok(()) + } + + + + const TEST_COMMIT_FETCH_DIR: &str = "testcommitt_fetch"; + + /// test fetch only committed records + #[test_async] + async fn test_committed_fetch() -> Result<(), StorageError> { + + let option = base_option(TEST_COMMIT_FETCH_DIR); + + let mut replica = FileReplica::create("test", 0, 0, &option).await.expect("test replica"); + + let batch = create_batch(); + let batch_len = batch.write_size(0); + replica.send(batch).await.expect("writing records"); + + let mut partition_response = FilePartitionResponse::default(); + replica.read_committed_records(0,&mut partition_response).await; + debug!("partiton response: {:#?}",partition_response); + assert_eq!(partition_response.records.len(),0); + + replica.update_high_watermark_to_end().await.expect("update highwatermark"); + + debug!("replica end: {} high: {}",replica.get_leo(),replica.get_hw()); + + let mut partition_response = FilePartitionResponse::default(); + replica.read_committed_records(0,&mut partition_response).await; + debug!("partiton response: {:#?}",partition_response); + assert_eq!(partition_response.records.len(),batch_len); + + // write 1 more batch + let batch = create_batch(); + replica.send(batch).await.expect("writing 2nd batch"); + debug!("2nd batch: replica end: {} high: {}",replica.get_leo(),replica.get_hw()); + + let mut partition_response = FilePartitionResponse::default(); + replica.read_committed_records(0,&mut partition_response).await; + debug!("partiton response: {:#?}",partition_response); + // should return same records as 1 batch since we didn't commit 2nd batch + assert_eq!(partition_response.records.len(),batch_len); + + Ok(()) + } + + +} diff --git a/storage/src/segment.rs 
b/storage/src/segment.rs new file mode 100644 index 0000000000..4269b833eb --- /dev/null +++ b/storage/src/segment.rs @@ -0,0 +1,593 @@
+use std::fmt;
+use std::io::Error as IoError;
+use std::pin::Pin;
+use std::ops::Deref;
+use std::task::Context;
+use std::task::Poll;
+
+use futures::sink::Sink;
+use futures::stream::StreamExt;
+use futures::ready;
+use futures::Future;
+use log::debug;
+use log::trace;
+use pin_utils::pin_mut;
+use pin_utils::unsafe_pinned;
+use pin_utils::unsafe_unpinned;
+
+use kf_protocol::api::DefaultBatch;
+use kf_protocol::api::Offset;
+use kf_protocol::api::Size;
+use future_aio::fs::AsyncFileSlice;
+
+use crate::BatchHeaderStream;
+use crate::mut_index::MutLogIndex;
+use crate::index::LogIndex;
+use crate::index::Index;
+use crate::records::FileRecords;
+use crate::mut_records::MutFileRecords;
+use crate::records::FileRecordsSlice;
+use crate::BatchHeaderPos;
+use crate::ConfigOption;
+use crate::StorageError;
+use crate::DefaultFileBatchStream;
+use crate::index::OffsetPosition;
+use crate::validator::LogValidationError;
+use crate::util::OffsetError;
+
+pub(crate) type MutableSegment = Segment<MutLogIndex, MutFileRecords>;
+pub(crate) type ReadSegment = Segment<LogIndex, FileRecordsSlice>;
+
+pub(crate) enum SegmentSlice<'a> {
+    MutableSegment(&'a MutableSegment),
+    Segment(&'a ReadSegment)
+}
+
+impl<'a> Unpin for SegmentSlice<'a> {}
+
+impl<'a> SegmentSlice<'a> {
+    pub fn new_mut_segment(segment: &'a MutableSegment) -> Self {
+        SegmentSlice::MutableSegment(segment)
+    }
+
+    pub fn new_segment(segment: &'a ReadSegment) -> Self {
+        SegmentSlice::Segment(segment)
+    }
+}
+
+/// Segment contains both message log and index
+pub(crate) struct Segment<I, L> {
+    option: ConfigOption,
+    msg_log: L,
+    index: I,
+    base_offset: Offset,
+    end_offset: Offset,
+}
+
+impl<I, L> fmt::Debug for Segment<I, L> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(
+            f,
+            "Segment {{base: {}, current: {} }}",
+            self.get_base_offset(), self.get_end_offset()
+        )
+    }
+}
+
+impl<I, L> Segment<I, L> {
+
+    /// end offset; starts equal to the base offset, which indicates empty records
+    pub fn get_end_offset(&self) -> Offset {
+        self.end_offset
+    }
+
+    #[allow(dead_code)]
+    /// set end offset, this is used by test
+    pub(crate) fn set_end_offset(&mut self, offset: Offset) {
+        self.end_offset = offset;
+    }
+
+    pub fn get_base_offset(&self) -> Offset {
+        self.base_offset
+    }
+
+}
+
+impl<I, L> Segment<I, L>
+    where I: Index,
+          I: Deref,
+          L: FileRecords
+{
+
+    #[allow(dead_code)]
+    pub fn get_index(&self) -> &I {
+        &self.index
+    }
+
+    pub async fn open_batch_header_stream(
+        &self,
+        start_pos: Size,
+    ) -> Result<BatchHeaderStream, StorageError> {
+        trace!("opening batch header stream at: {}", start_pos);
+        let file = self.msg_log.get_file().read_clone().await?;
+        BatchHeaderStream::new_with_pos(file, start_pos).await
+    }
+
+    #[allow(dead_code)]
+    pub async fn open_default_batch_stream(&self) -> Result<DefaultFileBatchStream, StorageError> {
+        let file = self.msg_log.get_file().read_clone().await?;
+        Ok(DefaultFileBatchStream::new(file))
+    }
+
+    /// get file slice from offset to end of segment
+    pub async fn records_slice(&self, start_offset: Offset, max_offset_opt: Option<Offset>) -> Result<Option<AsyncFileSlice>, StorageError> {
+
+        match self.find_offset_position(start_offset).await? {
+            Some(start_pos) => {
+                trace!("found batch: {:#?} at: {}",start_pos.get_batch(),start_pos.get_pos());
+                match max_offset_opt {
+                    Some(max_offset) => {
+                        // check if max offset is the same as the segment end offset
+                        if max_offset == self.get_end_offset() {
+                            trace!("max offset is same as end offset, reading to end");
+                            Ok(Some(self.msg_log.as_file_slice(start_pos.get_pos())?))
+                        } else {
+                            trace!("end offset is supplied: {}",max_offset);
+                            match self.find_offset_position(max_offset).await? {
+                                Some(end_pos) => {
+                                    Ok(Some(self.msg_log.as_file_slice_from_to(start_pos.get_pos(), end_pos.get_pos() - start_pos.get_pos())?))
+                                },
+                                None => Err(StorageError::OffsetError(OffsetError::NotExistent))
+                            }
+                        }
+                    },
+                    None => Ok(Some(self.msg_log.as_file_slice(start_pos.get_pos())?))
+                }
+            }
+            None => Ok(None)
+        }
+
+    }
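+
+    // note on find_offset_position below: the in-memory index first maps the
+    // offset delta to the nearest earlier file position (a cheap hint), then
+    // batch headers are scanned forward from that hint until a batch whose
+    // last offset reaches the requested offset is found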
+    /// find position of the offset
+    pub(crate) async fn find_offset_position(
+        &self,
+        offset: Offset,
+    ) -> Result<Option<BatchHeaderPos>, StorageError> {
+        trace!("finding offset position: {}", offset);
+        if offset < self.base_offset {
+            trace!(
+                "invalid offset: {} is below base offset: {}",
+                offset,
+                self.base_offset
+            );
+            return Ok(None);
+        }
+        if offset >= self.end_offset {
+            trace!(
+                "invalid offset: {} exceed end offset: {}",
+                offset,
+                self.end_offset
+            );
+            return Ok(None);
+        }
+
+        let delta = (offset - self.base_offset) as Size;
+
+        let position = match self.index.find_offset(delta) {
+            None => 0,
+            Some(entry) => entry.position(),
+        };
+        trace!(
+            "found relative pos: {}",
+            position
+        );
+
+        let mut header_stream = self.open_batch_header_stream(position).await?;
+        trace!("iterating header stream");
+        while let Some(batch_pos) = header_stream.next().await {
+            let last_offset = batch_pos.get_last_offset();
+            if last_offset >= offset {
+                trace!(
+                    "found batch last offset which matches offset: {}",
+                    last_offset
+                );
+                return Ok(Some(batch_pos));
+            } else {
+                trace!(
+                    "skipping batch end offset: {}",
+                    last_offset
+                );
+            }
+        }
+        Ok(None)
+    }
+}
+
+impl Segment<LogIndex, FileRecordsSlice> {
+
+    pub async fn open_for_read(base_offset: Offset, option: &ConfigOption) -> Result<ReadSegment, StorageError> {
+        let msg_log = FileRecordsSlice::open(base_offset, option).await?;
+        let base_offset = msg_log.get_base_offset();
+        let index = LogIndex::open_from_offset(base_offset, option).await?;
+
+        Ok(Segment {
+            msg_log,
+            index,
+            option: option.to_owned(),
+            base_offset,
+            end_offset: base_offset,
+        })
+    }
+
+    pub fn to_segment_slice(&self) -> SegmentSlice {
+        SegmentSlice::new_segment(self)
+    }
+}
+
+impl Segment<MutLogIndex, MutFileRecords> {
+    unsafe_pinned!(msg_log: MutFileRecords);
+    unsafe_pinned!(index: MutLogIndex);
+    unsafe_unpinned!(base_offset: Offset);
+    unsafe_unpinned!(end_offset: Offset);
+
+    // create segment on base directory
+    pub async fn create(
+        base_offset: Offset,
+        option: &ConfigOption,
+    ) -> Result<MutableSegment, StorageError> {
+        debug!("creating new segment: offset: {}", base_offset);
+        let msg_log = MutFileRecords::create(base_offset, option).await?;
+
+        let index = MutLogIndex::create(base_offset, option).await?;
+
+        Ok(MutableSegment {
+            option: option.to_owned(),
+            msg_log,
+            index,
+            base_offset,
+            end_offset: base_offset,
+        })
+    }
+
+    pub async fn open_for_write(
+        base_offset: Offset,
+        option: &ConfigOption,
+    ) -> Result<MutableSegment, StorageError> {
+        trace!(
+            "opening mut segment: {} at: {:#?}",
+            base_offset, &option.base_dir
+        );
+        let msg_log = MutFileRecords::open(base_offset, option).await?;
+        let base_offset = msg_log.get_base_offset();
+        let index = MutLogIndex::open(base_offset, option).await?;
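+
+        // end_offset is seeded with base_offset here; FileReplica::create runs
+        // validate() right after open_for_write, which rescans the log and
+        // advances end_offset to the true end before new batches are appended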
Ok(MutableSegment { + option: option.to_owned(), + msg_log, + index, + base_offset, + end_offset: base_offset, + }) + } + + fn get_log_pos(&self) -> u32 { + self.msg_log.get_pos() + } + + /// validate the segment and load last offset + pub async fn validate(&mut self) -> Result<(), StorageError> { + self.end_offset = self.msg_log.validate().await?; + Ok(()) + } + + async fn shrink_index(&mut self) -> Result<(),IoError> { + self.index.shrink().await + } + + /* + // do modification under 'a lifetime so we do multiple mutations + #[allow(dead_code)] + fn update_send(self, mut batch: DefaultRecordBatch) -> Result<(), StorageError> { + batch.set_base_offset(self.current_offset); + Ok(()) + } + */ + + // use poll rather than future to make it easier to call from other poll + // otherwise you get complex lifetime issue + + pub fn poll_roll_over(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let ft = self.shrink_index(); + pin_mut!(ft); + ft.poll(cx) + } + + + /// convert to immutable segment + pub async fn as_segment(self) -> Result { + Segment::open_for_read(self.get_base_offset(), &self.option).await + } + + /// shrink and convert as immutable + #[allow(dead_code)] + pub async fn convert_to_segment(mut self) -> Result { + self.shrink_index().await?; + Segment::open_for_read(self.get_base_offset(), &self.option).await + } + + pub fn to_segment_slice(&self) -> SegmentSlice { + SegmentSlice::new_mut_segment(self) + } + + +} + +impl Unpin for Segment {} + +impl Sink for Segment { + + type Error = StorageError; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + self.msg_log().poll_ready(cx) + } + + fn start_send( + mut self: Pin<&mut Self>, + mut item: DefaultBatch, + ) -> Result<(), Self::Error> { + + let current_offset = self.as_ref().end_offset; + let base_offset = self.as_ref().base_offset; + let pos = self.as_ref().get_log_pos(); + + // fill in the baseoffset using current offset if record's batch offset is 0 + // ensure batch is not already recorded + if item.base_offset == 0 { + item.set_base_offset(current_offset); + } else { + if item.base_offset < current_offset { + return Err(StorageError::LogValidationError(LogValidationError::ExistingBatch)) + } + } + + let batch_offset_delta = (current_offset - base_offset) as i32; + debug!( + "writing batch with base: {}, file pos: {}", + base_offset, pos + ); + + match self.as_mut().msg_log().start_send(item) { + Ok(_) => { + let batch_len = self.msg_log.get_pending_batch_len(); + self.index() + .start_send((batch_offset_delta as u32, pos, batch_len)) + .map_err(|err| err.into()) + } + Err(err) => Err(err), + } + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let last_offset_delta = self.as_ref().msg_log.get_item_last_offset_delta(); + trace!("flushing: last offset delta: {}",last_offset_delta); + ready!(self.as_mut().msg_log().poll_flush(cx))?; + let offset_pt = self.end_offset(); + *offset_pt = *offset_pt + last_offset_delta as Offset + 1; + debug!("flushing, updated end offset: {}", *offset_pt); + Poll::Ready(Ok(())) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + self.msg_log().poll_close(cx) + } +} + + +#[cfg(test)] +mod tests { + + use futures::sink::SinkExt; + use log::debug; + use std::env::temp_dir; + use std::fs::metadata; + use std::io::Cursor; + use std::path::PathBuf; + + use future_helper::test_async; + + use kf_protocol::api::DefaultBatch; + use kf_protocol::api::Size; + use kf_protocol::Decoder; + + use super::MutableSegment; + use 
crate::fixture::create_batch_with_producer; + use crate::fixture::create_batch; + use crate::fixture::ensure_new_dir; + use crate::fixture::read_bytes_from_file; + use crate::ConfigOption; + use crate::StorageError; + use crate::index::OffsetPosition; + + // TODO: consolidate + + fn default_option(base_dir: PathBuf, index_max_interval_bytes: Size) -> ConfigOption { + ConfigOption { + segment_max_bytes: 1000, + base_dir, + index_max_interval_bytes, + index_max_bytes: 1000, + ..Default::default() + } + } + + const TEST_FILE_NAME: &str = "00000000000000000020.log"; // offset 20 different from other test + const SEG_INDEX: &str = "00000000000000000020.index"; + + #[test_async] + async fn test_segment_single_record() -> Result<(), StorageError> { + let test_dir = temp_dir().join("seg-single-record"); + ensure_new_dir(&test_dir)?; + + let option = default_option(test_dir.clone(), 0); + + let base_offset = 20; + + let mut seg_sink = MutableSegment::create(base_offset, &option).await?; + + assert_eq!(seg_sink.get_end_offset(),20); + seg_sink.send(create_batch_with_producer(100,1)).await?; + assert_eq!(seg_sink.get_end_offset(),21); + + // check to see if batch is written + let bytes = read_bytes_from_file(&test_dir.join(TEST_FILE_NAME))?; + debug!("read {} bytes", bytes.len()); + + let batch = DefaultBatch::decode_from(&mut Cursor::new(bytes),0)?; + assert_eq!(batch.get_base_offset(), 20); + assert_eq!(batch.get_header().magic, 2, "check magic"); + assert_eq!(batch.records.len(), 1); + + + let seg1_metadata = metadata(test_dir.join(SEG_INDEX))?; + assert_eq!(seg1_metadata.len(), 1000); + + + assert!((seg_sink.find_offset_position(10).await?).is_none()); + + let offset_position = (seg_sink.find_offset_position(20).await?).expect("offset exists"); + + assert_eq!(offset_position.get_base_offset(), 20); + assert_eq!(offset_position.get_pos(), 0);// + assert_eq!(offset_position.len(), 70 - 12); + assert!((seg_sink.find_offset_position(30).await?).is_none()); + + Ok(()) + } + + + #[test_async] + async fn test_segment_multiple_record() -> Result<(), StorageError> { + let test_dir = temp_dir().join("seg-multiple-record"); + ensure_new_dir(&test_dir)?; + + let option = default_option(test_dir.clone(), 0); + + let base_offset = 20; + + let mut seg_sink = MutableSegment::create(base_offset, &option).await?; + + seg_sink.send(create_batch_with_producer(100,4)).await?; + + + + // each record contains 9 bytes + + // check to see if batch is written + let bytes = read_bytes_from_file(&test_dir.join(TEST_FILE_NAME))?; + debug!("read {} bytes", bytes.len()); + + let batch = DefaultBatch::decode_from(&mut Cursor::new(bytes),0)?; + assert_eq!(batch.get_base_offset(), 20); + assert_eq!(batch.get_header().magic, 2, "check magic"); + assert_eq!(batch.records.len(), 4); + + let seg1_metadata = metadata(test_dir.join(SEG_INDEX))?; + assert_eq!(seg1_metadata.len(), 1000); + + assert!((seg_sink.find_offset_position(10).await?).is_none()); + let offset_position = (seg_sink.find_offset_position(20).await?).expect("offset exists"); + assert_eq!(offset_position.get_base_offset(), 20); + assert_eq!(offset_position.get_pos(), 0);// + assert_eq!(offset_position.len(), 85); + assert!((seg_sink.find_offset_position(30).await?).is_none()); + + Ok(()) + } + + + const TEST2_FILE_NAME: &str = "00000000000000000040.log"; // offset 20 different from other test + + #[test_async] + async fn test_segment_multiple_batch() -> Result<(), StorageError> { + let test_dir = temp_dir().join("multiple-segment"); + ensure_new_dir(&test_dir)?; + + let 
base_offset = 40; + + let option = default_option(test_dir.clone(), 50); + + let mut seg_sink = MutableSegment::create(base_offset, &option).await?; + seg_sink.send(create_batch()).await?; + seg_sink.send(create_batch()).await?; + seg_sink.send(create_batch()).await?; + + assert_eq!(seg_sink.get_end_offset(),46); + + assert_eq!(seg_sink.get_log_pos(), 237); // each takes 79 bytes + + let index = seg_sink.get_index(); + assert_eq!(index[0].to_be(), (2, 79)); + + let bytes = read_bytes_from_file(&test_dir.join(TEST2_FILE_NAME))?; + debug!("read {} bytes", bytes.len()); + + let cursor = &mut Cursor::new(bytes); + let batch = DefaultBatch::decode_from(cursor,0)?; + assert_eq!(batch.get_base_offset(), 40); + assert_eq!(batch.get_header().last_offset_delta, 1); + + let batch2 = DefaultBatch::decode_from(cursor,0)?; + assert_eq!(batch2.get_base_offset(), 42); + assert_eq!(batch2.get_header().last_offset_delta, 1); + + + let offset_pos1 = seg_sink.find_offset_position(40).await?.expect("pos"); + assert_eq!(offset_pos1.get_base_offset(), 40); + assert_eq!(offset_pos1.get_pos(), 0); + assert_eq!(offset_pos1.len(), 67); + + + let offset_pos2 = seg_sink.find_offset_position(42).await?.expect("pos"); + assert_eq!(offset_pos2.get_base_offset(), 42); + assert_eq!(offset_pos2.get_pos(), 79); + assert_eq!(offset_pos2.len(), 67); + + + let offset_pos3 = seg_sink.find_offset_position(44).await?.expect("pos"); + assert_eq!(offset_pos3.get_base_offset(), 44); + assert_eq!(offset_pos3.get_pos(), 158); + assert_eq!(offset_pos3.len(), 67); + + + // test whether you can send batch with non zero base offset + let mut next_batch = create_batch(); + next_batch.base_offset = 46; + assert!(seg_sink.send(next_batch).await.is_ok()); + + let mut fail_batch = create_batch(); + fail_batch.base_offset = 45; + assert!(seg_sink.send(fail_batch).await.is_err()); + + + Ok(()) + } + +} diff --git a/storage/src/util.rs b/storage/src/util.rs new file mode 100644 index 0000000000..4f60d3b219 --- /dev/null +++ b/storage/src/util.rs @@ -0,0 +1,92 @@ + +use std::path::Path; +use std::path::PathBuf; +use std::num::ParseIntError; +use std::fmt; + +use kf_protocol::api::Offset; + +/// given parent directory, base offset, extension, generate path +pub fn generate_file_name
<P>
(parent_dir: P, base_offset: Offset, extension: &str) -> PathBuf
+    where P: AsRef<Path>
+{
+
+    let mut file = parent_dir.as_ref().join(format!("{:020}",base_offset));
+    file.set_extension(extension);
+    file
+}
+
+#[derive(Debug)]
+pub enum OffsetError {
+    NotExistent,
+    InvalidPath,
+    InvalidLogFileName,
+    OffsetParseError(ParseIntError)
+}
+
+impl fmt::Display for OffsetError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            Self::NotExistent => write!(f, "non existent"),
+            Self::InvalidPath => write!(f,"invalid path"),
+            Self::InvalidLogFileName => write!(f,"log file name is invalid"),
+            Self::OffsetParseError(err) => write!(f,"{}",err),
+        }
+    }
+}
+
+impl From<ParseIntError> for OffsetError {
+    fn from(error: ParseIntError) -> Self {
+        OffsetError::OffsetParseError(error)
+    }
+}
+
+pub fn log_path_get_offset
<P>
(path: P) -> Result<Offset, OffsetError> where P: AsRef<Path> {
+
+    let log_path = path.as_ref();
+
+    match log_path.file_stem() {
+        None => Err(OffsetError::InvalidPath),
+        Some(file_name) => {
+            if file_name.len() != 20 {
+                Err(OffsetError::InvalidLogFileName)
+            } else {
+                file_name.to_str().unwrap().parse().map_err(|err: ParseIntError| err.into())
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+
+    use std::env::temp_dir;
+    use std::ffi::OsStr;
+
+    use super::generate_file_name;
+    use super::log_path_get_offset;
+
+    #[test]
+    fn test_file_generation() {
+
+        let dir = temp_dir();
+        let path = generate_file_name(&dir,5,"log");
+        assert_eq!(path.file_name(),Some(OsStr::new("00000000000000000005.log")));
+    }
+
+    #[test]
+    fn test_log_path() {
+
+        let test_val = log_path_get_offset(temp_dir().join("00000000000000000005.log"));
+        assert!(test_val.is_ok());
+        assert_eq!(test_val.unwrap(),5);
+        assert!(log_path_get_offset(temp_dir().join("jwowow.log")).is_err());
+        assert!(log_path_get_offset(temp_dir().join("00000000000000000005.txt")).is_ok());
+        assert!(log_path_get_offset(temp_dir().join("00000000000000005.log")).is_err());
+    }
+
+} \ No newline at end of file
diff --git a/storage/src/validator.rs b/storage/src/validator.rs new file mode 100644 index 0000000000..c89a784dc4 --- /dev/null +++ b/storage/src/validator.rs @@ -0,0 +1,269 @@
+use std::io::Error as IoError;
+
+use futures::stream::StreamExt;
+use log::warn;
+use log::trace;
+use std::fmt;
+
+use future_aio::fs::AsyncFile;
+use future_aio::fs::FileSinkError;
+use kf_protocol::api::Offset;
+
+use crate::BatchHeaderStream;
+use crate::util::log_path_get_offset;
+use crate::util::OffsetError;
+
+#[derive(Debug)]
+pub enum LogValidationError {
+    InvalidExtension,
+    LogNameError(OffsetError),
+    FileSinkError(FileSinkError),
+    IoError(IoError),
+    BaseOffError,
+    OffsetNotOrderedError,
+    NoBatches,
+    ExistingBatch
+}
+
+impl fmt::Display for LogValidationError {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match self {
+            Self::InvalidExtension => write!(f, "invalid extension"),
+            Self::LogNameError(err) => write!(f,"{}",err),
+            Self::FileSinkError(err) => write!(f,"{}",err),
+            Self::IoError(err) => write!(f,"{}",err),
+            Self::BaseOffError => write!(f,"base offset error"),
+            Self::OffsetNotOrderedError => write!(f,"offset not ordered"),
+            Self::NoBatches => write!(f,"no batches"),
+            Self::ExistingBatch => write!(f,"batch already exists")
+        }
+    }
+}
+
+impl From<OffsetError> for LogValidationError {
+    fn from(error: OffsetError) -> Self {
+        LogValidationError::LogNameError(error)
+    }
+}
+
+impl From<IoError> for LogValidationError {
+    fn from(error: IoError) -> Self {
+        LogValidationError::IoError(error)
+    }
+}
+
+impl From<FileSinkError> for LogValidationError {
+    fn from(error: FileSinkError) -> Self {
+        LogValidationError::FileSinkError(error)
+    }
+}
+
+/// validate the file and find the last offset
+/// if the file is not valid then return an error
+#[allow(dead_code)]
+pub async fn validate(file: &mut AsyncFile) -> Result<Offset, LogValidationError> {
+    let base_offset = log_path_get_offset(file.get_path())?;
+    let file_name = file.get_path().display().to_string();
+
+    trace!(
+        "validating file: {}, baseoffset: {}",
+        file_name, base_offset
+    );
+
+    let file_clone = file.try_clone().await?;
+    let mut batch_stream = BatchHeaderStream::new(file_clone);
+    let mut end_offset: Offset = -1;
+
+    while let Some(batch_pos) = batch_stream.next().await {
+        let batch_base_offset = batch_pos.get_batch().get_base_offset();
+        let header = batch_pos.get_batch().get_header();
+        let offset_delta = header.last_offset_delta;
+
+        trace!(
+            "found
batch base: {} offset delta: {}", + batch_base_offset, offset_delta + ); + + if batch_base_offset < base_offset { + warn!( + "batch base offset: {} is less than base offset: {} path: {:#?}", + batch_base_offset, base_offset, file_name + ); + return Err(LogValidationError::BaseOffError); + } + + if batch_base_offset <= end_offset { + warn!( + "batch offset is {} is less than prev offset {}", + batch_base_offset, end_offset + ); + return Err(LogValidationError::OffsetNotOrderedError); + } + + end_offset = batch_base_offset + offset_delta as Offset; + } + + if let Some(err) = batch_stream.invalid() { + return Err(err.into()); + } + + if end_offset == -1 { + trace!("no batch found, returning last offset delta 0"); + return Ok(base_offset) + } + + trace!("end offset: {}",end_offset); + Ok(end_offset + 1) +} + +#[cfg(test)] +mod tests { + + use std::env::temp_dir; + + use futures::sink::SinkExt; + + use future_aio::fs::AsyncFile; + use future_aio::fs::FileSink; + use future_aio::fs::FileSinkOption; + use future_helper::test_async; + use kf_protocol::api::DefaultRecord; + use kf_protocol::api::DefaultBatch; + use kf_protocol::api::Offset; + + use crate::fixture::ensure_clean_file; + use crate::mut_records::MutFileRecords; + use crate::ConfigOption; + + use super::validate; + use crate::StorageError; + + + const PRODUCER: i64 = 33; + + pub fn create_batch(base_offset: Offset, records: u16) -> DefaultBatch { + let mut batches = DefaultBatch::default(); + batches.set_base_offset(base_offset); + let header = batches.get_mut_header(); + header.magic = 2; + header.producer_id = PRODUCER; + header.producer_epoch = -1; + + for _ in 0..records { + let mut record = DefaultRecord::default(); + let bytes: Vec = vec![10, 20]; + record.value = Some(bytes).into(); + batches.add_record(record); + } + + batches + } + + const TEST_FILE_NAME: &str = "00000000000000000301.log"; // for offset 301 + const BASE_OFFSET: Offset = 301; + + #[test_async] + async fn test_validate_empty() -> Result<(), StorageError> { + let test_file = temp_dir().join(TEST_FILE_NAME); + ensure_clean_file(&test_file); + + let options = ConfigOption { + base_dir: temp_dir(), + segment_max_bytes: 1000, + ..Default::default() + }; + + let _ = MutFileRecords::open(BASE_OFFSET, &options).await?; + let mut file = AsyncFile::open(&test_file).await?; + let next_offset = validate(&mut file).await?; + assert_eq!(next_offset,BASE_OFFSET); + + Ok(()) + } + + const TEST_FILE_SUCCESS_NAME: &str = "00000000000000000601.log"; // for offset 301 + const SUCCESS_BASE_OFFSET: Offset = 601; + + + #[test_async] + async fn test_validate_success() -> Result<(), StorageError> { + let test_file = temp_dir().join(TEST_FILE_SUCCESS_NAME); + ensure_clean_file(&test_file); + + let options = ConfigOption { + base_dir: temp_dir(), + segment_max_bytes: 1000, + ..Default::default() + }; + + let mut msg_sink = MutFileRecords::create(SUCCESS_BASE_OFFSET, &options).await?; + + msg_sink.send(create_batch(SUCCESS_BASE_OFFSET, 2)).await?; + msg_sink.send(create_batch(SUCCESS_BASE_OFFSET+2,3)).await?; + + let mut file = AsyncFile::open(&test_file).await?; + + let next_offset = validate(&mut file).await?; + assert_eq!(next_offset, SUCCESS_BASE_OFFSET + 5); + + Ok(()) + } + + const TEST_FILE_NAME_FAIL: &str = "00000000000000000401.log"; // for offset 301 + + #[test_async] + async fn test_validate_offset() -> Result<(), StorageError> { + let test_file = temp_dir().join(TEST_FILE_NAME_FAIL); + ensure_clean_file(&test_file); + + let options = ConfigOption { + base_dir: temp_dir(), + 
segment_max_bytes: 1000, + ..Default::default() + }; + + let mut msg_sink = MutFileRecords::create(401, &options).await?; + + msg_sink.send(create_batch(401, 0)).await?; + msg_sink.send(create_batch(111, 1)).await?; + + let mut file = AsyncFile::open(&test_file).await?; + + assert!(validate(&mut file).await.is_err()); + + Ok(()) + } + + const TEST_FILE_NAME_FAIL2: &str = "00000000000000000501.log"; // for offset 301 + + #[test_async] + async fn test_validate_invalid_contents() -> Result<(), StorageError> { + let test_file = temp_dir().join(TEST_FILE_NAME_FAIL2); + ensure_clean_file(&test_file); + + let options = ConfigOption { + base_dir: temp_dir(), + segment_max_bytes: 1000, + ..Default::default() + }; + + let mut msg_sink = MutFileRecords::create(501, &options).await?; + msg_sink.send(create_batch(501, 2)).await?; + + // add some junk + let mut f_sink = FileSink::create(&test_file, FileSinkOption::default()).await?; + let bytes = vec![0x01, 0x02, 0x03]; + f_sink.send(bytes).await?; + + let mut file = AsyncFile::open(&test_file).await?; + + assert!(validate(&mut file).await.is_err()); + + Ok(()) + } + +} diff --git a/storage/tests/replica_test.rs b/storage/tests/replica_test.rs new file mode 100644 index 0000000000..4a2b1fc6e8 --- /dev/null +++ b/storage/tests/replica_test.rs @@ -0,0 +1,234 @@ +// test fetch of replica + +use std::env::temp_dir; +use std::net::SocketAddr; +use std::time::Duration; + +use log::debug; +use futures::stream::StreamExt; +use futures::sink::SinkExt; +use futures::future::join; + +use future_helper::test_async; +use future_helper::sleep; +use kf_protocol::message::fetch::FetchPartition; +use kf_protocol::message::fetch::FetchableTopic; +use kf_protocol::api::RequestMessage; +use kf_protocol::api::DefaultBatch; +use kf_protocol::api::DefaultRecord; +use kf_protocol::message::fetch::DefaultKfFetchRequest; +use kf_protocol::api::Offset; +use future_aio::net::AsyncTcpListener; +use kf_socket::KfSocket; +use kf_socket::KfSocketError; + + +use utils::fixture::ensure_clean_dir; + +use kf_socket::FileFetchResponse; +use kf_socket::KfFileFetchRequest; +use kf_socket::FilePartitionResponse; +use kf_socket::FileTopicResponse; +use storage::StorageError; +use storage::FileReplica; +use storage::ConfigOption; + +const TEST_REP_DIR: &str = "testreplica-fetch"; +const START_OFFSET: Offset = 0; + +fn default_option() -> ConfigOption { + ConfigOption { + segment_max_bytes: 10000, + base_dir: temp_dir().join(TEST_REP_DIR), + index_max_interval_bytes: 1000, + index_max_bytes: 1000, + ..Default::default() + } + } + +/// create sample batches with variable number of records +fn create_batch(records: u16) -> DefaultBatch { + let mut batches = DefaultBatch::default(); + let header = batches.get_mut_header(); + header.magic = 2; + header.producer_id = 20; + header.producer_epoch = -1; + + for i in 0..records { + let msg = format!("record {}",i); + let record: DefaultRecord = msg.into(); + batches.add_record(record); + } + + batches +} + + + +// create new replica and add two batches +async fn setup_replica() -> Result { + + let option = default_option(); + + ensure_clean_dir(&option.base_dir); + + let mut replica = FileReplica::create("testsimple", 0, START_OFFSET, &option).await.expect("test replica"); + replica.send(create_batch(2)).await.expect("first batch"); + replica.send(create_batch(2)).await.expect("first batch"); + + Ok(replica) +} + +async fn handle_response<'a>(socket: &'a mut KfSocket,replica: &'a FileReplica) -> Result<(),KfSocketError> { + + let request: 
Result,KfSocketError> = + socket.get_mut_stream().next_request_item().await.expect("next value"); + let request = request?; + + let (header,fetch_request) = request.get_header_request(); + debug!("received fetch request"); + + let topic_request = &fetch_request.topics[0]; + let partiton_request = &topic_request.fetch_partitions[0]; + let fetch_offset = partiton_request.fetch_offset; + debug!("fetch offset: {}",fetch_offset); + + let mut response = FileFetchResponse::default(); + let mut topic_response = FileTopicResponse::default(); + let mut part_response = FilePartitionResponse::default(); + replica.read_records(fetch_offset, None, &mut part_response).await; + topic_response.partitions.push(part_response); + response.topics.push(topic_response); + + + let response = RequestMessage::::response_with_header(&header,response); + socket.get_mut_sink().encode_file_slices(&response,0).await?; + debug!("server: finish sending out"); + Ok(()) +} + + + async fn test_server(addr: SocketAddr) -> Result<(),StorageError> { + + debug!("setting up replica"); + let replica = setup_replica().await?; + + debug!("set up the replica"); + let listener = AsyncTcpListener::bind(&addr)?; + debug!("server is running"); + let mut incoming = listener.incoming(); + let incoming_stream = incoming.next().await; + debug!("server: got connection"); + let incoming_stream = incoming_stream.expect("next").expect("unwrap again"); + + let mut socket: KfSocket = incoming_stream.into(); + + handle_response(&mut socket,&replica).await?; + // await!(handle_response(&mut socket,&replica))?; + + Ok(()) +} + +async fn test_fetch_0(socket: &mut KfSocket) -> Result<(),KfSocketError> { + + debug!("testing fetch batch 0"); + let mut request = DefaultKfFetchRequest::default(); + let mut topic_request = FetchableTopic::default(); + topic_request.name = "testsimple".to_owned(); + let mut part_request = FetchPartition::default(); + part_request.fetch_offset = 0; + topic_request.fetch_partitions.push(part_request); + request.topics.push(topic_request); + + + let mut req_msg = RequestMessage::new_request(request); + req_msg + .get_mut_header() + .set_client_id("test") + .set_correlation_id(10); + + let res_msg = socket.send(&req_msg).await?; + + debug!("output: {:#?}",res_msg); + let topic_responses = res_msg.response.topics; + assert_eq!(topic_responses.len(),1); + let part_responses = &topic_responses[0].partitions; + assert_eq!(part_responses.len(),1); + let batches = &part_responses[0].records.batches; + assert_eq!(batches.len(),2); + let records = &batches[0].records; + assert_eq!(records.len(),2); + assert_eq!(records[0].value.to_string(),"record 0"); + assert_eq!(records[1].value.to_string(),"record 1"); + + Ok(()) +} + + +async fn test_fetch_2(socket: &mut KfSocket) -> Result<(),KfSocketError> { + + debug!("testing fetch batch 2"); + let mut request = DefaultKfFetchRequest::default(); + let mut topic_request = FetchableTopic::default(); + topic_request.name = "testsimple".to_owned(); + let mut part_request = FetchPartition::default(); + part_request.fetch_offset = 2; + topic_request.fetch_partitions.push(part_request); + request.topics.push(topic_request); + + let mut req_msg = RequestMessage::new_request(request); + req_msg + .get_mut_header() + .set_client_id("test") + .set_correlation_id(10); + + let res_msg = socket.send(&req_msg).await?; + + debug!("output: {:#?}",res_msg); + let topic_responses = res_msg.response.topics; + assert_eq!(topic_responses.len(),1); + let part_responses = &topic_responses[0].partitions; + 
assert_eq!(part_responses.len(),1); + let batches = &part_responses[0].records.batches; + assert_eq!(batches.len(),2); + assert!(false,"fail"); + let batch = &batches[0]; + assert_eq!(batch.get_base_offset(),2); + let records = &batches[0].records; + assert_eq!(records.len(),2); + assert_eq!(records[0].value.to_string(),"record 0"); + assert_eq!(records[1].value.to_string(),"record 1"); + + Ok(()) +} + +async fn test_client(addr: SocketAddr) -> Result<(),KfSocketError> { + + sleep(Duration::from_millis(100)).await; + + debug!("client: trying to connect"); + let mut socket = KfSocket::connect(&addr).await?; + debug!("client: connect to test server and waiting..."); + + test_fetch_0(&mut socket).await?; + test_fetch_2(&mut socket).await?; + + Ok(()) +} + + + /// test replica fetch using dummy server +#[test_async] +async fn test_replica_fetch() -> Result<(),StorageError> { + + + + let addr = "127.0.0.1:9911".parse::().expect("parse"); + + let _r = join(test_client(addr),test_server(addr.clone())).await; + + + Ok(()) + +} + diff --git a/test-helper/Cargo.toml b/test-helper/Cargo.toml new file mode 100644 index 0000000000..67e681cf5c --- /dev/null +++ b/test-helper/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "flv-integration-test" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] +edition = "2018" + +[dependencies] +log = "0.4.6" +bytes = "0.4.12" +futures-preview = { version = "0.3.0-alpha.17" } +future-helper = { path = "../future-helper" } +kf-protocol = { path = "../kf-protocol"} +kf-socket = {path = "../kf-socket"} +kf-service = { path = "../kf-service"} +internal-api = { path = "../api/internal-api"} +types = { path= "../types"} +utils = { path= "../utils"} +metadata = { path = "../metadata" } +future-aio = { path = "../future-aio"} diff --git a/test-helper/src/integration/mod.rs b/test-helper/src/integration/mod.rs new file mode 100644 index 0000000000..1103e172a6 --- /dev/null +++ b/test-helper/src/integration/mod.rs @@ -0,0 +1,74 @@ +#![feature(async_await)] + +mod test_runner; + +pub use test_runner::SpuTestRunner; + +use std::io::Error as IoError; + +use futures::Future; +use futures::channel::mpsc::Receiver; + +use metadata::spu::SpuSpec; +use metadata::spu::Endpoint as MetadatEndPoint; +use kf_socket::KfSocketError; +use types::SpuId; + +/// trait for driving system test +pub trait FlvSystemTest: Sized { + + type EnvGenerator: TestGenerator; + + // return environment generator + fn env_configuration(&self) -> Self::EnvGenerator; + + fn followers(&self) -> usize; + + type TestResponseFuture: Send + Future,KfSocketError>>; + + fn main_test(self,runner: SpuTestRunner) -> Self::TestResponseFuture; +} + +pub trait SpuServer { + + type ShutdownFuture: Send + Future>; + fn run_shutdown(self,shutdown_signal: Receiver) -> Self::ShutdownFuture; + + fn id(&self) -> SpuId; + + fn spec(&self) -> &SpuSpec; + + +} + + +pub trait TestGenerator { + + type SpuServer: SpuServer; + + fn base_port(&self) -> u16; + + fn base_id(&self) -> i32; + + fn create_spu(&self, spu_index: u16) -> SpuSpec { + + let port = spu_index * 2 + self.base_port(); + + SpuSpec { + id: self.base_id() + spu_index as i32, + public_endpoint: MetadatEndPoint { + port, + ..Default::default() + }, + private_endpoint: MetadatEndPoint { + port: port + 1, + ..Default::default() + }, + ..Default::default() + } + } + + /// create server with and start controller + fn create_server(&self, spu: &SpuSpec) -> Result; + +} diff --git a/test-helper/src/integration/test_runner.rs b/test-helper/src/integration/test_runner.rs new file mode 
diff --git a/test-helper/src/integration/test_runner.rs b/test-helper/src/integration/test_runner.rs new file mode 100644 index 0000000000..456ecce5da --- /dev/null +++ b/test-helper/src/integration/test_runner.rs @@ -0,0 +1,238 @@ +use std::convert::TryInto; +use std::net::SocketAddr; +use std::time::Duration; + +use log::debug; +use log::trace; +use futures::future::join; +use futures::future::join_all; +use futures::channel::mpsc::channel; +use futures::channel::mpsc::Sender; +use futures::SinkExt; + +use future_helper::sleep; +use kf_socket::KfSocketError; +use kf_socket::KfSocket; +use kf_protocol::api::Offset; +use kf_protocol::api::Request; +use kf_protocol::api::RequestMessage; +//use kf_protocol::api::ResponseMessage; +use kf_protocol::message::produce::DefaultKfProduceRequest; +use kf_protocol::message::produce::DefaultKfPartitionRequest; +use kf_protocol::message::produce::DefaultKfTopicRequest; +use kf_protocol::message::fetch::DefaultKfFetchRequest; +use kf_protocol::message::fetch::FetchPartition; +use kf_protocol::message::fetch::KfFetchRequest; +use kf_protocol::message::fetch::FetchableTopic; +use kf_protocol::api::DefaultBatch; +use kf_protocol::api::DefaultRecord; +use internal_api::messages::UpdateAllSpusMsg; +use internal_api::messages::UpdateAllSpusContent; +use internal_api::messages::Replica; +use internal_api::UpdateSpuRequest; +use metadata::partition::ReplicaKey; +use types::SpuId; +use metadata::spu::SpuSpec; +use metadata::spu::Endpoint; + +use super::TestGenerator; +use super::SpuServer; +use super::FlvSystemTest; + +pub struct SpuTestRunner<T> where T: FlvSystemTest { + client_id: String, + servers: Vec<<<T as FlvSystemTest>::EnvGenerator as TestGenerator>::SpuServer>, + senders: Vec<Sender<bool>> +} + +impl<T> SpuTestRunner<T> where T: FlvSystemTest, + <<T as FlvSystemTest>::EnvGenerator as TestGenerator>::SpuServer: From<SpuSpec> +{ + + pub async fn run(client_id: String, config: T) -> Result<(),KfSocketError> { + + let generator = config.env_configuration(); + + let mut servers = vec![]; + let mut futures = vec![]; + let mut senders = vec![]; + + for i in 0..config.followers() + 1 { + let spu = generator.create_spu(i as u16); + let (sender,receiver) = channel::<bool>(1); + let server = generator.create_server(&spu)?; + futures.push(server.run_shutdown(receiver)); + senders.push(sender); + + servers.push(spu.into()); + } + + let runner = SpuTestRunner { + client_id, + servers, + senders + }; + + join( + runner.run_test(config), + join_all(futures) + ) + .await; + + Ok(()) + } + + async fn run_test(self,config: T) { + + // wait until controller start up + sleep(Duration::from_millis(10)).await.expect("panic"); + + let mut runner = config.main_test(self).await.expect("test should run"); + runner.terminate_server().await; + } + + // terminate all servers + async fn terminate_server(&mut self) { + for i in 0..self.servers.len() { + let server = &self.servers[i]; + let sender = &mut self.senders[i]; + + debug!("terminating server: {}",server.id()); + sender + .send(true) + .await + .expect("shutdown should work"); + } + } + + pub fn leader(&self) -> &<<T as FlvSystemTest>::EnvGenerator as TestGenerator>::SpuServer { + &self.servers[0] + } + + pub fn leader_spec(&self) -> &SpuSpec { + self.leader().spec() + } + + pub fn followers_count(&self) -> usize { + self.servers.len() - 1 + } + + pub fn follower_spec(&self,index: usize) -> &SpuSpec { + self.servers[index+1].spec() + } + + pub fn spu_metadata(&self) -> UpdateAllSpusContent { + + let mut spu_metadata = UpdateAllSpusContent::default(); + + for server in &self.servers { + spu_metadata.mut_add_spu_content(server.spec()); + } + + spu_metadata + } + + pub fn replica_ids(&self) -> Vec<SpuId> { 
self.servers.iter().map(|follower| follower.spec().id).collect() + } + + pub fn replica_metadata(&self,replica: &ReplicaKey) -> Replica { + + let leader_id = self.leader_spec().id; + + Replica { + replica: replica.clone(), + leader: leader_id, + live_replicas: self.replica_ids() + } + } + + pub async fn send_metadata_to_all<'a>(&'a self,replica: &'a ReplicaKey) -> Result<(),KfSocketError> { + + let spu_metadata = self.spu_metadata().add_replica(self.replica_metadata(replica)); + + for server in &self.servers { + let spu_id = server.spec().id; + let _spu_req_msg = RequestMessage::new_request(UpdateSpuRequest::encode_request( + UpdateAllSpusMsg::with_content(spu_id,spu_metadata.clone()), + )) + .set_client_id(self.client_id.clone()); + trace!("sending spu metadata to server: {}",spu_id); + // send_to_endpoint(server.private_endpoint(),&spu_req_msg).await; + } + + debug!("sleeping to allow controllers to catch up with messages"); + sleep(Duration::from_millis(50)).await.expect("panic"); + debug!("woke up, start testing"); + + Ok(()) + } + + pub fn create_producer_msg<S>(&self,msg: S, topic: S,partition: i32) -> RequestMessage<DefaultKfProduceRequest> + where S: Into<String> + { + let msg_string: String = msg.into(); + let record: DefaultRecord = msg_string.into(); + let mut batch = DefaultBatch::default(); + batch.records.push(record); + + let mut topic_request = DefaultKfTopicRequest::default(); + topic_request.name = topic.into(); + let mut partition_request = DefaultKfPartitionRequest::default(); + partition_request.partition_index = partition; + partition_request.records.batches.push(batch); + topic_request.partitions.push(partition_request); + let mut req = DefaultKfProduceRequest::default(); + req.topics.push(topic_request); + + RequestMessage::new_request(req).set_client_id(self.client_id.clone()) + } + + pub fn create_fetch_request<S>(&self,offset: Offset, topic: S, partition: i32 ) -> RequestMessage<DefaultKfFetchRequest> + where S: Into<String> + { + let mut request: DefaultKfFetchRequest = KfFetchRequest::default(); + let mut part_request = FetchPartition::default(); + part_request.partition_index = partition; + part_request.fetch_offset = offset; + + let mut topic_request = FetchableTopic::default(); + topic_request.name = topic.into(); + topic_request.fetch_partitions.push(part_request); + + request.topics.push(topic_request); + + RequestMessage::new_request(request).set_client_id("test_client") + } + +} + +#[allow(dead_code)] +pub async fn send_to_endpoint<'a,R>(endpoint: &'a Endpoint, req_msg: &'a RequestMessage<R>) -> Result<(), KfSocketError> where R: Request, +{ + debug!( + "client: trying to connect to private endpoint: {:#?}", + endpoint + ); + let socket: SocketAddr = endpoint.try_into()?; + let mut socket = KfSocket::connect(&socket).await?; + debug!("connected to internal endpoint {:#?}", endpoint); + let res_msg = socket.send(&req_msg).await?; + debug!("response: {:#?}", res_msg); + Ok(()) +} diff --git a/test-helper/src/lib.rs b/test-helper/src/lib.rs new file mode 100644 index 0000000000..e69de29bb2 diff --git a/types/Cargo.toml b/types/Cargo.toml new file mode 100644 index 0000000000..b9672a97c8 --- /dev/null +++ b/types/Cargo.toml @@ -0,0 +1,7 @@ +[package] +name = "types" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] +edition = "2018" + +[dependencies] diff --git a/types/rust-toolchain b/types/rust-toolchain new file mode 120000 index 0000000000..9327ba4034 --- /dev/null +++ b/types/rust-toolchain @@ -0,0 +1 @@ +../rust-toolchain \ No newline at end of file diff --git a/types/src/defaults.rs b/types/src/defaults.rs 
new file mode 100644 index 0000000000..836be9ba82 --- /dev/null +++ b/types/src/defaults.rs @@ -0,0 +1,67 @@ +pub const PRODUCT_NAME: &'static str = "fluvio"; +pub const IMAGE_NAME: &'static str = "infinyon/fluvio-spu"; + +// SPU/SC Server Path +pub const SERVER_CONFIG_BASE_PATH: &'static str = "/etc"; +pub const SERVER_CONFIG_DIR: &'static str = "fluvio"; +pub const CONFIG_FILE_EXTENTION: &'static str = "toml"; + +// SC defaults +pub const SC_DEFAULT_ID: i32 = 1; +pub const SC_CONFIG_FILE: &'static str = "sc_server"; +pub const SC_PUBLIC_PORT: u16 = 9003; +pub const SC_PRIVATE_PORT: u16 = 9004; +pub const SC_HOSTNAME: &'static str = "localhost"; +pub const SC_RECONCILIATION_INTERVAL_SEC: u64 = 300; // 5 min + +// SPU defaults +pub const SPU_DEFAULT_ID: i32 = 0; +pub const SPU_DEFAULT_NAME: &'static str = "spu"; +pub const SPU_CONFIG_FILE: &'static str = "spu_server"; +pub const SPU_PUBLIC_PORT: u16 = 9005; +pub const SPU_PRIVATE_PORT: u16 = 9006; +pub const SPU_PUBLIC_HOSTNAME: &'static str = "0.0.0.0"; +pub const SPU_PRIVATE_HOSTNAME: &'static str = "0.0.0.0"; +pub const SPU_CREDENTIALS_FILE: &'static str = "/etc/fluvio/.credentials/token_secret"; +pub const SPU_RETRY_SC_TIMEOUT_MS: u16 = 3000; +pub const SPU_MIN_IN_SYNC_REPLICAS: u16 = 1; +pub const SPU_LOG_BASE_DIR: &'static str = "/tmp/fluvio"; +pub const SPU_LOG_SIZE: &'static str = "1Gi"; +pub const SPU_LOG_INDEX_MAX_BYTES: u32 = 10485760; +pub const SPU_LOG_INDEX_MAX_INTERVAL_BYTES: u32 = 4096; +pub const SPU_LOG_SEGMENT_MAX_BYTES: u32 = 1073741824; + +// CLI config +pub const CLI_PROFILES_DIR: &'static str = "profiles"; +pub const CLI_DEFAULT_PROFILE: &'static str = "default"; +pub const CLI_CONFIG_PATH: &'static str = ".fluvio"; + +// Env +pub const FLV_FLUVIO_HOME: &'static str = "FLUVIO_HOME"; +pub const FLV_SPU_ID: &'static str = "FLV_SPU_ID"; +pub const FLV_SPU_TYPE: &'static str = "FLV_SPU_TYPE"; +pub const FLV_TOKEN_SECRET_FILE: &'static str = "FLV_TOKEN_SECRET_FILE"; +pub const FLV_RACK: &'static str = "FLV_RACK"; +pub const FLV_SPU_PUBLIC_HOST: &'static str = "FLV_SPU_PUBLIC_HOST"; +pub const FLV_SPU_PUBLIC_PORT: &'static str = "FLV_SPU_PUBLIC_PORT"; +pub const FLV_SPU_PRIVATE_HOST: &'static str = "FLV_SPU_PRIVATE_HOST"; +pub const FLV_SPU_PRIVATE_PORT: &'static str = "FLV_SPU_PRIVATE_PORT"; +pub const FLV_SC_PRIVATE_HOST: &'static str = "FLV_SC_PRIVATE_HOST"; +pub const FLV_SC_PRIVATE_PORT: &'static str = "FLV_SC_PRIVATE_PORT"; +pub const FLV_SC_RETRY_TIMEOUT_MS: &'static str = "FLV_SC_RETRY_TIMEOUT_MS"; +pub const FLV_REPLICA_IN_SYNC_REPLICA_MIN: &'static str = "FLV_REPLICA_IN_SYNC_REPLICA_MIN"; +pub const FLV_LOG_BASE_DIR: &'static str = "FLV_LOG_BASE_DIR"; +pub const FLV_LOG_SIZE: &'static str = "FLV_LOG_SIZE"; +pub const FLV_LOG_INDEX_MAX_BYTES: &'static str = "FLV_LOG_INDEX_MAX_BYTES"; +pub const FLV_LOG_INDEX_MAX_INTERVAL_BYTES: &'static str = "FLV_LOG_INDEX_MAX_INTERVAL_BYTES"; +pub const FLV_LOG_SEGMENT_MAX_BYTES: &'static str = "FLV_LOG_SEGMENT_MAX_BYTES"; + +// Health Checks +pub const HC_SPU_TRIGGER_INTERVAL_SEC: u64 = 60 * 5; +pub const HC_SPU_PING_INTERVAL_SEC: u64 = 5; + +/// K8 Secret +pub const K8_TOKEN_SECRET_KEY: &'static str = "token_secret"; + +// Kafka +pub const KF_REQUEST_TIMEOUT_MS: i32 = 1500; diff --git a/types/src/lib.rs b/types/src/lib.rs new file mode 100644 index 0000000000..e41ee807b8 --- /dev/null +++ b/types/src/lib.rs @@ -0,0 +1,32 @@ +use std::collections::BTreeMap; + +pub mod defaults; +pub mod macros; +pub mod partition; +pub mod socket_helpers; + +pub use partition::PartitionError; + 
+// +// Types +// +pub type ReplicaMap = BTreeMap<i32, Vec<i32>>; +pub type Reason = String; +pub type Name = String; + +pub type SpuName = String; +pub type SpuId = i32; + +pub type IsOnline = bool; +pub type IsOk = bool; + +// Topic +pub type TopicName = String; +pub type PartitionId = i32; +pub type PartitionCount = i32; +pub type ReplicationFactor = i32; +pub type IgnoreRackAssignment = bool; + +// AuthToken +pub type TokenName = String; +pub type TokenSecret = String; diff --git a/types/src/macros.rs b/types/src/macros.rs new file mode 100644 index 0000000000..2278854ca1 --- /dev/null +++ b/types/src/macros.rs @@ -0,0 +1,49 @@ +#[macro_export] +macro_rules! log_on_err { + ($x:expr) => { + if let Err(err) = $x { + log::error!("{}", err); + } + }; + + ($x:expr,$msg:expr) => { + if let Err(err) = $x { + log::error!($msg, err); + } + }; +} + +#[macro_export] +macro_rules! log_actions { + ($x1:expr, $x2:expr, $x3:expr, $x4:expr, $x5:expr, $x6:expr) => { + log::debug!( + "{:<20}: [add:{}, mod:{}, del:{}, skip:{}]", + format!("{}({})", $x1, $x2), + $x3, + $x4, + $x5, + $x6 + ); + }; +} + +#[macro_export] +macro_rules! print_cli_err { + ($x:expr) => { + println!("\x1B[1;31merror:\x1B[0m {}", $x); + }; +} + +#[macro_export] +macro_rules! print_cli_ok { + () => { + println!("\x1B[32mOk!\x1B[0m"); + }; +} + +#[macro_export] +macro_rules! print_ok_msg { + ($x:expr, $y:expr) => { + println!("\x1B[32m{}\x1B[0m: {}", $x, $y); + }; +} \ No newline at end of file diff --git a/types/src/partition.rs b/types/src/partition.rs new file mode 100644 index 0000000000..06a67016a5 --- /dev/null +++ b/types/src/partition.rs @@ -0,0 +1,44 @@ +use std::fmt; + +#[derive(Debug)] +pub enum PartitionError { + InvalidSyntax(String) +} + + +impl fmt::Display for PartitionError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Self::InvalidSyntax(msg) => write!(f, "invalid partition syntax: {}", msg), + } + } +} + + +// returns a tuple (topic_name, idx) +pub fn decompose_partition_name(partition_name: &str) -> Result<(String, i32), PartitionError> { + let dash_pos = partition_name.rfind('-'); + if dash_pos.is_none() { + return Err(PartitionError::InvalidSyntax(partition_name.to_owned())); + } + + let pos = dash_pos.unwrap(); + if (pos + 1) >= partition_name.len() { + return Err(PartitionError::InvalidSyntax(partition_name.to_owned())); + } + + let topic_name = &partition_name[..pos]; + let idx_string = &partition_name[(pos + 1)..]; + let idx = match idx_string.parse::<i32>() { + Ok(n) => n, + Err(_) => { + return Err(PartitionError::InvalidSyntax(partition_name.to_owned())); + } + }; + + Ok((topic_name.to_string(), idx)) +} + +pub fn create_partition_name(topic_name: &str, idx: &i32) -> String { + format!("{}-{}", topic_name, idx) +}
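// Editor's note (illustration, not part of the original patch): a quick
// round-trip check of the partition-name helpers defined above.
#[test]
fn partition_name_round_trip() {
    assert_eq!(create_partition_name("topic-a", &2), "topic-a-2");
    let (topic, idx) = decompose_partition_name("topic-a-2").expect("valid name");
    assert_eq!(topic, "topic-a");
    assert_eq!(idx, 2);
    // a trailing dash with no index is rejected
    assert!(decompose_partition_name("topic-a-").is_err());
}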
diff --git a/types/src/socket_helpers.rs b/types/src/socket_helpers.rs new file mode 100644 index 0000000000..b204c43de3 --- /dev/null +++ b/types/src/socket_helpers.rs @@ -0,0 +1,144 @@ +use std::fmt; +use std::io::Error as IoError; +use std::io::ErrorKind; +use std::convert::TryFrom; +use std::net::SocketAddr; +use std::net::ToSocketAddrs; +use std::net::Ipv4Addr; +use std::net::IpAddr; + +// +// Structures +// +#[derive(Debug, PartialEq, Clone)] +pub struct ServerAddress { + pub host: String, + pub port: u16, +} + +impl fmt::Display for ServerAddress { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}:{}", self.host, self.port) + } +} + +impl TryFrom<&String> for ServerAddress { + type Error = IoError; + + fn try_from(host_port: &String) -> Result<Self, Self::Error> { + let v: Vec<&str> = host_port.split(':').collect(); + + if v.len() != 2 { + return Err(IoError::new( + ErrorKind::InvalidInput, + format!("invalid host:port format {}", host_port).as_str(), + )); + } + + Ok(ServerAddress { + host: v[0].to_string(), + port: v[1] + .parse::<u16>() + .map_err(|err| IoError::new(ErrorKind::InvalidData, format!("{}", err)))?, + }) + } +} + +impl TryFrom<ServerAddress> for SocketAddr { + type Error = IoError; + + fn try_from(endpoint: ServerAddress) -> Result<Self, Self::Error> { + host_port_to_socket_addr(&endpoint.host, endpoint.port) + } +} + +// converts a ServerAddress to a SocketAddr +pub fn server_to_socket_addr(server_addr: &ServerAddress) -> Result<SocketAddr, IoError> { + host_port_to_socket_addr(&server_addr.host, server_addr.port) +} + +// converts a host/port pair to a SocketAddr +pub fn host_port_to_socket_addr(host: &str, port: u16) -> Result<SocketAddr, IoError> { + let addr_string = format!("{}:{}", host, port); + string_to_socket_addr(&addr_string) +} + +/// convert string to socket addr +pub fn string_to_socket_addr(addr_string: &str) -> Result<SocketAddr, IoError> { + let mut addrs_iter = addr_string.to_socket_addrs()?; + let addr = addrs_iter.next(); + match addr { + Some(addr) => Ok(addr), + None => Err(IoError::new( + ErrorKind::InvalidInput, + format!("host/port cannot be resolved {}", addr_string).as_str(), + )), + } +} + +#[derive(Debug, PartialEq, Clone)] +pub enum EndPointEncryption { + PLAINTEXT, +} + +impl Default for EndPointEncryption { + fn default() -> Self { + EndPointEncryption::PLAINTEXT + } +} + +impl fmt::Display for EndPointEncryption { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Plain") + } +} + +#[derive(Debug, PartialEq, Clone)] +pub struct EndPoint { + pub addr: SocketAddr, + pub encryption: EndPointEncryption, +} + +impl EndPoint { + /// Build endpoint for local server + pub fn local_end_point(port: u16) -> Self { + Self { + addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port), + encryption: EndPointEncryption::default(), + } + } + + /// listen on 0.0.0.0 + pub fn all_end_point(port: u16) -> Self { + Self { + addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), port), + encryption: EndPointEncryption::default(), + } + } +} + +impl From<SocketAddr> for EndPoint { + fn from(addr: SocketAddr) -> Self { + EndPoint { + addr, + encryption: EndPointEncryption::default(), + } + } +} + +impl TryFrom<&str> for EndPoint { + type Error = IoError; + + fn try_from(value: &str) -> Result<Self, Self::Error> { + string_to_socket_addr(value).map(|addr| EndPoint { + addr, + encryption: EndPointEncryption::PLAINTEXT, + }) + } +} + +impl fmt::Display for EndPoint { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{} {}", self.addr, self.encryption) + } +}
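// Editor's note (illustration, not part of the original patch): typical use of
// the address helpers above; resolving "localhost" assumes a working resolver.
use std::convert::TryFrom;
use std::net::SocketAddr;

fn address_demo() -> Result<(), std::io::Error> {
    let server = ServerAddress::try_from(&"localhost:9003".to_string())?;
    assert_eq!(server.port, 9003);
    let addr = SocketAddr::try_from(server)?; // resolves host:port via ToSocketAddrs
    let endpoint = EndPoint::local_end_point(9004); // 127.0.0.1:9004, plaintext
    println!("resolved {} and built {}", addr, endpoint);
    Ok(())
}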
diff --git a/utils/Cargo.toml b/utils/Cargo.toml new file mode 100644 index 0000000000..b7276f81f8 --- /dev/null +++ b/utils/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "utils" +version = "0.1.0-alpha.1" +authors = ["fluvio.io"] +edition = "2018" + +[features] +default = [] +fixture = [] + +[dependencies] +env_logger = "0.6.2" +regex = "1.2.1" +log = "0.4.6" +rand = "0.7.0" +chrono = { version = "0.4.7"} +types = { path= "../types"} diff --git a/utils/src/actions.rs b/utils/src/actions.rs new file mode 100644 index 0000000000..d6f7472cc3 --- /dev/null +++ b/utils/src/actions.rs @@ -0,0 +1,101 @@ +//! +//! # Actions Template +//! +//! Template to enable chaining actions in a list. +//! Operations supported: +//! * push, count compare, traverse (iter.) +//! + +use std::collections::vec_deque::Iter; +use std::collections::vec_deque::IntoIter; +use std::collections::VecDeque; + + +/// queue of Action +#[derive(Debug,Clone)] +pub struct Actions<T>(VecDeque<T>); + + +impl<T> ::std::default::Default for Actions<T> { + fn default() -> Self { + Self(VecDeque::new()) + } +} + + +impl<T> ::std::cmp::PartialEq for Actions<T> +where + T: PartialEq, +{ + fn eq(&self, other: &Actions<T>) -> bool { + let local_actions = &self.0; + let other_actions = &other.0; + + if local_actions.len() != other_actions.len() { + return false; + } + for (idx, action) in local_actions.iter().enumerate() { + if action != &other_actions[idx] { + return false; + } + } + + true + } +} + +impl<T> Actions<T> { + + pub fn add_all(&mut self,items: Vec<T>) { + for item in items.into_iter() { + self.push(item); + } + } + + pub fn push(&mut self, action: T) { + self.0.push_back(action); + } + + pub fn push_once(&mut self, action: T) + where + T: PartialEq, + { + let mut found = false; + for local_action in self.0.iter() { + if local_action == &action { + found = true; + break; + } + } + if !found { + self.0.push_back(action); + } + } + + pub fn pop_front(&mut self) -> Option<T> { + self.0.pop_front() + } + + #[cfg(feature = "fixture")] + pub fn count(&self) -> usize { + self.0.len() + } + + pub fn iter<'a>(&'a self) -> Iter<'a, T> { + self.0.iter() + } + + pub fn into_iter(self) -> IntoIter<T> { + self.0.into_iter() + } +} + + +impl<T> From<Vec<T>> for Actions<T> { + fn from(vec: Vec<T>) -> Self { + let mut actions = Self::default(); + for item in vec.into_iter() { + actions.push(item); + } + actions + } +} \ No newline at end of file
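// Editor's note (illustration, not part of the original patch): Actions<T>
// behaves as an append-only queue; push_once deduplicates, push does not.
fn actions_demo() {
    let mut actions: Actions<i32> = vec![1, 2].into();
    actions.push_once(2); // already queued, ignored
    actions.push_once(3); // appended
    actions.push(3); // appended unconditionally
    let drained: Vec<i32> = actions.into_iter().collect();
    assert_eq!(drained, vec![1, 2, 3, 3]);
}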
diff --git a/utils/src/concurrent.rs b/utils/src/concurrent.rs new file mode 100644 index 0000000000..7d8fcd79d7 --- /dev/null +++ b/utils/src/concurrent.rs @@ -0,0 +1,76 @@ +use std::collections::HashMap; +use std::collections::BTreeMap; +use std::hash::Hash; +use std::sync::RwLock; +use std::sync::RwLockReadGuard; +use std::sync::RwLockWriteGuard; + + +/// inefficient but simple concurrent hashmap; +/// this should only be used in tests since +/// it locks for every write +pub struct SimpleConcurrentHashMap<K, V>(RwLock<HashMap<K, V>>); + +impl<K, V> SimpleConcurrentHashMap<K, V> +where + K: Eq + Hash, +{ + pub fn new() -> Self { + SimpleConcurrentHashMap(RwLock::new(HashMap::new())) + } + + pub fn insert(&self, key: K, value: V) -> Option<V> { + let mut lock = self.write(); + lock.insert(key, value) + } + + pub fn read(&self) -> RwLockReadGuard<HashMap<K, V>> { + self.0.read().unwrap() + } + + pub fn write(&self) -> RwLockWriteGuard<HashMap<K, V>> { + self.0.write().unwrap() + } + + pub fn contains_key(&self, key: &K) -> bool { + self.read().contains_key(key) + } +} + +#[derive(Debug)] +pub struct SimpleConcurrentBTreeMap<K, V>(RwLock<BTreeMap<K, V>>); + +impl<K, V> Default for SimpleConcurrentBTreeMap<K, V> where K: Ord { + + fn default() -> Self { + SimpleConcurrentBTreeMap(RwLock::new(BTreeMap::new())) + } + +} + +impl<K, V> SimpleConcurrentBTreeMap<K, V> + where K: Ord +{ + pub fn new() -> Self { + SimpleConcurrentBTreeMap(RwLock::new(BTreeMap::new())) + } + + pub fn read(&self) -> RwLockReadGuard<BTreeMap<K, V>> { + self.0.read().unwrap() + } + + pub fn write(&self) -> RwLockWriteGuard<BTreeMap<K, V>> { + self.0.write().unwrap() + } + + pub fn insert(&self, key: K, value: V) -> Option<V> { + let mut lock = self.write(); + lock.insert(key, value) + } + + pub fn contains_key(&self, key: &K) -> bool { + self.read().contains_key(key) + } + +} \ No newline at end of file diff --git a/utils/src/config_helper.rs b/utils/src/config_helper.rs new file mode 100644 index 0000000000..87c1c590fa --- /dev/null +++ b/utils/src/config_helper.rs @@ -0,0 +1,50 @@ +//! +//! # Config helper +//! +use std::path::PathBuf; + +use types::defaults::CONFIG_FILE_EXTENTION; +use types::defaults::{SERVER_CONFIG_BASE_PATH, SERVER_CONFIG_DIR}; + +/// generate server configuration file path +pub fn build_server_config_file_path(file_name: &'static str) -> PathBuf { + let mut config_file_path = PathBuf::new(); + + // stitch-up default configuration file path + config_file_path.push(SERVER_CONFIG_BASE_PATH); + config_file_path.push(SERVER_CONFIG_DIR); + config_file_path.push(file_name); + config_file_path.set_extension(CONFIG_FILE_EXTENTION); + + config_file_path +} + +// +// Unit Tests +// + +#[cfg(test)] +pub mod test { + use super::*; + use types::defaults::{SC_CONFIG_FILE, SPU_CONFIG_FILE}; + + #[test] + fn test_build_sc_server_config_file_path() { + let sc_server_file_path = build_server_config_file_path(SC_CONFIG_FILE); + + let mut expected_file_path = PathBuf::new(); + expected_file_path.push("/etc/fluvio/sc_server.toml"); + + assert_eq!(sc_server_file_path, expected_file_path); + } + + #[test] + fn test_build_spu_server_config_file_path() { + let spu_server_file_path = build_server_config_file_path(SPU_CONFIG_FILE); + + let mut expected_file_path = PathBuf::new(); + expected_file_path.push("/etc/fluvio/spu_server.toml"); + + assert_eq!(spu_server_file_path, expected_file_path); + } +} diff --git a/utils/src/counters/counter_tbl.rs b/utils/src/counters/counter_tbl.rs new file mode 100644 index 0000000000..8b27992f74 --- /dev/null +++ b/utils/src/counters/counter_tbl.rs @@ -0,0 +1,263 @@ +//! +//! # Counter Table +//! +//! Stores counters in a table construct. Each table has a header and rows with counters for each column. +//! +use std::cmp; +use std::collections::BTreeMap; +use std::sync::RwLock; + +#[derive(Debug)] +pub struct CounterTable<C, T> { + pub columns: BTreeMap<C, Column>, + pub rows: RwLock<BTreeMap<T, BTreeMap<C, u32>>>, +} + +#[derive(Debug, PartialEq)] +pub struct Column { + label: &'static str, + internal: bool, +} + +impl<C, T> ::std::default::Default for CounterTable<C, T> +where + C: Ord, + T: Ord, +{ + fn default() -> Self { + Self { + columns: BTreeMap::new(), + rows: RwLock::new(BTreeMap::new()), + } + } +} + +impl<C, T> ::std::cmp::PartialEq for CounterTable<C, T> +where + C: PartialEq, + T: PartialEq, +{ + fn eq(&self, other: &CounterTable<C, T>) -> bool { + // compare columns + if self.columns != other.columns { + return false; + } + + // compare counters + let local_rows = self.rows.read().unwrap(); + let other_rows = other.rows.read().unwrap(); + if *local_rows != *other_rows { + return false; + } + true + } +} + +impl<C, T> CounterTable<C, T> +where + C: Ord + Clone, + T: Ord, +{ + /// builder pattern to add columns + pub fn with_columns(mut self, columns: Vec<(C, &'static str, bool)>) -> Self { + for (column_id, label, internal) in columns { + self.columns.insert(column_id, Column { label, internal }); + } + self + } + + /// create a row and add counter columns + pub fn add_row(&self, row_id: T) { + // add one counter per column + let mut column_counters = BTreeMap::new(); + for column_id in self.columns.keys() { + column_counters.insert(column_id.clone(), 0); + } + + // add counters to row + self.rows.write().unwrap().insert(row_id, column_counters); + } + + /// remove counter row + pub fn remove_row(&self, row_id: &T) { + self.rows.write().unwrap().remove(row_id); + } + + /// number of rows + pub fn row_count(&self) -> usize { + self.rows.read().unwrap().len() + } + + /// increment counter + pub fn inc_counter(&self, row_id: &T, column_id: C) { + if let Some(row) = self.rows.write().unwrap().get_mut(row_id) { + if
let Some(counter) = row.get_mut(&column_id) { + *counter += 1; + } + } + } + + /// set counter + pub fn set_counter(&self, row_id: &T, column_id: C, val: u32) { + if let Some(row) = self.rows.write().unwrap().get_mut(row_id) { + if let Some(counter) = row.get_mut(&column_id) { + *counter = val; + } + } + } + + /// reset all counters + pub fn reset_counters(&self) { + for row in self.rows.write().unwrap().values_mut() { + for counter in row.values_mut() { + *counter = 0; + } + } + } + + /// column headers in string format (center justified, min 10 spaces) + pub fn header_fmt(&self) -> String { + let mut res = String::new(); + + // accumulate labels + for column in self.columns.values() { + res.push_str(&format!("{:^10}", column.label)); + res.push_str("  "); + } + + // remove last 2 spaces + if res.len() > 2 { + res.truncate(res.len() - 2); + } + + res + } + + /// format values in string format (center justified to column header - min 10 spaces) + pub fn values_fmt(&self, row_id: T) -> String { + let mut res = String::new(); + if let Some(row) = self.rows.write().unwrap().get_mut(&row_id) { + // accumulate labels + for (column_id, counter) in row { + let value_str = counter.to_string(); + let column_label = if let Some(column) = self.columns.get(column_id) { + column.label + } else { + "" + }; + let space_width = cmp::max(column_label.len(), 10); + let value_width = value_str.len(); + + // center justify... need to compute our own padding (as Rust formatter requires literal) + let (pad_left, pad_right) = if value_width > space_width { + (0, 0) + } else { + let pad_left = (space_width - value_width) / 2; + let pad_right = space_width - pad_left - value_width; + (pad_left, pad_right) + }; + + res.push_str(&(0..pad_left).map(|_| " ").collect::<String>()); + res.push_str(&value_str); + res.push_str(&(0..pad_right).map(|_| " ").collect::<String>()); + res.push_str("  "); + } + + // remove last 2 spaces + if res.len() > 2 { + res.truncate(res.len() - 2); + } + } + + res + } +} + +// ----------------------------------- +// Unit Tests +// ----------------------------------- + +#[cfg(test)] +pub mod test { + use super::*; + + #[derive(Debug, PartialEq, PartialOrd, Eq, Ord, Clone)] + enum TestCntr { + Ok = 0, + Failed = 1, + Retry = 2, + Shutdown = 3, + InternalErr = 4, + } + + fn generate_counters() -> Vec<(TestCntr, &'static str, bool)> { + vec![ + (TestCntr::Ok, "CONN-OK", false), + (TestCntr::Failed, "CONN-FAILED", false), + (TestCntr::Retry, "CONN-RETRY", false), + (TestCntr::Shutdown, "CONN-SHUTDOWN", false), + (TestCntr::InternalErr, "INTERNAL-ERR", true), + ] + } + + #[test] + fn test_counter_table_all() { + let counter_tbl = CounterTable::default().with_columns(generate_counters()); + + // test generation + assert_eq!(counter_tbl.columns.len(), 5); + + // test add rows + let (row0, row1): (i32, i32) = (0, 1); + counter_tbl.add_row(row0); + counter_tbl.add_row(row1); + assert_eq!(counter_tbl.row_count(), 2); + + // test header formatter + let header = counter_tbl.header_fmt(); + let expected_header = " CONN-OK CONN-FAILED CONN-RETRY CONN-SHUTDOWN INTERNAL-ERR".to_owned(); + assert_eq!(header, expected_header); + + // test increment & value formatter + counter_tbl.set_counter(&row0, TestCntr::Ok, 4294967290); + counter_tbl.inc_counter(&row0, TestCntr::Failed); + counter_tbl.set_counter(&row0, TestCntr::Retry, 4199999999); + let values_r1 = counter_tbl.values_fmt(row0); + let expected_values_r1 = "4294967290 1 4199999999 0 0 ".to_owned(); + assert_eq!(values_r1, expected_values_r1); + 
counter_tbl.inc_counter(&row1, TestCntr::Shutdown); + counter_tbl.inc_counter(&row1, TestCntr::Shutdown); + let values_r2 = counter_tbl.values_fmt(row1); + let expected_values_r2 = " 0 0 0 2 0 ".to_owned(); + assert_eq!(values_r2, expected_values_r2); + + // test equality + let expected_counter_tbl = CounterTable::default().with_columns(generate_counters()); + expected_counter_tbl.add_row(row0); + expected_counter_tbl.add_row(row1); + expected_counter_tbl.set_counter(&row0, TestCntr::Ok, 4294967290); + expected_counter_tbl.set_counter(&row0, TestCntr::Failed, 1); + expected_counter_tbl.set_counter(&row0, TestCntr::Retry, 4199999999); + expected_counter_tbl.set_counter(&row1, TestCntr::Shutdown, 2); + assert_eq!(counter_tbl, expected_counter_tbl); + + // test reset + counter_tbl.reset_counters(); + let expected_reset_tbl = CounterTable::default().with_columns(generate_counters()); + expected_reset_tbl.add_row(row0); + expected_reset_tbl.add_row(row1); + assert_eq!(counter_tbl, expected_reset_tbl); + assert_eq!(counter_tbl.row_count(), 2); + + // test remove row + counter_tbl.remove_row(&row0); + let expected_one_row_tbl = CounterTable::default().with_columns(generate_counters()); + expected_one_row_tbl.add_row(row1); + assert_eq!(counter_tbl, expected_one_row_tbl); + assert_eq!(counter_tbl.row_count(), 1); + } +} diff --git a/utils/src/counters/counters.rs b/utils/src/counters/counters.rs new file mode 100644 index 0000000000..c50cfa1be4 --- /dev/null +++ b/utils/src/counters/counters.rs @@ -0,0 +1,190 @@ +//! +//! # Counters +//! +//! Counters object definition and functionality +//! +use std::cmp; +use std::collections::BTreeMap; + +#[derive(Debug, PartialEq)] +pub struct Counters<T> { + pub list: BTreeMap<T, Counter>, +} + +#[derive(Debug, PartialEq)] +pub struct Counter { + label: &'static str, + internal: bool, + value: u32, +} + +impl<T> ::std::default::Default for Counters<T> +where + T: Ord, +{ + fn default() -> Self { + Self { + list: BTreeMap::new(), + } + } +} + +impl<T> Counters<T> +where + T: Ord, +{ + /// build counter from array of tuples + pub fn new(items: Vec<(T, &'static str, bool)>) -> Self { + let mut counters = Counters::default(); + for (id, label, internal) in items { + counters.list.insert( + id, + Counter { + label, + internal, + value: 0, + }, + ); + } + counters + } + + /// increment counter + pub fn inc_counter(&mut self, id: T) { + if let Some(counter) = self.list.get_mut(&id) { + counter.value += 1; + } + } + + /// set counter + pub fn set_counter(&mut self, id: T, val: u32) { + if let Some(counter) = self.list.get_mut(&id) { + counter.value = val; + } + } + + /// reset counters + pub fn reset(&mut self) { + for counter in self.list.values_mut() { + counter.value = 0; + } + } + + /// counter headers in string format (center justified, min 10 spaces) + pub fn header_fmt(&self) -> String { + let mut res = String::new(); + + // accumulate labels + for counter in self.list.values() { + res.push_str(&format!("{:^10}", counter.label)); + res.push_str("  "); + } + + // remove last 2 spaces + if res.len() > 2 { + res.truncate(res.len() - 2); + } + + res + } + + /// format values in string format (center justified to column header - min 10 spaces) + pub fn values_fmt(&self) -> String { + let mut res = String::new(); + + // accumulate labels + for counter in self.list.values() { + let value_str = counter.value.to_string(); + let space_width = cmp::max(counter.label.len(), 10); + let value_width = value_str.len(); + + // center justify...
need to compute our own padding (as Rust formatter requires literal) + let (pad_left, pad_right) = if value_width > space_width { + (0, 0) + } else { + let pad_left = (space_width - value_width) / 2; + let pad_right = space_width - pad_left - value_width; + (pad_left, pad_right) + }; + + res.push_str(&(0..pad_left).map(|_| " ").collect::<String>()); + res.push_str(&value_str); + res.push_str(&(0..pad_right).map(|_| " ").collect::<String>()); + res.push_str("  "); + } + + // remove last 2 spaces + if res.len() > 2 { + res.truncate(res.len() - 2); + } + + res + } +} + +// ----------------------------------- +// Unit Tests +// ----------------------------------- + +#[cfg(test)] +pub mod test { + use super::*; + + #[derive(Debug, PartialEq, PartialOrd, Eq, Ord)] + enum TestCntr { + Ok = 0, + Failed = 1, + Retry = 2, + Shutdown = 3, + InternalErr = 4, + } + + fn generate_counters() -> Vec<(TestCntr, &'static str, bool)> { + vec![ + (TestCntr::Ok, "CONN-OK", false), + (TestCntr::Failed, "CONN-FAILED", false), + (TestCntr::Retry, "CONN-RETRY", false), + (TestCntr::Shutdown, "CONN-SHUTDOWN", false), + (TestCntr::InternalErr, "INTERNAL-ERR", true), + ] + } + + #[test] + fn test_counters_all() { + let mut counters = Counters::new(generate_counters()); + + // test generation + assert_eq!(counters.list.len(), 5); + + // test header formatter + let header = counters.header_fmt(); + let expected_header = " CONN-OK CONN-FAILED CONN-RETRY CONN-SHUTDOWN INTERNAL-ERR".to_owned(); + assert_eq!(header, expected_header); + + // test increment & value formatter + counters.set_counter(TestCntr::Ok, 4294967290); + counters.inc_counter(TestCntr::Failed); + counters.set_counter(TestCntr::Retry, 4199999999); + counters.inc_counter(TestCntr::Shutdown); + counters.inc_counter(TestCntr::Shutdown); + let values = counters.values_fmt(); + let expected_values = "4294967290 1 4199999999 2 0 ".to_owned(); + assert_eq!(values, expected_values); + + // test equal + let mut expected_counters = Counters::new(generate_counters()); + expected_counters.set_counter(TestCntr::Ok, 4294967290); + expected_counters.set_counter(TestCntr::Failed, 1); + expected_counters.set_counter(TestCntr::Retry, 4199999999); + expected_counters.set_counter(TestCntr::Shutdown, 2); + assert_eq!(counters, expected_counters); + + // test reset + counters.reset(); + assert_eq!(counters, Counters::new(generate_counters())); + assert_eq!(counters.list.len(), 5); + } + +} diff --git a/utils/src/counters/mod.rs b/utils/src/counters/mod.rs new file mode 100644 index 0000000000..e2bf08dada --- /dev/null +++ b/utils/src/counters/mod.rs @@ -0,0 +1,5 @@ +mod counter_tbl; +mod counters; + +pub use self::counter_tbl::CounterTable; +pub use self::counters::Counters; \ No newline at end of file
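// Editor's note (illustration, not part of the original patch): minimal use of
// the Counters API exported above; each label is padded to at least 10 columns.
#[derive(Debug, PartialEq, PartialOrd, Eq, Ord)]
enum Conn { Ok, Failed }

fn counters_demo() {
    let mut counters = Counters::new(vec![
        (Conn::Ok, "CONN-OK", false),
        (Conn::Failed, "CONN-FAILED", false),
    ]);
    counters.inc_counter(Conn::Ok);
    counters.inc_counter(Conn::Ok);
    println!("{}", counters.header_fmt()); // padded header row
    println!("{}", counters.values_fmt()); // "2" and "0" centered under the labels
}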

diff --git a/utils/src/fixture.rs b/utils/src/fixture.rs new file mode 100644 index 0000000000..047b48e761 --- /dev/null +++ b/utils/src/fixture.rs @@ -0,0 +1,31 @@ +use log::info; +use std::fs::remove_dir_all; +use std::fs::remove_file; +use std::fs::create_dir_all; +use std::io; +use std::path::Path; + + +pub fn ensure_clean_dir<P>(dir_path: P) where P: AsRef<Path> { + let path = dir_path.as_ref(); + match remove_dir_all(path) { + Ok(_) => info!("removed dir: {}", path.display()), + Err(_) => info!("unable to delete dir: {}", path.display()), + } +} + +pub fn ensure_new_dir<P>(path: P) -> Result<(),io::Error> where P: AsRef<Path> { + let dir_path = path.as_ref(); + ensure_clean_dir(dir_path); + create_dir_all(dir_path) +} + +// remove existing file +pub fn ensure_clean_file<P>(path: P) where P: AsRef<Path> { + let log_path = path.as_ref(); + if let Ok(_) = remove_file(log_path) { + info!("remove existing file: {}", log_path.display()); + } else { + info!("there was no existing file: {}", log_path.display()); + } +} \ No newline at end of file diff --git a/utils/src/generators.rs b/utils/src/generators.rs new file mode 100644 index 0000000000..9c8134adc0 --- /dev/null +++ b/utils/src/generators.rs @@ -0,0 +1,87 @@ +//! +//! # Utils +//! +//! Utility file to generate random entities +//! + +use rand::prelude::*; +use std::i32; + +/// Generate a random correlation_id (0 to 65534) +pub fn rand_correlation_id() -> i32 { + thread_rng().gen_range(0, 65535) +} + +/// Generate a random client group key (50001 to 65534) +pub fn generate_group_id() -> String { + format!("fluvio-consumer-{}", thread_rng().gen_range(50001, 65535)) +} + +#[allow(dead_code)] +/// Generates a random authorization secret +pub fn generate_secret() -> String { + const CHARSET: &[u8] = b"abcdefghijklmnopqrstuvwxyz\ + 0123456789"; + + const SECRET_SIZE: usize = 16; + let secret: String = (0..SECRET_SIZE) + .map(|_| { + let idx = thread_rng().gen_range(0, CHARSET.len()); + // This is safe because `idx` is in range of `CHARSET` + char::from(unsafe { *CHARSET.get_unchecked(idx) }) + }) + .collect(); + + secret +} + +#[allow(dead_code)] +/// Generates a random authorization token +pub fn generate_auth_token() -> (String, String) { + const CHARSET: &[u8] = b"abcdefghijklmnopqrstuvwxyz\ + 0123456789"; + + const ID_SIZE: usize = 6; + let token_name: String = (0..ID_SIZE) + .map(|_| { + let idx = thread_rng().gen_range(0, CHARSET.len()); + // This is safe because `idx` is in range of `CHARSET` + char::from(unsafe { *CHARSET.get_unchecked(idx) }) + }) + .collect(); + + const SECRET_SIZE: usize = 16; + let token_secret: String = (0..SECRET_SIZE) + .map(|_| { + let idx = thread_rng().gen_range(0, CHARSET.len()); + // This is safe because `idx` is in range of `CHARSET` + char::from(unsafe { *CHARSET.get_unchecked(idx) }) + }) + .collect(); + + (token_name, token_secret) +} + +#[allow(dead_code)] +/// Generate a random session id +pub fn rand_session_id() -> i32 { + thread_rng().gen_range(1024, i32::MAX) +} + +#[allow(dead_code)] +/// Generates a random key +pub fn generate_random_key() -> String { + const CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\ + abcdefghijklmnopqrstuvwxyz\ + 0123456789)(*&^%$#@!~"; + const SIZE: usize = 32; + let key: String = (0..SIZE) + .map(|_| { + let idx = thread_rng().gen_range(0, CHARSET.len()); + // This is safe because `idx` is in range of `CHARSET` + char::from(unsafe { *CHARSET.get_unchecked(idx) }) + }) + .collect(); + + key +} diff --git a/utils/src/lib.rs b/utils/src/lib.rs new file mode 100644 index 0000000000..2af5bdc5a3 --- /dev/null +++ b/utils/src/lib.rs @@ -0,0 +1,16 @@ +pub mod actions; +pub mod config_helper; +pub mod counters; +pub mod string_helper; +pub mod generators; + +mod logger; +mod concurrent; + +pub use logger::init_logger; + +#[cfg(feature = "fixture")] +pub mod fixture; + +pub use concurrent::SimpleConcurrentHashMap; +pub use concurrent::SimpleConcurrentBTreeMap; \ No newline at end of file
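// Editor's note (illustration, not part of the original patch): the concurrent
// maps re-exported above take a lock on every access, which is fine for tests.
fn concurrent_map_demo() {
    let map: utils::SimpleConcurrentHashMap<String, i32> = utils::SimpleConcurrentHashMap::new();
    map.insert("spu-5001".to_owned(), 5001);
    assert!(map.contains_key(&"spu-5001".to_owned()));
    assert_eq!(*map.read().get("spu-5001").expect("key"), 5001);
}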
diff --git a/utils/src/logger.rs b/utils/src/logger.rs new file mode 100644 index 0000000000..db5009cace --- /dev/null +++ b/utils/src/logger.rs @@ -0,0 +1,51 @@ +use chrono::Local; +use env_logger::{ + fmt::{Color, Style, StyledValue}, + Builder, +}; +use log::Level; +use std::sync::atomic::{AtomicUsize, Ordering}; + +static MAX_MODULE_WIDTH: AtomicUsize = AtomicUsize::new(0); + +fn colored_level<'a>(style: &'a mut Style, level: Level) -> StyledValue<'a, &'static str> { + match level { + Level::Trace => style.set_color(Color::Magenta).value("TRACE"), + Level::Debug => style.set_color(Color::Blue).value("DEBUG"), + Level::Info => style.set_color(Color::Green).value("INFO "), + Level::Warn => style.set_color(Color::Yellow).value("WARN "), + Level::Error => style.set_color(Color::Red).value("ERROR"), + } +} + +pub fn init_logger() { + let mut builder = Builder::from_default_env(); + + builder.format(|f, record| { + use std::io::Write; + let target = record.target(); + let mut max_width = MAX_MODULE_WIDTH.load(Ordering::Relaxed); + if max_width < target.len() { + MAX_MODULE_WIDTH.store(target.len(), Ordering::Relaxed); + max_width = target.len(); + } + + let mut style = f.style(); + let level = colored_level(&mut style, record.level()); + let mut style = f.style(); + let target = style + .set_bold(true) + .value(format!("{: <width$}", target, width = max_width)); + + writeln!( + f, + "{} {} {} > {}", + //Local::now().format("%Y-%m-%d %H:%M:%S%.3f"), + Local::now().format("%H:%M:%S%.3f"), + level, + target, + record.args(), + ) + }); + + let _ = builder.try_init(); +} diff --git a/utils/src/string_helper.rs b/utils/src/string_helper.rs new file mode 100644 index 0000000000..9ecc0db671 --- /dev/null +++ b/utils/src/string_helper.rs @@ -0,0 +1,64 @@ +//! +//! # String helpers +//! + +/// Convert a string in UpperCamelCase to a sentence +pub fn upper_cammel_case_to_sentence(src: String, remove_first: bool) -> String { + // convert "ThisIsATest" to "this is a test" + let mut letters: Vec<_> = vec![]; + for c in src.chars() { + if c.is_uppercase() { + if !letters.is_empty() { + letters.push(' '); + } + if let Some(lowercase) = c.to_lowercase().to_string().chars().next() { + letters.push(lowercase); + } + } else { + letters.push(c); + } + } + let sentence: String = letters.into_iter().collect(); + + // remove first word: "this is a test" becomes "is a test" + if remove_first { + let mut words: Vec<_> = sentence.split_whitespace().collect(); + words.drain(0..1); + words.join(" ") + } else { + sentence + } +} + +// +// Unit Tests +// + +#[cfg(test)] +pub mod test { + use super::*; + + #[test] + fn test_upper_cammel_case_to_sentence() { + assert_eq!( + upper_cammel_case_to_sentence("ThisIsATest".to_owned(), false), + "this is a test" + ); + + assert_eq!( + upper_cammel_case_to_sentence("ThisIsATest".to_owned(), true), + "is a test" + ); + + assert_eq!( + upper_cammel_case_to_sentence("TopicAlreadyExists".to_owned(), true), + "already exists" + ); + + assert_eq!( + upper_cammel_case_to_sentence("UnknownTopicOrPartition".to_owned(), false), + "unknown topic or partition" + ); + + } +} \ No newline at end of file
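// Editor's note (illustration, not part of the original patch): wiring the
// utils crate together; RUST_LOG controls verbosity via env_logger.
fn main() {
    utils::init_logger();
    let reason = utils::string_helper::upper_cammel_case_to_sentence(
        "TopicAlreadyExists".to_owned(),
        true,
    );
    log::error!("topic error: {}", reason); // logs "topic error: already exists"
}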