diff --git a/CHANGELOG.md b/CHANGELOG.md
index df388bfb71b..4058dac6306 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,48 @@
 This file documents all notable changes to Falco. The release numbering uses [semantic versioning](http://semver.org).
 
+## v0.13.1
+
+Released 2019-01-16
+
+## Major Changes
+
+
+## Minor Changes
+
+* Unbuffer outputs by default. This helps make output readable when used in environments like K8s. [[#494](https://github.com/falcosecurity/falco/pull/494)]
+
+* Improved documentation for running Falco within K8s and getting K8s Audit Logging to work with Minikube and Falco as a Daemonset within K8s. [[#496](https://github.com/falcosecurity/falco/pull/496)]
+
+* Fix AWS Permissions for Kubernetes Response Engine [[#465](https://github.com/falcosecurity/falco/pull/465)]
+
+* Tighten compilation flags to include `-Wextra` and `-Werror` [[#479](https://github.com/falcosecurity/falco/pull/479)]
+
+* Add `k8s.ns.name` to outputs when `-pk` argument is used [[#472](https://github.com/falcosecurity/falco/pull/472)]
+
+* Remove kubernetes-response-engine from system:masters [[#488](https://github.com/falcosecurity/falco/pull/488)]
+
+## Bug Fixes
+
+* Ensure `-pc`/`-pk` only apply to syscall rules and not k8s_audit rules [[#495](https://github.com/falcosecurity/falco/pull/495)]
+
+* Fix a potential crash that could occur when using the falco engine and rulesets [[#468](https://github.com/falcosecurity/falco/pull/468)]
+
+* Fix a regression where format output options were mistakenly removed [[#485](https://github.com/falcosecurity/falco/pull/485)]
+
+## Rule Changes
+
+* Fix FPs related to calico and writing files below etc [[#481](https://github.com/falcosecurity/falco/pull/481)]
+
+* Fix FPs related to `apt-config`/`apt-cache`, `apk` [[#490](https://github.com/falcosecurity/falco/pull/490)]
+
+* New rules `Launch Package Management Process in Container`, `Netcat Remote Code Execution in Container`, `Launch Suspicious Network Tool in Container` look for host-level network tools like `netcat`, package management tools like `apt-get`, or network tool binaries being run in a container. [[#490](https://github.com/falcosecurity/falco/pull/490)]
+
+* Fix the `inbound` and `outbound` macros so they work with sendto/recvfrom/sendmsg/recvmsg. [[#470](https://github.com/falcosecurity/falco/pull/470)]
+
+* Fix FPs related to prometheus/openshift writing config below /etc. [[#470](https://github.com/falcosecurity/falco/pull/470)]
+
 ## v0.13.0
 
 Released 2018-11-09
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3c7e965d9d9..662dab91b5a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -19,6 +19,8 @@ cmake_minimum_required(VERSION 2.8.2)
 
 project(falco)
 
+option(BUILD_WARNINGS_AS_ERRORS "Enable building with -Wextra -Werror flags")
+
 if(NOT DEFINED FALCO_VERSION)
   set(FALCO_VERSION "0.1.1dev")
 endif()
@@ -35,8 +37,15 @@ if(NOT DRAIOS_DEBUG_FLAGS)
   set(DRAIOS_DEBUG_FLAGS "-D_DEBUG")
 endif()
 
-set(CMAKE_C_FLAGS "-Wall -ggdb ${DRAIOS_FEATURE_FLAGS}")
-set(CMAKE_CXX_FLAGS "-Wall -ggdb --std=c++0x ${DRAIOS_FEATURE_FLAGS}")
+set(CMAKE_COMMON_FLAGS "-Wall -ggdb ${DRAIOS_FEATURE_FLAGS}")
+
+if(BUILD_WARNINGS_AS_ERRORS)
+  set(CMAKE_SUPPRESSED_WARNINGS "-Wno-unused-parameter -Wno-missing-field-initializers -Wno-sign-compare -Wno-type-limits -Wno-implicit-fallthrough -Wno-format-truncation")
+  set(CMAKE_COMMON_FLAGS "${CMAKE_COMMON_FLAGS} -Wextra -Werror ${CMAKE_SUPPRESSED_WARNINGS}")
+endif()
+
+set(CMAKE_C_FLAGS "${CMAKE_COMMON_FLAGS}")
+set(CMAKE_CXX_FLAGS "--std=c++0x ${CMAKE_COMMON_FLAGS}")
 
 set(CMAKE_C_FLAGS_DEBUG "${DRAIOS_DEBUG_FLAGS}")
 set(CMAKE_CXX_FLAGS_DEBUG "${DRAIOS_DEBUG_FLAGS}")
diff --git a/README.md b/README.md
index d7ebe95fe8a..85878b6565f 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,7 @@
 
 #### Latest release
 
-**v0.13.0**
+**v0.13.1**
 Read the [change log](https://github.com/falcosecurity/falco/blob/dev/CHANGELOG.md)
 
 Dev Branch: [![Build Status](https://travis-ci.org/falcosecurity/falco.svg?branch=dev)](https://travis-ci.org/falcosecurity/falco)
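The new `BUILD_WARNINGS_AS_ERRORS` CMake option defaults to off, so existing builds are unaffected. A minimal sketch of enabling it, assuming the usual out-of-tree build directory:

```
# Configure an out-of-tree build with the new option enabled, so that
# -Wextra -Werror (minus the suppressed warnings) are appended to the
# common C/C++ flags, then build as usual.
mkdir -p build && cd build
cmake -DBUILD_WARNINGS_AS_ERRORS=On ..
make
```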
diff --git a/examples/k8s_audit_config/README.md b/examples/k8s_audit_config/README.md
index 481a590f672..709ce2cac67 100644
--- a/examples/k8s_audit_config/README.md
+++ b/examples/k8s_audit_config/README.md
@@ -3,21 +3,36 @@
 The files in this directory can be used to configure k8s audit logging. The relevant files are:
 
 * [audit-policy.yaml](./audit-policy.yaml): The k8s audit log configuration we used to create the rules in [k8s_audit_rules.yaml](../../rules/k8s_audit_rules.yaml). You may find it useful as a reference when creating your own K8s Audit Log configuration.
-* [webhook-config.yaml](./webhook-config.yaml): A webhook configuration that sends audit events to localhost, port 8765. You may find it useful as a starting point when deciding how to route audit events to the embedded webserver within falco.
+* [webhook-config.yaml.in](./webhook-config.yaml.in): A (templated) webhook configuration that sends audit events to an IP associated with the falco service, port 8765. It is templated in that the *actual* IP is defined in an environment variable `FALCO_SERVICE_CLUSTERIP`, which can be plugged in using a program like `envsubst`. You may find it useful as a starting point when deciding how to route audit events to the embedded webserver within falco.
 
-This file is only needed when using Minikube, which doesn't currently
+These files are only needed when using Minikube, which doesn't currently
 have the ability to provide an audit config/webhook config directly
 from the minikube commandline. See [this issue](https://github.com/kubernetes/minikube/issues/2741) for more details.
 
 * [apiserver-config.patch.sh](./apiserver-config.patch.sh): A script that changes the configuration file `/etc/kubernetes/manifests/kube-apiserver.yaml` to add necessary config options and mounts for the kube-apiserver container that runs within the minikube vm.
 
-A way to use these files with minikube to enable audit logging would be to run the following commands, from this directory:
+A way to use these files with minikube to run falco and enable audit logging would be the following:
+
+#### Start Minikube with Audit Logging Enabled
+
+Run the following to start minikube with Audit Logging Enabled:
 
 ```
 minikube start --kubernetes-version v1.11.0 --mount --mount-string $PWD:/tmp/k8s_audit_config --feature-gates AdvancedAuditing=true
+```
+
+#### Create a Falco DaemonSet and Supporting Accounts/Services
+
+Follow the [K8s Using Daemonset](../../integrations/k8s-using-daemonset/README.md) instructions to create a falco service account, service, configmap, and daemonset.
+
+#### Configure Audit Logging with a Policy and Webhook
+
+Run the following commands to fill in the template file with the ClusterIP address you created with the `falco-service` service above, and to configure audit logging to use a policy and webhook that direct the right events to the falco daemonset. Although service names like `falco-service.default.svc.cluster.local` cannot be resolved from the kube-apiserver container within the minikube vm (it runs as a pod but isn't *really* a part of the cluster), the ClusterIPs associated with those services are routable.
+
+```
+FALCO_SERVICE_CLUSTERIP=$(kubectl get service falco-service -o=jsonpath={.spec.clusterIP}) envsubst < webhook-config.yaml.in > webhook-config.yaml
 ssh -i $(minikube ssh-key) docker@$(minikube ip) sudo bash /tmp/k8s_audit_config/apiserver-config.patch.sh
-ssh -i $(minikube ssh-key) -R 8765:localhost:8765 docker@$(minikube ip)
 ```
 
-K8s audit events will then be sent to localhost on the host (not minikube vm) machine, port 8765.
+K8s audit events will then be routed to the falco daemonset within the cluster, which you can observe via `kubectl logs -f $(kubectl get pods -l app=falco-example -o jsonpath={.items[0].metadata.name})`.
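As a quick sanity check on the templating step (illustrative commands, not part of the upstream instructions; the ClusterIP shown is a made-up example):

```
# Confirm the service has a ClusterIP and that envsubst wrote it into
# the rendered webhook config.
kubectl get service falco-service -o=jsonpath={.spec.clusterIP}
# e.g. 10.107.65.222
grep server: webhook-config.yaml
#     server: http://10.107.65.222:8765/k8s_audit
```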
diff --git a/examples/k8s_audit_config/webhook-config.yaml b/examples/k8s_audit_config/webhook-config.yaml.in
similarity index 77%
rename from examples/k8s_audit_config/webhook-config.yaml
rename to examples/k8s_audit_config/webhook-config.yaml.in
index f188dbdb5d5..3ace6a964bd 100644
--- a/examples/k8s_audit_config/webhook-config.yaml
+++ b/examples/k8s_audit_config/webhook-config.yaml.in
@@ -3,7 +3,7 @@ kind: Config
 clusters:
 - name: falco
   cluster:
-    server: http://127.0.0.1:8765/k8s_audit
+    server: http://$FALCO_SERVICE_CLUSTERIP:8765/k8s_audit
 contexts:
 - context:
     cluster: falco
diff --git a/falco.yaml b/falco.yaml
index 0ed9202b994..986f6de21e1 100644
--- a/falco.yaml
+++ b/falco.yaml
@@ -60,8 +60,8 @@ log_level: info
 priority: debug
 
 # Whether or not output to any of the output channels below is
-# buffered. Defaults to true
-buffered_outputs: true
+# buffered. Defaults to false
+buffered_outputs: false
 
 # A throttling mechanism implemented as a token bucket limits the
 # rate of falco notifications. This throttling is controlled by the following configuration
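With unbuffered output now the default, buffering can still be re-enabled for a single run without editing `falco.yaml`; a sketch using falco's `-o` config-override flag (semantics assumed from falco's usage text, so verify against `falco --help` for your version):

```
# Override a configuration-file option from the command line:
falco -o buffered_outputs=true
```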
diff --git a/integrations/k8s-using-daemonset/README.md b/integrations/k8s-using-daemonset/README.md
index e55fbd9ac29..e224fa7367c 100644
--- a/integrations/k8s-using-daemonset/README.md
+++ b/integrations/k8s-using-daemonset/README.md
@@ -4,7 +4,7 @@ This directory gives you the required YAML files to stand up Sysdig Falco on Kub
 The two options are provided to deploy a Daemon Set:
 - `k8s-with-rbac` - This directory provides a definition to deploy a Daemon Set on Kubernetes with RBAC enabled.
-- `k8s-without-rbac` - This directory provides a definition to deploy a Daemon Set on Kubernetes without RBAC enabled.
+- `k8s-without-rbac` - This directory provides a definition to deploy a Daemon Set on Kubernetes without RBAC enabled. **This method is deprecated in favor of RBAC-based installs, and won't be updated going forward.**
 
 Also provided:
 - `falco-event-generator-deployment.yaml` - A Kubernetes Deployment to generate sample events. This is useful for testing, but note it will generate a large number of events.
@@ -21,11 +21,20 @@
 clusterrolebinding "falco-cluster-role-binding" created
 k8s-using-daemonset$
 ```
 
+We also create a service that allows other services to reach the embedded webserver in falco, which listens on https port 8765:
+
+```
+k8s-using-daemonset$ kubectl create -f k8s-with-rbac/falco-service.yaml
+service/falco-service created
+k8s-using-daemonset$
+```
+
 The Daemon Set also relies on a Kubernetes ConfigMap to store the Falco configuration and make the configuration available to the Falco Pods. This allows you to manage custom configuration without rebuilding and redeploying the underlying Pods. In order to create the ConfigMap you'll first need to copy the required configuration files from their location in this GitHub repo to the `k8s-with-rbac/falco-config/` directory. Any modification of the configuration should be performed on these copies rather than the original files.
 
 ```
 k8s-using-daemonset$ cp ../../falco.yaml k8s-with-rbac/falco-config/
 k8s-using-daemonset$ cp ../../rules/falco_rules.* k8s-with-rbac/falco-config/
+k8s-using-daemonset$ cp ../../rules/k8s_audit_rules.yaml k8s-with-rbac/falco-config/
 ```
 
 If you want to send Falco alerts to a Slack channel, you'll want to modify the `falco.yaml` file to point to your Slack webhook. For more information on getting a webhook URL for your Slack team, refer to the [Slack documentation](https://api.slack.com/incoming-webhooks). Add the below to the bottom of the `falco.yaml` config file you just copied to enable Slack messages.
@@ -54,7 +63,7 @@ k8s-using-daemonset$
 ```
 
-## Deploying to Kubernetes without RBAC enabled
+## Deploying to Kubernetes without RBAC enabled (**Deprecated**)
 
 If you are running Kubernetes with Legacy Authorization enabled, you can use `kubectl` to deploy the Daemon Set provided in the `k8s-without-rbac` directory. The example provides the ability to post messages to a Slack channel via a webhook. For more information on getting a webhook URL for your Slack team, refer to the [Slack documentation](https://api.slack.com/incoming-webhooks). Modify the [`args`](https://github.com/draios/falco/blob/dev/examples/k8s-using-daemonset/falco-daemonset.yaml#L21) passed to the Falco container to point to the appropriate URL for your webhook.
diff --git a/integrations/k8s-using-daemonset/k8s-with-rbac/falco-account.yaml b/integrations/k8s-using-daemonset/k8s-with-rbac/falco-account.yaml
index 9d611519522..b3968a79e34 100644
--- a/integrations/k8s-using-daemonset/k8s-with-rbac/falco-account.yaml
+++ b/integrations/k8s-using-daemonset/k8s-with-rbac/falco-account.yaml
@@ -2,11 +2,17 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: falco-account
+  labels:
+    app: falco-example
+    role: security
 ---
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: falco-cluster-role
+  labels:
+    app: falco-example
+    role: security
 rules:
   - apiGroups: ["extensions",""]
     resources: ["nodes","namespaces","pods","replicationcontrollers","services","events","configmaps"]
@@ -19,6 +25,9 @@ apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
   name: falco-cluster-role-binding
   namespace: default
+  labels:
+    app: falco-example
+    role: security
 subjects:
 - kind: ServiceAccount
   name: falco-account
diff --git a/integrations/k8s-using-daemonset/k8s-with-rbac/falco-daemonset-configmap.yaml b/integrations/k8s-using-daemonset/k8s-with-rbac/falco-daemonset-configmap.yaml
index 406b7892649..b88a8fe56b1 100644
--- a/integrations/k8s-using-daemonset/k8s-with-rbac/falco-daemonset-configmap.yaml
+++ b/integrations/k8s-using-daemonset/k8s-with-rbac/falco-daemonset-configmap.yaml
@@ -1,16 +1,15 @@
 apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
-  name: falco
+  name: falco-daemonset
   labels:
-    name: falco-daemonset
-    app: demo
+    app: falco-example
+    role: security
 spec:
   template:
     metadata:
       labels:
-        name: falco
-        app: demo
+        app: falco-example
         role: security
     spec:
       serviceAccount: falco-account
diff --git a/integrations/k8s-using-daemonset/k8s-with-rbac/falco-service.yaml b/integrations/k8s-using-daemonset/k8s-with-rbac/falco-service.yaml
new file mode 100644
index 00000000000..3ed22658de5
--- /dev/null
+++ b/integrations/k8s-using-daemonset/k8s-with-rbac/falco-service.yaml
@@ -0,0 +1,13 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: falco-service
+  labels:
+    app: falco-example
+    role: security
+spec:
+  selector:
+    app: falco-example
+  ports:
+    - protocol: TCP
+      port: 8765
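A quick way to confirm the pieces are wired together, using the labels and names defined in the manifests above (illustrative; output shapes will vary by cluster):

```
# Verify the daemonset pods carry the new app=falco-example label and
# that the service exposing the embedded webserver exists on port 8765.
kubectl get pods -l app=falco-example
kubectl get service falco-service
```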
diff --git a/integrations/kubernetes-response-engine/deployment/aws/.gitignore b/integrations/kubernetes-response-engine/deployment/aws/.gitignore
index e95d270e26b..5b70b6d365d 100644
--- a/integrations/kubernetes-response-engine/deployment/aws/.gitignore
+++ b/integrations/kubernetes-response-engine/deployment/aws/.gitignore
@@ -1,4 +1,4 @@
 .terraform/*
 .terraform.*
 terraform.*
-*.yaml
+aws-auth-patch.yml
diff --git a/integrations/kubernetes-response-engine/deployment/aws/Makefile b/integrations/kubernetes-response-engine/deployment/aws/Makefile
index ff640c9ccc8..fc7c14a6f7e 100644
--- a/integrations/kubernetes-response-engine/deployment/aws/Makefile
+++ b/integrations/kubernetes-response-engine/deployment/aws/Makefile
@@ -1,11 +1,17 @@
-all: create configure
+deploy: rbac create configure
+
+rbac:
+	kubectl apply -f cluster-role.yaml
+	kubectl apply -f cluster-role-binding.yaml
 
 create:
-	terraform apply
+	terraform apply -auto-approve
 
 configure:
 	kubectl get -n kube-system configmap/aws-auth -o yaml | awk "/mapRoles: \|/{print;print \"$(shell terraform output patch_for_aws_auth)\";next}1" > aws-auth-patch.yml
 	kubectl -n kube-system replace -f aws-auth-patch.yml
 
 clean:
-	terraform destroy
+	terraform destroy -force
+	kubectl delete -f cluster-role-binding.yaml
+	kubectl delete -f cluster-role.yaml
diff --git a/integrations/kubernetes-response-engine/deployment/aws/cluster-role-binding.yaml b/integrations/kubernetes-response-engine/deployment/aws/cluster-role-binding.yaml
new file mode 100644
index 00000000000..5b264a23ec2
--- /dev/null
+++ b/integrations/kubernetes-response-engine/deployment/aws/cluster-role-binding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: kubernetes-response-engine-cluster-role-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: kubernetes-response-engine-cluster-role
+subjects:
+- kind: User
+  apiGroup: rbac.authorization.k8s.io
+  name: kubernetes-response-engine
diff --git a/integrations/kubernetes-response-engine/deployment/aws/cluster-role.yaml b/integrations/kubernetes-response-engine/deployment/aws/cluster-role.yaml
new file mode 100644
index 00000000000..4c76c26b995
--- /dev/null
+++ b/integrations/kubernetes-response-engine/deployment/aws/cluster-role.yaml
@@ -0,0 +1,25 @@
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: kubernetes-response-engine-cluster-role
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+    verbs:
+      - delete
+      - list
+      - patch
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - patch
+  - apiGroups:
+      - batch
+    resources:
+      - jobs
+    verbs:
+      - create
diff --git a/integrations/kubernetes-response-engine/deployment/aws/lambda.tf b/integrations/kubernetes-response-engine/deployment/aws/lambda.tf
index 375a2ef3eba..cc2f724cc3a 100644
--- a/integrations/kubernetes-response-engine/deployment/aws/lambda.tf
+++ b/integrations/kubernetes-response-engine/deployment/aws/lambda.tf
@@ -1,3 +1,7 @@
+resource "aws_iam_user" "kubernetes-response-engine-user" {
+  name = "kubernetes_response_engine"
+}
+
 resource "aws_iam_role" "iam-for-lambda" {
   name = "iam_for_lambda"
 
@@ -9,7 +13,7 @@
       "Action": "sts:AssumeRole",
       "Principal": {
         "Service": "lambda.amazonaws.com",
-        "AWS": "${var.iam-user-arn}"
+        "AWS": "${aws_iam_user.kubernetes-response-engine-user.arn}"
       },
       "Effect": "Allow",
       "Sid": ""
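With the Makefile's new default target, the whole AWS deployment — RBAC objects, Terraform resources, and the aws-auth ConfigMap patch — runs in one step, and `clean` reverses it. A sketch, assuming working AWS credentials and a kubeconfig pointed at the EKS cluster:

```
# Create the ClusterRole/ClusterRoleBinding, apply the Terraform plan
# non-interactively, and patch the aws-auth ConfigMap:
make deploy
# Tear everything down again:
make clean
```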
diff --git a/integrations/kubernetes-response-engine/deployment/aws/outputs.tf b/integrations/kubernetes-response-engine/deployment/aws/outputs.tf
index c793c2d2e7b..0423215785b 100644
--- a/integrations/kubernetes-response-engine/deployment/aws/outputs.tf
+++ b/integrations/kubernetes-response-engine/deployment/aws/outputs.tf
@@ -1,9 +1,7 @@
 locals {
   patch_for_aws_auth = <
diff --git a/rules/falco_rules.yaml b/rules/falco_rules.yaml
 - macro: inbound
   condition: >
-    (((evt.type in (accept,listen) and evt.dir=<)) or
+    (((evt.type in (accept,listen) and evt.dir=<) or
+      (evt.type in (recvfrom,recvmsg) and evt.dir=< and
+       fd.l4proto != tcp and fd.connected=false and fd.name_changed=true)) and
     (fd.typechar = 4 or fd.typechar = 6) and
     (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8") and
     (evt.rawres >= 0 or evt.res = EINPROGRESS))
 
 - macro: outbound
   condition: >
-    (((evt.type = connect and evt.dir=<)) or
+    (((evt.type = connect and evt.dir=<) or
+      (evt.type in (sendto,sendmsg) and evt.dir=< and
+       fd.l4proto != tcp and fd.connected=false and fd.name_changed=true)) and
     (fd.typechar = 4 or fd.typechar = 6) and
     (fd.ip != "0.0.0.0" and fd.net != "127.0.0.0/8") and
     (evt.rawres >= 0 or evt.res = EINPROGRESS))
@@ -628,7 +638,7 @@
   condition: (veritas_progs and (fd.name startswith /etc/vx or fd.name startswith /etc/opt/VRTS or fd.name startswith /etc/vom))
 
 - macro: nginx_writing_conf
-  condition: (proc.name=nginx and fd.name startswith /etc/nginx)
+  condition: (proc.name in (nginx,nginx-ingress-c) and fd.name startswith /etc/nginx)
 
 - macro: nginx_writing_certs
   condition: >
@@ -839,7 +849,17 @@
   condition: (proc.aname[2] in (dpkg-reconfigur, dpkg-preconfigu))
 
 - macro: ufw_writing_conf
-  condition: proc.name=ufw and fd.directory=/etc/ufw
+  condition: (proc.name=ufw and fd.directory=/etc/ufw)
+
+- macro: calico_writing_conf
+  condition: >
+    (proc.name = calico-node and fd.name startswith /etc/calico)
+
+- macro: prometheus_conf_writing_conf
+  condition: (proc.name=prometheus-conf and fd.directory=/etc/prometheus/config_out)
+
+- macro: openshift_writing_conf
+  condition: (proc.name=oc and fd.name=/etc/origin/node/node.kubeconfig)
 
 # Add conditions to this macro (probably in a separate file,
 # overwriting this macro) to allow for specific combinations of
@@ -943,6 +963,9 @@
   and not iscsi_writing_conf
   and not istio_writing_conf
   and not ufw_writing_conf
+  and not calico_writing_conf
+  and not prometheus_conf_writing_conf
+  and not openshift_writing_conf
 
 - rule: Write below etc
   desc: an attempt to write to any file below /etc
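Per the comment above, site-specific exceptions belong in a local override rather than in `falco_rules.yaml` itself. A hypothetical sketch: the macro name `user_known_write_etc_conditions` is assumed from the stock ruleset (verify against your version), and the process/directory names are invented for illustration:

```
# Hypothetical local override: allow one more writer below /etc without
# editing falco_rules.yaml. Macro name assumed; path is the standard
# package-install location for local rules.
cat >> /etc/falco/falco_rules.local.yaml <<'EOF'
- macro: user_known_write_etc_conditions
  condition: (proc.name=my-config-agent and fd.directory=/etc/my-agent)
EOF
```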
@@ -1153,7 +1176,7 @@
   as a part of creating a container) by calling setns.
   condition: >
     evt.type = setns
-    and not proc.name in (docker_binaries, k8s_binaries, lxd_binaries, sysdigcloud_binaries, sysdig, nsenter)
+    and not proc.name in (docker_binaries, k8s_binaries, lxd_binaries, sysdigcloud_binaries, sysdig, nsenter, calico)
     and not proc.name in (user_known_change_thread_namespace_binaries)
     and not proc.name startswith "runc:"
     and not proc.pname in (sysdigcloud_binaries)
@@ -1742,6 +1765,46 @@
   priority: NOTICE
   tags: [network, k8s, container]
 
+- list: network_tool_binaries
+  items: [nc, ncat, nmap]
+
+- macro: network_tool_procs
+  condition: proc.name in (network_tool_binaries)
+
+# Container is supposed to be immutable. Package management should be done in building the image.
+- rule: Launch Package Management Process in Container
+  desc: Package management process ran inside container
+  condition: >
+    spawned_process and container and user.name != "_apt" and package_mgmt_procs and not package_mgmt_ancestor_procs
+  output: >
+    Package management process launched in container (user=%user.name
+    command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image)
+  priority: ERROR
+  tags: [process]
+
+- rule: Netcat Remote Code Execution in Container
+  desc: Netcat Program runs inside container that allows remote code execution
+  condition: >
+    spawned_process and container and
+    ((proc.name = "nc" and (proc.args contains "-e" or proc.args contains "-c")) or
+     (proc.name = "ncat" and (proc.args contains "--sh-exec" or proc.args contains "--exec"))
+    )
+  output: >
+    Netcat runs inside container that allows remote code execution (user=%user.name
+    command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image)
+  priority: WARNING
+  tags: [network, process]
+
+- rule: Launch Suspicious Network Tool in Container
+  desc: Detect network tools launched inside container
+  condition: >
+    spawned_process and container and network_tool_procs
+  output: >
+    Network tool launched in container (user=%user.name
+    command=%proc.cmdline container_id=%container.id container_name=%container.name image=%container.image)
+  priority: NOTICE
+  tags: [network, process]
+
 # Application rules have moved to application_rules.yaml. Please look
 # there if you want to enable them by adding to
 # falco_rules.local.yaml.
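One way to see the new container rules fire while falco is running (illustrative; assumes `apk` is in the stock `package_mgmt_procs` list and that the alpine image's busybox provides `nc`):

```
# Should trigger "Launch Package Management Process in Container":
docker run --rm alpine apk update
# Should trigger "Launch Suspicious Network Tool in Container":
docker run --rm alpine nc -l -p 4444
```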
diff --git a/userspace/engine/formats.cpp b/userspace/engine/formats.cpp
index cc1c416b395..904e856cb64 100644
--- a/userspace/engine/formats.cpp
+++ b/userspace/engine/formats.cpp
@@ -152,7 +152,27 @@
 	if(s_json_output)
 	{
-		s_inspector->set_buffer_format(sinsp_evt::PF_JSON);
+		switch(s_inspector->get_buffer_format())
+		{
+		case sinsp_evt::PF_NORMAL:
+			s_inspector->set_buffer_format(sinsp_evt::PF_JSON);
+			break;
+		case sinsp_evt::PF_EOLS:
+			s_inspector->set_buffer_format(sinsp_evt::PF_JSONEOLS);
+			break;
+		case sinsp_evt::PF_HEX:
+			s_inspector->set_buffer_format(sinsp_evt::PF_JSONHEX);
+			break;
+		case sinsp_evt::PF_HEXASCII:
+			s_inspector->set_buffer_format(sinsp_evt::PF_JSONHEXASCII);
+			break;
+		case sinsp_evt::PF_BASE64:
+			s_inspector->set_buffer_format(sinsp_evt::PF_JSONBASE64);
+			break;
+		default:
+			// do nothing
+			break;
+		}
 		s_formatters->tostring((sinsp_evt *) evt, sformat, &json_line);
 
 		// The formatted string might have a leading newline. If it does, remove it.
@@ -160,8 +180,6 @@
 		{
 			json_line.erase(0, 1);
 		}
-
-		s_inspector->set_buffer_format(sinsp_evt::PF_NORMAL);
 	}
 }
 catch (sinsp_exception& e)
diff --git a/userspace/engine/lua/rule_loader.lua b/userspace/engine/lua/rule_loader.lua
index 9fb96fd00a1..8c81d62b9a2 100644
--- a/userspace/engine/lua/rule_loader.lua
+++ b/userspace/engine/lua/rule_loader.lua
@@ -493,24 +493,26 @@ function load_rules(sinsp_lua_parser,
 
       -- If the format string contains %container.info, replace it
       -- with extra. Otherwise, add extra onto the end of the format
      -- string.
-      if string.find(v['output'], "%container.info", nil, true) ~= nil then
-
-         -- There may not be any extra, or we're not supposed
-         -- to replace it, in which case we use the generic
-         -- "%container.name (id=%container.id)"
-         if replace_container_info == false then
-            v['output'] = string.gsub(v['output'], "%%container.info", "%%container.name (id=%%container.id)")
+      if v['source'] == "syscall" then
+         if string.find(v['output'], "%container.info", nil, true) ~= nil then
+
+            -- There may not be any extra, or we're not supposed
+            -- to replace it, in which case we use the generic
+            -- "%container.name (id=%container.id)"
+            if replace_container_info == false then
+               v['output'] = string.gsub(v['output'], "%%container.info", "%%container.name (id=%%container.id)")
+               if extra ~= "" then
+                  v['output'] = v['output'].." "..extra
+               end
+            else
+               safe_extra = string.gsub(extra, "%%", "%%%%")
+               v['output'] = string.gsub(v['output'], "%%container.info", safe_extra)
+            end
+         else
+            -- Just add the extra to the end
             if extra ~= "" then
                v['output'] = v['output'].." "..extra
             end
-         else
-            safe_extra = string.gsub(extra, "%%", "%%%%")
-            v['output'] = string.gsub(v['output'], "%%container.info", safe_extra)
-         end
-      else
-         -- Just add the extra to the end
-         if extra ~= "" then
-            v['output'] = v['output'].." "..extra
          end
       end
diff --git a/userspace/engine/ruleset.cpp b/userspace/engine/ruleset.cpp
index a43ffb33f22..27cb0a42a88 100644
--- a/userspace/engine/ruleset.cpp
+++ b/userspace/engine/ruleset.cpp
@@ -234,6 +234,11 @@ bool falco_ruleset::run(gen_event *evt, uint32_t etag, uint16_t ruleset)
 
 void falco_ruleset::event_tags_for_ruleset(vector<bool> &evttypes, uint16_t ruleset)
 {
+	if(m_rulesets.size() < (size_t) ruleset + 1)
+	{
+		return;
+	}
+
 	return m_rulesets[ruleset]->event_tags_for_ruleset(evttypes);
 }
 
@@ -314,7 +319,7 @@ void falco_sinsp_ruleset::evttypes_for_ruleset(vector<bool> &evttypes, uint16_t
 {
 	uint32_t etag = evttype_to_event_tag(etype);
 
-	if(event_tags[etag])
+	if(etag < event_tags.size() && event_tags[etag])
 	{
 		evttypes[etype] = true;
 	}
@@ -333,7 +338,7 @@ void falco_sinsp_ruleset::syscalls_for_ruleset(vector<bool> &syscalls, uint16_t
 {
 	uint32_t etag = evttype_to_event_tag(syscallid);
 
-	if(event_tags[etag])
+	if(etag < event_tags.size() && event_tags[etag])
 	{
 		syscalls[syscallid] = true;
 	}
diff --git a/userspace/falco/configuration.cpp b/userspace/falco/configuration.cpp
index 5b7706207a0..1894946cee8 100644
--- a/userspace/falco/configuration.cpp
+++ b/userspace/falco/configuration.cpp
@@ -30,7 +30,7 @@ limitations under the License.
 using namespace std;
 
 falco_configuration::falco_configuration()
-	: m_buffered_outputs(true),
+	: m_buffered_outputs(false),
 	  m_webserver_enabled(false),
 	  m_webserver_listen_port(8765),
 	  m_webserver_k8s_audit_endpoint("/k8s_audit"),
@@ -155,7 +155,7 @@ void falco_configuration::init(string conf_filename, list<string> &cmdline_optio
 	}
 	m_min_priority = (falco_common::priority_type) (it - falco_common::priority_names.begin());
 
-	m_buffered_outputs = m_config->get_scalar<bool>("buffered_outputs", true);
+	m_buffered_outputs = m_config->get_scalar<bool>("buffered_outputs", false);
 
 	falco_logger::log_stderr = m_config->get_scalar<bool>("log_stderr", false);
 	falco_logger::log_syslog = m_config->get_scalar<bool>("log_syslog", true);
diff --git a/userspace/falco/falco.cpp b/userspace/falco/falco.cpp
index b9e26a5e928..ddeb85f7d78 100644
--- a/userspace/falco/falco.cpp
+++ b/userspace/falco/falco.cpp
@@ -458,6 +458,7 @@ int falco_init(int argc, char **argv)
 	static struct option long_options[] =
 	{
 		{"help", no_argument, 0, 'h' },
+		{"print-base64", no_argument, 0, 'b'},
 		{"daemon", no_argument, 0, 'd' },
 		{"k8s-api", required_argument, 0, 'k'},
 		{"k8s-api-cert", required_argument, 0, 'K' },
@@ -488,7 +489,7 @@
 	// Parse the args
 	//
 	while((op = getopt_long(argc, argv,
-				"hc:AdD:e:ik:K:Ll:m:M:o:P:p:r:s:T:t:UvV:w:",
+				"hc:AbdD:e:F:ik:K:Ll:m:M:o:P:p:r:S:s:T:t:UvV:w:",
 				long_options, &long_index)) != -1)
 	{
 		switch(op)
@@ -559,7 +560,7 @@
 			}
 			else if(string(optarg) == "k" || string(optarg) == "kubernetes")
 			{
-				output_format = "k8s.pod=%k8s.pod.name container=%container.id";
+				output_format = "k8s.ns=%k8s.ns.name k8s.pod=%k8s.pod.name container=%container.id";
 				replace_container_info = true;
 			}
 			else if(string(optarg) == "m" || string(optarg) == "mesos")
diff --git a/userspace/falco/falco_outputs.cpp b/userspace/falco/falco_outputs.cpp
index 3ef21d620ef..97341e0b1b8 100644
--- a/userspace/falco/falco_outputs.cpp
+++ b/userspace/falco/falco_outputs.cpp
@@ -37,19 +37,26 @@ falco_outputs::falco_outputs(falco_engine *engine)
 
 falco_outputs::~falco_outputs()
 {
+	// Note: The assert()s in this destructor were previously places where
+	// exceptions were thrown. C++11 doesn't allow destructors to
+	// emit exceptions; if they're thrown, they'll trigger a call
+	// to 'terminate()'. To maintain similar behavior, the exceptions
+	// were replaced with calls to 'assert()'.
 	if(m_initialized)
 	{
 		lua_getglobal(m_ls, m_lua_output_cleanup.c_str());
 		if(!lua_isfunction(m_ls, -1))
 		{
-			throw falco_exception("No function " + m_lua_output_cleanup + " found. ");
+			falco_logger::log(LOG_ERR, std::string("No function ") + m_lua_output_cleanup + " found. ");
+			assert(nullptr == "Missing lua cleanup function in ~falco_outputs");
 		}
 		if(lua_pcall(m_ls, 0, 0, 0) != 0)
 		{
 			const char* lerr = lua_tostring(m_ls, -1);
-			throw falco_exception(string(lerr));
+			falco_logger::log(LOG_ERR, std::string("lua_pcall failed, err: ") + lerr);
+			assert(nullptr == "lua_pcall failed in ~falco_outputs");
 		}
 	}
 }
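Putting the CLI changes together: `-pk` output now leads with the namespace, and the new `-b`/`--print-base64` flag carries through to JSON output thanks to the formats.cpp change above. A usage sketch (the k8s API URL is a placeholder; supply your own endpoint and credentials):

```
# -pk: k8s-friendly output, now prefixed with k8s.ns=<namespace>
# -b:  print data buffers in base64, preserved even with json_output=true
falco -pk -k https://<k8s-api-server> -o json_output=true -b
```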