From 357d735f03dc49a4ed91f4711bd04d0718e14381 Mon Sep 17 00:00:00 2001 From: SG <13872653+mmguero@users.noreply.github.com> Date: Fri, 10 Jul 2020 12:56:04 -0600 Subject: [PATCH] Merge topic/dockerperms to address issue #137 (#138) (#139) This pull request adds some new environment variables for Malcolm to address #137 * `PUID` and `PGID` * Docker runs all of its containers as the privileged `root` user by default. For better security, Malcolm immediately drops to non-privileged user accounts for executing internal processes wherever possible. The `PUID` (**p**rocess **u**ser **ID**) and `PGID` (**p**rocess **g**roup **ID**) environment variables allow Malcolm to map internal non-privileged user accounts to a corresponding [user account](https://en.wikipedia.org/wiki/User_identifier) on the host. Additionally, this pull request moves all remaining processes that can be run non-privileged to run as non-privileged. Each docker container now has the following in its Dockerfile (this example is from the zeek container, they're all similar but may have different specific values): ``` ARG DEFAULT_UID=1000 ARG DEFAULT_GID=1000 ENV DEFAULT_UID $DEFAULT_UID ENV DEFAULT_GID $DEFAULT_GID ENV PUSER "zeek" ENV PGROUP "zeek" ENV PUSER_PRIV_DROP true ``` The entrypoint of each docker container is now [docker-uid-gid-setup.sh](https://github.com/idaholab/Malcolm/blob/master/shared/bin/docker-uid-gid-setup.sh), which does the following: 1. changes the UID and GID of the default (1000:1000) user to match the PUID:PGID provided 2. finds any files *inside* the docker image owned by those IDs and chowns them 3. if required, execs the container command by dropping privileges to the unprivileged user Additionally, control.py (used for start, restart, etc.) will now error out if run as root rather than just running with a bunch of errors. Malcolm should not be run as a root user. 
--- Dockerfiles/curator.Dockerfile | 36 ++- Dockerfiles/elastalert.Dockerfile | 19 +- Dockerfiles/elasticsearch.Dockerfile | 35 +++ Dockerfiles/file-monitor.Dockerfile | 27 +- Dockerfiles/file-upload.Dockerfile | 37 ++- Dockerfiles/filebeat.Dockerfile | 41 ++- Dockerfiles/freq.Dockerfile | 27 +- Dockerfiles/htadmin.Dockerfile | 16 +- Dockerfiles/kibana.Dockerfile | 34 ++- Dockerfiles/logstash.Dockerfile | 37 ++- Dockerfiles/moloch.Dockerfile | 37 ++- Dockerfiles/name-map-ui.Dockerfile | 45 ++- Dockerfiles/nginx.Dockerfile | 46 ++- Dockerfiles/pcap-capture.Dockerfile | 33 ++- Dockerfiles/pcap-monitor.Dockerfile | 22 +- Dockerfiles/zeek.Dockerfile | 21 +- README.md | 77 ++--- curator/scripts/docker-entrypoint.sh | 7 + docker-compose-standalone.yml | 89 ++++-- docker-compose.yml | 92 ++++-- elastalert/elastalert-start.sh | 2 + file-monitor/supervisord.conf | 17 +- file-upload/docker-entrypoint.sh | 4 +- file-upload/supervisord.conf | 12 +- .../filebeat-watch-zeeklogs-uploads-folder.sh | 5 +- filebeat/supervisord.conf | 20 +- freq-server/supervisord.conf | 11 +- htadmin/supervisord.conf | 12 +- .../kibana-create-moloch-sessions-index.sh | 26 +- kibana/supervisord.conf | 17 +- logstash/supervisord.conf | 7 +- moloch/patch/query_db_fields_pr1463.patch | 264 ------------------ moloch/scripts/initmoloch.sh | 10 +- moloch/scripts/viewer_service.sh | 2 +- moloch/scripts/wise_service.sh | 2 +- moloch/supervisord.conf | 14 +- name-map-ui/config/nginx.conf | 6 +- .../config/supervisor_logstash_ctl.conf | 2 +- name-map-ui/config/supervisord.conf | 34 ++- name-map-ui/scripts/name-map-save-watch.sh | 2 +- nginx/supervisord.conf | 12 +- pcap-capture/scripts/supervisor.sh | 9 +- pcap-capture/supervisord.conf | 13 +- pcap-capture/templates/netsniff.template | 2 +- pcap-capture/templates/tcpdump.template | 2 +- .../scripts/watch-pcap-uploads-folder.sh | 4 +- pcap-monitor/supervisord.conf | 12 +- scripts/control.py | 84 +++++- scripts/install.py | 34 ++- scripts/malcolm_common.py | 19 
+- sensor-iso/README.md | 2 +- sensor-iso/docs/Notes.md | 4 +- sensor-iso/moloch/Dockerfile | 2 +- shared/bin/configure-capture.py | 2 +- shared/bin/docker-uid-gid-setup.sh | 51 ++++ zeek/supervisord.conf | 11 +- 56 files changed, 904 insertions(+), 606 deletions(-) create mode 100644 Dockerfiles/elasticsearch.Dockerfile create mode 100755 curator/scripts/docker-entrypoint.sh delete mode 100644 moloch/patch/query_db_fields_pr1463.patch create mode 100755 shared/bin/docker-uid-gid-setup.sh diff --git a/Dockerfiles/curator.Dockerfile b/Dockerfiles/curator.Dockerfile index 05a83b896..3661f8a18 100644 --- a/Dockerfiles/curator.Dockerfile +++ b/Dockerfiles/curator.Dockerfile @@ -10,6 +10,17 @@ LABEL org.opencontainers.image.vendor='Idaho National Laboratory' LABEL org.opencontainers.image.title='malcolmnetsec/elastalert' LABEL org.opencontainers.image.description='Malcolm container providing curation for Elasticsearch indices' +ARG DEFAULT_UID=1000 +ARG DEFAULT_GID=1000 +ENV DEFAULT_UID $DEFAULT_UID +ENV DEFAULT_GID $DEFAULT_GID +ENV PUSER "curator" +ENV PGROUP "curator" +ENV PUSER_PRIV_DROP true + +ENV DEBIAN_FRONTEND noninteractive +ENV TERM xterm + ARG ES_HOST=elasticsearch ARG ES_PORT=9200 ARG CURATOR_TIMEOUT=120 @@ -44,18 +55,20 @@ ENV CURATOR_SNAPSHOT_REPO $CURATOR_SNAPSHOT_REPO ENV CURATOR_SNAPSHOT_COMPRESSED $CURATOR_SNAPSHOT_COMPRESSED ENV CURATOR_SNAPSHOT_DISABLED $CURATOR_SNAPSHOT_DISABLED -ENV DEBIAN_FRONTEND noninteractive +ENV SUPERCRONIC_URL "https://github.com/aptible/supercronic/releases/download/v0.1.9/supercronic-linux-amd64" +ENV SUPERCRONIC "supercronic-linux-amd64" +ENV SUPERCRONIC_SHA1SUM "5ddf8ea26b56d4a7ff6faecdd8966610d5cb9d85" +ENV SUPERCRONIC_CRONTAB "/etc/crontab" + ENV CURATOR_VERSION "5.8.1" ENV CRON "5 0 * * *" ENV CONFIG_FILE "/config/config_file.yml" ENV ACTION_FILE "/config/action_file.yml" -ENV CURATOR_USER "curator" RUN sed -i "s/buster main/buster main contrib non-free/g" /etc/apt/sources.list && \ apt-get update && \ apt-get -y -q 
install \ build-essential \ - cron \ curl \ procps \ psmisc \ @@ -63,20 +76,27 @@ RUN sed -i "s/buster main/buster main contrib non-free/g" /etc/apt/sources.list python3-dev \ python3-pip && \ pip3 install elasticsearch-curator==${CURATOR_VERSION} && \ - groupadd --gid 1000 ${CURATOR_USER} && \ - useradd -M --uid 1000 --gid 1000 ${CURATOR_USER} && \ + groupadd --gid ${DEFAULT_GID} ${PUSER} && \ + useradd -M --uid ${DEFAULT_UID} --gid ${DEFAULT_GID} ${PUSER} && \ apt-get -q -y --purge remove guile-2.2-libs python3-dev build-essential && \ apt-get -q -y autoremove && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* && \ - bash -c 'echo -e "${CRON} su -c \"/usr/local/bin/curator --config ${CONFIG_FILE} ${ACTION_FILE}\" ${CURATOR_USER} >/proc/1/fd/1 2>/proc/1/fd/2\n@reboot su -c \"/usr/local/bin/elastic_search_status.sh -w && /usr/local/bin/register-elasticsearch-snapshot-repo.sh\" ${CURATOR_USER} >/proc/1/fd/1 2>/proc/1/fd/2" | crontab -' + curl -fsSLO "$SUPERCRONIC_URL" && \ + echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum -c - && \ + chmod +x "$SUPERCRONIC" && \ + mv "$SUPERCRONIC" "/usr/local/bin/${SUPERCRONIC}" && \ + ln -s "/usr/local/bin/${SUPERCRONIC}" /usr/local/bin/supercronic && \ + bash -c 'echo -e "${CRON} /usr/local/bin/curator --config ${CONFIG_FILE} ${ACTION_FILE}" > ${SUPERCRONIC_CRONTAB}' -ADD shared/bin/cron_env_deb.sh /usr/local/bin/ +ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ ADD shared/bin/elastic_search_status.sh /usr/local/bin/ ADD curator/scripts /usr/local/bin/ ADD curator/config /config/ -CMD ["/usr/local/bin/cron_env_deb.sh"] +ENTRYPOINT ["/usr/local/bin/docker-uid-gid-setup.sh"] + +CMD ["/usr/local/bin/docker-entrypoint.sh"] # to be populated at build-time: diff --git a/Dockerfiles/elastalert.Dockerfile b/Dockerfiles/elastalert.Dockerfile index 4e98db70e..af77c9367 100644 --- a/Dockerfiles/elastalert.Dockerfile +++ b/Dockerfiles/elastalert.Dockerfile @@ -10,26 +10,35 @@ LABEL 
org.opencontainers.image.vendor='Idaho National Laboratory' LABEL org.opencontainers.image.title='malcolmnetsec/elastalert' LABEL org.opencontainers.image.description='Malcolm container providing an alerting framework for Elasticsearch' +ARG DEFAULT_UID=1000 +ARG DEFAULT_GID=1000 +ENV DEFAULT_UID $DEFAULT_UID +ENV DEFAULT_GID $DEFAULT_GID +ENV PUSER "node" +ENV PGROUP "node" +ENV PUSER_PRIV_DROP true + +ENV TERM xterm USER root RUN apk update && \ - apk add bash curl && \ + apk add bash curl shadow && \ rm -rf /var/cache/apk/* +ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ ADD elastalert/elastalert-start.sh /usr/local/bin/ ADD shared/bin/elastic_search_status.sh /usr/local/bin/ RUN chmod +x /usr/local/bin/elastalert-start.sh && \ mkdir -p /opt/elastalert/server_data/tests && \ - chown -R node:node /opt + chown -R ${PUSER}:${PGROUP} /opt VOLUME ["/opt/elastalert/server_data"] -USER node - -ENTRYPOINT ["/usr/local/bin/elastalert-start.sh"] +ENTRYPOINT ["/usr/local/bin/docker-uid-gid-setup.sh"] +CMD ["/usr/local/bin/elastalert-start.sh"] # to be populated at build-time: ARG BUILD_DATE diff --git a/Dockerfiles/elasticsearch.Dockerfile b/Dockerfiles/elasticsearch.Dockerfile new file mode 100644 index 000000000..2f8b7f32c --- /dev/null +++ b/Dockerfiles/elasticsearch.Dockerfile @@ -0,0 +1,35 @@ +FROM docker.elastic.co/elasticsearch/elasticsearch-oss:7.6.2 + +# Copyright (c) 2020 Battelle Energy Alliance, LLC. All rights reserved. 
+LABEL maintainer="malcolm.netsec@gmail.com" +LABEL org.opencontainers.image.authors='malcolm.netsec@gmail.com' +LABEL org.opencontainers.image.url='https://github.com/idaholab/Malcolm' +LABEL org.opencontainers.image.documentation='https://github.com/idaholab/Malcolm/blob/master/README.md' +LABEL org.opencontainers.image.source='https://github.com/idaholab/Malcolm' +LABEL org.opencontainers.image.vendor='Idaho National Laboratory' +LABEL org.opencontainers.image.title='malcolmnetsec/elasticsearch-oss' +LABEL org.opencontainers.image.description='Malcolm container providing Elasticsearch (the Apache-licensed variant)' + +ARG DEFAULT_UID=1000 +ARG DEFAULT_GID=1000 +ENV DEFAULT_UID $DEFAULT_UID +ENV DEFAULT_GID $DEFAULT_GID +ENV PUSER "elasticsearch" +ENV PGROUP "elasticsearch" +ENV PUSER_PRIV_DROP true + +ENV TERM xterm + +ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ + +ENTRYPOINT ["/usr/local/bin/docker-uid-gid-setup.sh", "/usr/local/bin/docker-entrypoint.sh"] + + +# to be populated at build-time: +ARG BUILD_DATE +ARG MALCOLM_VERSION +ARG VCS_REVISION + +LABEL org.opencontainers.image.created=$BUILD_DATE +LABEL org.opencontainers.image.version=$MALCOLM_VERSION +LABEL org.opencontainers.image.revision=$VCS_REVISION diff --git a/Dockerfiles/file-monitor.Dockerfile b/Dockerfiles/file-monitor.Dockerfile index 66ecf6473..1ac169ecd 100644 --- a/Dockerfiles/file-monitor.Dockerfile +++ b/Dockerfiles/file-monitor.Dockerfile @@ -10,8 +10,16 @@ LABEL org.opencontainers.image.vendor='Idaho National Laboratory' LABEL org.opencontainers.image.title='malcolmnetsec/file-monitor' LABEL org.opencontainers.image.description='Malcolm container for scanning files extracted by Zeek' +ARG DEFAULT_UID=1000 +ARG DEFAULT_GID=1000 +ENV DEFAULT_UID $DEFAULT_UID +ENV DEFAULT_GID $DEFAULT_GID +ENV PUSER "monitor" +ENV PGROUP "monitor" +ENV PUSER_PRIV_DROP true ENV DEBIAN_FRONTEND noninteractive +ENV TERM xterm ARG ZEEK_EXTRACTOR_PATH=/data/zeek/extract_files ARG 
ZEEK_LOG_DIRECTORY=/data/zeek/logs @@ -74,7 +82,6 @@ RUN sed -i "s/buster main/buster main contrib non-free/g" /etc/apt/sources.list python3-requests \ python3-zmq && \ pip3 install clamd supervisor && \ - mkdir -p /var/log/supervisor && \ apt-get -y -q --allow-downgrades --allow-remove-essential --allow-change-held-packages --purge remove python3-dev build-essential && \ apt-get -y -q --allow-downgrades --allow-remove-essential --allow-change-held-packages autoremove && \ apt-get clean && \ @@ -82,23 +89,25 @@ RUN sed -i "s/buster main/buster main contrib non-free/g" /etc/apt/sources.list wget -O /var/lib/clamav/main.cvd http://database.clamav.net/main.cvd && \ wget -O /var/lib/clamav/daily.cvd http://database.clamav.net/daily.cvd && \ wget -O /var/lib/clamav/bytecode.cvd http://database.clamav.net/bytecode.cvd && \ - groupadd --gid 1000 monitor && \ - useradd -M --uid 1000 --gid 1000 monitor && \ + groupadd --gid ${DEFAULT_GID} ${PGROUP} && \ + useradd -M --uid ${DEFAULT_UID} --gid ${DEFAULT_GID} ${PUSER} && \ + usermod -a -G tty ${PUSER} && \ mkdir -p /var/log/clamav /var/lib/clamav && \ - chown -R monitor:monitor /var/log/clamav /var/lib/clamav && \ + chown -R ${PUSER}:${PGROUP} /var/log/clamav /var/lib/clamav && \ chmod -R 750 /var/log/clamav /var/lib/clamav && \ sed -i 's/^Foreground .*$/Foreground true/g' /etc/clamav/clamd.conf && \ - sed -i 's/^User .*$/User monitor/g' /etc/clamav/clamd.conf && \ + sed -i "s/^User .*$/User ${PUSER}/g" /etc/clamav/clamd.conf && \ sed -i "s|^LocalSocket .*$|LocalSocket $CLAMD_SOCKET_FILE|g" /etc/clamav/clamd.conf && \ - sed -i 's/^LocalSocketGroup .*$/LocalSocketGroup monitor/g' /etc/clamav/clamd.conf && \ + sed -i "s/^LocalSocketGroup .*$/LocalSocketGroup ${PGROUP}/g" /etc/clamav/clamd.conf && \ sed -i "s/^MaxFileSize .*$/MaxFileSize $EXTRACTED_FILE_MAX_BYTES/g" /etc/clamav/clamd.conf && \ sed -i "s/^MaxScanSize .*$/MaxScanSize $(echo "$EXTRACTED_FILE_MAX_BYTES * 4" | bc)/g" /etc/clamav/clamd.conf && \ echo "TCPSocket 3310" 
>> /etc/clamav/clamd.conf && \ if ! [ -z $HTTPProxyServer ]; then echo "HTTPProxyServer $HTTPProxyServer" >> /etc/clamav/freshclam.conf; fi && \ if ! [ -z $HTTPProxyPort ]; then echo "HTTPProxyPort $HTTPProxyPort" >> /etc/clamav/freshclam.conf; fi && \ sed -i 's/^Foreground .*$/Foreground true/g' /etc/clamav/freshclam.conf && \ - sed -i 's/^DatabaseOwner .*$/DatabaseOwner monitor/g' /etc/clamav/freshclam.conf + sed -i "s/^DatabaseOwner .*$/DatabaseOwner ${PUSER}/g" /etc/clamav/freshclam.conf +ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ ADD shared/bin/zeek_carve_*.py /usr/local/bin/ ADD shared/bin/malass_client.py /usr/local/bin/ ADD file-monitor/supervisord.conf /etc/supervisord.conf @@ -109,7 +118,9 @@ VOLUME ["/var/lib/clamav"] EXPOSE 3310 -CMD ["/usr/local/bin/supervisord", "-c", "/etc/supervisord.conf", "-u", "root", "-n"] +ENTRYPOINT ["/usr/local/bin/docker-uid-gid-setup.sh"] + +CMD ["/usr/local/bin/supervisord", "-c", "/etc/supervisord.conf", "-n"] # to be populated at build-time: diff --git a/Dockerfiles/file-upload.Dockerfile b/Dockerfiles/file-upload.Dockerfile index 2279d458b..b7a9b98fc 100644 --- a/Dockerfiles/file-upload.Dockerfile +++ b/Dockerfiles/file-upload.Dockerfile @@ -1,15 +1,8 @@ FROM debian:buster-slim AS build # Copyright (c) 2020 Battelle Energy Alliance, LLC. All rights reserved. 
-LABEL maintainer="malcolm.netsec@gmail.com" -LABEL org.opencontainers.image.authors='malcolm.netsec@gmail.com' -LABEL org.opencontainers.image.url='https://github.com/idaholab/Malcolm' -LABEL org.opencontainers.image.documentation='https://github.com/idaholab/Malcolm/blob/master/README.md' -LABEL org.opencontainers.image.source='https://github.com/idaholab/Malcolm' -LABEL org.opencontainers.image.vendor='Idaho National Laboratory' -LABEL org.opencontainers.image.title='malcolmnetsec/file-upload' -LABEL org.opencontainers.image.description='Malcolm container providing an interface for uploading PCAP files and Zeek logs for processing' +ENV DEBIAN_FRONTEND noninteractive ARG SITE_NAME="Capture File and Log Archive Upload" @@ -32,11 +25,31 @@ RUN apt-get update && \ FROM debian:buster-slim AS runtime -COPY --from=build /jQuery-File-Upload/ /var/www/upload/ +LABEL maintainer="malcolm.netsec@gmail.com" +LABEL org.opencontainers.image.authors='malcolm.netsec@gmail.com' +LABEL org.opencontainers.image.url='https://github.com/idaholab/Malcolm' +LABEL org.opencontainers.image.documentation='https://github.com/idaholab/Malcolm/blob/master/README.md' +LABEL org.opencontainers.image.source='https://github.com/idaholab/Malcolm' +LABEL org.opencontainers.image.vendor='Idaho National Laboratory' +LABEL org.opencontainers.image.title='malcolmnetsec/file-upload' +LABEL org.opencontainers.image.description='Malcolm container providing an interface for uploading PCAP files and Zeek logs for processing' + +ARG DEFAULT_UID=33 +ARG DEFAULT_GID=33 +ENV DEFAULT_UID $DEFAULT_UID +ENV DEFAULT_GID $DEFAULT_GID +ENV PUSER "www-data" +ENV PGROUP "www-data" +# not dropping privileges globally in this container as required to run SFTP server. this can +# be handled by supervisord instead on an as-needed basis, and/or php-fpm/nginx itself +# will drop privileges to www-data as well. 
+ENV PUSER_PRIV_DROP false ENV DEBIAN_FRONTEND noninteractive ENV TERM xterm +COPY --from=build /jQuery-File-Upload/ /var/www/upload/ + RUN apt-get update && \ apt-get -y -q --allow-downgrades --allow-remove-essential --allow-change-held-packages install --no-install-recommends \ wget \ @@ -52,6 +65,7 @@ RUN apt-get update && \ apt-get clean -y -q && \ rm -rf /var/lib/apt/lists/* +ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ ADD docs/images/logo/Malcolm_banner.png /var/www/upload/Malcolm_banner.png ADD file-upload/docker-entrypoint.sh /docker-entrypoint.sh ADD file-upload/jquery-file-upload/bootstrap.min.css /var/www/upload/bower_components/bootstrap/dist/css/bootstrap.min.css @@ -73,14 +87,15 @@ RUN mkdir -p /var/run/sshd /var/www/upload/server/php/chroot /run/php && \ chmod 775 /var/www/upload/server/php/chroot/files && \ chmod 755 /var /var/www /var/www/upload /var/www/upload/server /var/www/upload/server/php \ /var/www/upload/server/php/chroot && \ - echo "Put your files into /files. Don't use subdirectories.\nThey cannot be accessed via the web user interface!" \ + echo "Put your files into /files. Don't use subdirectories." 
\ >/var/www/upload/server/php/chroot/README.txt && \ rm -rf /var/lib/apt/lists/* /var/cache/* /tmp/* /var/tmp/* /var/www/upload/server/php/chroot/files/.gitignore /tmp/sshd_config VOLUME [ "/var/www/upload/server/php/chroot/files" ] EXPOSE 22 80 -ENTRYPOINT [ "/docker-entrypoint.sh" ] +ENTRYPOINT ["/usr/local/bin/docker-uid-gid-setup.sh", "/docker-entrypoint.sh"] + CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf", "-u", "root", "-n"] diff --git a/Dockerfiles/filebeat.Dockerfile b/Dockerfiles/filebeat.Dockerfile index eb657945d..7ac032b4a 100644 --- a/Dockerfiles/filebeat.Dockerfile +++ b/Dockerfiles/filebeat.Dockerfile @@ -10,6 +10,19 @@ LABEL org.opencontainers.image.vendor='Idaho National Laboratory' LABEL org.opencontainers.image.title='malcolmnetsec/filebeat-oss' LABEL org.opencontainers.image.description='Malcolm container providing Filebeat (the Apache-licensed variant)' +ARG DEFAULT_UID=1000 +ARG DEFAULT_GID=1000 +ENV DEFAULT_UID $DEFAULT_UID +ENV DEFAULT_GID $DEFAULT_GID +ENV PUSER "filebeat" +ENV PGROUP "filebeat" +# not dropping privileges globally: supervisord will take care of it +# on a case-by-case basis so that one script (filebeat-watch-zeeklogs-uploads-folder.sh) +# can chown uploaded files +ENV PUSER_PRIV_DROP false + +ENV TERM xterm + ARG FILEBEAT_LOG_CLEANUP_MINUTES=0 ARG FILEBEAT_ZIP_CLEANUP_MINUTES=0 ARG FILEBEAT_SCAN_FREQUENCY=10s @@ -25,29 +38,39 @@ ARG FILEBEAT_NGINX_LOG_PATH="/data/nginx" ARG NGINX_LOG_ACCESS_AND_ERRORS=false ARG AUTO_TAG=true +ENV SUPERCRONIC_URL "https://github.com/aptible/supercronic/releases/download/v0.1.9/supercronic-linux-amd64" +ENV SUPERCRONIC "supercronic-linux-amd64" +ENV SUPERCRONIC_SHA1SUM "5ddf8ea26b56d4a7ff6faecdd8966610d5cb9d85" +ENV SUPERCRONIC_CRONTAB "/etc/crontab" + USER root RUN yum install -y epel-release && \ - yum update -y && \ - yum install -y cronie inotify-tools file psmisc tar gzip unzip cpio bzip2 lzma xz p7zip p7zip-plugins unar python-setuptools python-pip && \ - yum clean all && \ + 
yum update -y && \ + yum install -y curl inotify-tools file psmisc tar gzip unzip cpio bzip2 lzma xz p7zip p7zip-plugins unar python-setuptools python-pip && \ + yum clean all && \ + ln -sr /usr/sbin/fuser /bin/fuser && \ easy_install supervisor && \ pip install patool entrypoint2 pyunpack python-magic ordered-set==3.1.1 && \ - ln -sr /usr/sbin/fuser /bin/fuser + curl -fsSLO "$SUPERCRONIC_URL" && \ + echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum -c - && \ + chmod +x "$SUPERCRONIC" && \ + mv "$SUPERCRONIC" "/usr/local/bin/${SUPERCRONIC}" && \ + ln -s "/usr/local/bin/${SUPERCRONIC}" /usr/local/bin/supercronic -ADD shared/bin/cron_env_centos.sh /data/ +ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ ADD filebeat/filebeat.yml /usr/share/filebeat/filebeat.yml ADD filebeat/filebeat-nginx.yml /usr/share/filebeat-nginx/filebeat-nginx.yml ADD filebeat/scripts /data/ ADD shared/bin/elastic_search_status.sh /data/ ADD filebeat/supervisord.conf /etc/supervisord.conf -RUN mkdir -p /var/log/supervisor /usr/share/filebeat-nginx/data && \ - chown -R root:filebeat /usr/share/filebeat-nginx && \ +RUN mkdir -p /usr/share/filebeat-nginx/data && \ + chown -R root:${PGROUP} /usr/share/filebeat-nginx && \ cp -a /usr/share/filebeat/module /usr/share/filebeat-nginx/module && \ chmod 750 /usr/share/filebeat-nginx && \ chmod 770 /usr/share/filebeat-nginx/data && \ chmod 755 /data/*.sh /data/*.py && \ - (echo -e "* * * * * su -c /data/filebeat-process-zeek-folder.sh filebeat >/dev/null 2>&1\n*/5 * * * * su -c /data/filebeat-clean-zeeklogs-processed-folder.py filebeat >/dev/null 2>&1" | crontab -) + (echo -e "* * * * * /data/filebeat-process-zeek-folder.sh\n*/5 * * * * /data/filebeat-clean-zeeklogs-processed-folder.py" > ${SUPERCRONIC_CRONTAB}) ENV FILEBEAT_LOG_CLEANUP_MINUTES $FILEBEAT_LOG_CLEANUP_MINUTES ENV FILEBEAT_ZIP_CLEANUP_MINUTES $FILEBEAT_ZIP_CLEANUP_MINUTES @@ -70,6 +93,8 @@ ENV PATH="/data:${PATH}" VOLUME ["/usr/share/filebeat/data", 
"/usr/share/filebeat-nginx/data"] +ENTRYPOINT ["/usr/local/bin/docker-uid-gid-setup.sh"] + CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf", "-u", "root", "-n"] diff --git a/Dockerfiles/freq.Dockerfile b/Dockerfiles/freq.Dockerfile index add924d34..24ae6bc5b 100644 --- a/Dockerfiles/freq.Dockerfile +++ b/Dockerfiles/freq.Dockerfile @@ -10,11 +10,20 @@ LABEL org.opencontainers.image.vendor='Idaho National Laboratory' LABEL org.opencontainers.image.title='malcolmnetsec/freq' LABEL org.opencontainers.image.description='Malcolm container providing an interface to Mark Baggett''s freq_server.py' -ARG FREQ_USER=freq +ARG DEFAULT_UID=1000 +ARG DEFAULT_GID=1000 +ENV DEFAULT_UID $DEFAULT_UID +ENV DEFAULT_GID $DEFAULT_GID +ENV PUSER "freq" +ENV PGROUP "freq" +ENV PUSER_PRIV_DROP true + +ENV DEBIAN_FRONTEND noninteractive +ENV TERM xterm + ARG FREQ_PORT=10004 ARG FREQ_LOOKUP=true -ENV FREQ_USER $FREQ_USER ENV FREQ_PORT $FREQ_PORT ENV FREQ_LOOKUP $FREQ_LOOKUP @@ -30,27 +39,31 @@ RUN sed -i "s/buster main/buster main contrib non-free/g" /etc/apt/sources.list python3-dev \ python3-pip && \ pip3 install supervisor && \ - mkdir -p /var/log/supervisor && \ cd /opt && \ mkdir -p ./freq_server && \ curl -sSL "$FREQ_URL" | tar xzvf - -C ./freq_server --strip-components 1 && \ rm -rf /opt/freq_server/systemd /opt/freq_server/upstart /opt/freq_server/*.md /opt/freq_server/*.exe && \ mv -v "$(ls /opt/freq_server/*.freq | tail -n 1)" /opt/freq_server/freq_table.freq && \ - groupadd --gid 1000 $FREQ_USER && \ - useradd -M --uid 1000 --gid 1000 --home /nonexistant $FREQ_USER && \ - chown -R $FREQ_USER:$FREQ_USER /opt/freq_server && \ + groupadd --gid ${DEFAULT_GID} ${PGROUP} && \ + useradd -M --uid ${DEFAULT_UID} --gid ${DEFAULT_GID} --home /nonexistant ${PUSER} && \ + chown -R ${PUSER}:${PGROUP} /opt/freq_server && \ + usermod -a -G tty ${PUSER} && \ apt-get -y -q --allow-downgrades --allow-remove-essential --allow-change-held-packages --purge remove git python3-dev && \ apt-get 
-y -q --allow-downgrades --allow-remove-essential --allow-change-held-packages autoremove && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* +ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ ADD freq-server/supervisord.conf /etc/supervisord.conf WORKDIR /opt/freq_server EXPOSE $FREQ_PORT -CMD ["/usr/local/bin/supervisord", "-c", "/etc/supervisord.conf", "-u", "root", "-n"] +ENTRYPOINT ["/usr/local/bin/docker-uid-gid-setup.sh"] + +CMD ["/usr/local/bin/supervisord", "-c", "/etc/supervisord.conf", "-n"] + # to be populated at build-time: ARG BUILD_DATE diff --git a/Dockerfiles/htadmin.Dockerfile b/Dockerfiles/htadmin.Dockerfile index f590979a9..e11e0970e 100644 --- a/Dockerfiles/htadmin.Dockerfile +++ b/Dockerfiles/htadmin.Dockerfile @@ -10,6 +10,15 @@ LABEL org.opencontainers.image.vendor='Idaho National Laboratory' LABEL org.opencontainers.image.title='malcolmnetsec/htadmin' LABEL org.opencontainers.image.description='Malcolm container providing htadmin for managing login accounts in an htpasswd file' +ARG DEFAULT_UID=33 +ARG DEFAULT_GID=33 +ENV DEFAULT_UID $DEFAULT_UID +ENV DEFAULT_GID $DEFAULT_GID +ENV PUSER "www-data" +ENV PGROUP "www-data" +# not dropping privileges globally so nginx can bind privileged ports internally. 
+# nginx and php-fpm will drop privileges to "www-data" user for worker processes +ENV PUSER_PRIV_DROP false ENV DEBIAN_FRONTEND noninteractive ENV TERM xterm @@ -58,15 +67,14 @@ RUN apt-get update && \ curl -s -S -L -J -O "https://maxcdn.bootstrapcdn.com/bootstrap/$BOOTSTRAP_VERSION/fonts/glyphicons-halflings-regular.ttf" && \ curl -s -S -L -J -O "https://maxcdn.bootstrapcdn.com/bootstrap/$BOOTSTRAP_VERSION/fonts/glyphicons-halflings-regular.woff" && \ curl -s -S -L -J -O "https://maxcdn.bootstrapcdn.com/bootstrap/$BOOTSTRAP_VERSION/fonts/glyphicons-halflings-regular.woff2" && \ - usermod --non-unique --uid 1000 www-data && \ - groupmod --non-unique --gid 1000 www-data && \ - chown -R www-data:www-data /var/www && \ + chown -R ${PUSER}:${PGROUP} /var/www && \ apt-get -y -q --allow-downgrades --allow-remove-essential --allow-change-held-packages --purge remove \ make libmcrypt-dev php-pear php-dev && \ apt-get autoremove -y -q && \ apt-get clean -y -q && \ rm -rf /var/lib/apt/lists/* /var/cache/* /tmp/* /var/tmp/* /var/www/html +ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ ADD docs/images/favicon/favicon.ico /var/www/htadmin/ ADD htadmin/supervisord.conf /supervisord.conf ADD htadmin/src /var/www/htadmin/ @@ -75,6 +83,8 @@ ADD htadmin/nginx/sites-available/default /etc/nginx/sites-available/default EXPOSE 80 +ENTRYPOINT ["/usr/local/bin/docker-uid-gid-setup.sh"] + CMD ["/usr/bin/supervisord", "-c", "/supervisord.conf", "-u", "root", "-n"] diff --git a/Dockerfiles/kibana.Dockerfile b/Dockerfiles/kibana.Dockerfile index 3c16e3662..25446a8a9 100644 --- a/Dockerfiles/kibana.Dockerfile +++ b/Dockerfiles/kibana.Dockerfile @@ -10,6 +10,15 @@ LABEL org.opencontainers.image.vendor='Idaho National Laboratory' LABEL org.opencontainers.image.title='malcolmnetsec/kibana-oss' LABEL org.opencontainers.image.description='Malcolm container providing Kibana (the Apache-licensed variant)' +ARG DEFAULT_UID=1000 +ARG DEFAULT_GID=1000 +ENV DEFAULT_UID $DEFAULT_UID +ENV 
DEFAULT_GID $DEFAULT_GID +ENV PUSER "kibana" +ENV PGROUP "kibana" +ENV PUSER_PRIV_DROP true + +ENV TERM xterm ARG ELASTICSEARCH_URL="http://elasticsearch:9200" ARG CREATE_ES_MOLOCH_SESSION_INDEX="true" @@ -30,6 +39,11 @@ ENV KIBANA_OFFLINE_REGION_MAPS_PORT $KIBANA_OFFLINE_REGION_MAPS_PORT ENV PATH="/data:${PATH}" ENV ELASTICSEARCH_URL $ELASTICSEARCH_URL +ENV SUPERCRONIC_URL "https://github.com/aptible/supercronic/releases/download/v0.1.9/supercronic-linux-amd64" +ENV SUPERCRONIC "supercronic-linux-amd64" +ENV SUPERCRONIC_SHA1SUM "5ddf8ea26b56d4a7ff6faecdd8966610d5cb9d85" +ENV SUPERCRONIC_CRONTAB "/etc/crontab" + USER root ADD kibana/plugin-patches /tmp/plugin-patches @@ -66,11 +80,16 @@ RUN sed -i "s/d\.name\.split/d\.name\.toString()\.split/" /usr/share/kibana/src/ curl -sSL -o /tmp/kibana-drilldown.zip "https://codeload.github.com/mmguero-dev/kibana-plugin-drilldownmenu/zip/master" && \ yum install -y epel-release && \ yum update -y && \ - yum install -y curl cronie inotify-tools npm patch psmisc python-requests python-setuptools zip unzip && \ + yum install -y curl inotify-tools npm patch psmisc python-requests python-setuptools zip unzip && \ yum clean all && \ easy_install supervisor && \ npm install -g http-server && \ - mkdir -p /var/log/supervisor && \ + usermod -a -G tty ${PUSER} && \ + curl -fsSLO "$SUPERCRONIC_URL" && \ + echo "${SUPERCRONIC_SHA1SUM} ${SUPERCRONIC}" | sha1sum -c - && \ + chmod +x "$SUPERCRONIC" && \ + mv "$SUPERCRONIC" "/usr/local/bin/${SUPERCRONIC}" && \ + ln -s "/usr/local/bin/${SUPERCRONIC}" /usr/local/bin/supercronic && \ cd /tmp && \ echo "Installing ElastAlert plugin..." 
&& \ unzip elastalert-kibana-plugin.zip kibana/elastalert-kibana-plugin/package.json kibana/elastalert-kibana-plugin/public/components/main/main.js && \ @@ -141,6 +160,7 @@ RUN sed -i "s/d\.name\.split/d\.name\.toString()\.split/" /usr/share/kibana/src/ rm -rf /tmp/kibana-swimlane.zip /tmp/kibana && \ rm -rf /tmp/plugin-patches /tmp/elastalert-server-routes.js /tmp/npm-* +ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ ADD kibana/dashboards /opt/kibana/dashboards ADD kibana/kibana-offline-maps.yml /opt/kibana/config/kibana-offline-maps.yml ADD kibana/kibana-standard.yml /opt/kibana/config/kibana-standard.yml @@ -148,15 +168,17 @@ ADD kibana/maps /opt/maps ADD kibana/scripts /data/ ADD kibana/supervisord.conf /etc/supervisord.conf ADD kibana/zeek_template.json /data/zeek_template.json -ADD shared/bin/cron_env_centos.sh /data/ ADD shared/bin/elastic_search_status.sh /data/ RUN chmod 755 /data/*.sh /data/*.py && \ - chown -R kibana:kibana /opt/kibana/dashboards /opt/maps /opt/kibana/config/kibana*.yml && \ + chown -R ${PUSER}:${PGROUP} /opt/kibana/dashboards /opt/maps /opt/kibana/config/kibana*.yml && \ chmod 400 /opt/maps/* && \ - (echo -e "*/2 * * * * su -c /data/kibana-create-moloch-sessions-index.sh kibana >/dev/null 2>&1\n0 10 * * * su -c /data/kibana_index_refresh.py kibana >/dev/null 2>&1\n" | crontab -) + (echo -e "*/2 * * * * /data/kibana-create-moloch-sessions-index.sh\n0 10 * * * /data/kibana_index_refresh.py" > ${SUPERCRONIC_CRONTAB}) + +ENTRYPOINT ["/usr/local/bin/docker-uid-gid-setup.sh"] + +CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf", "-n"] -CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf", "-u", "root", "-n"] # to be populated at build-time: ARG BUILD_DATE diff --git a/Dockerfiles/logstash.Dockerfile b/Dockerfiles/logstash.Dockerfile index 8066c626c..5c99f1c6a 100644 --- a/Dockerfiles/logstash.Dockerfile +++ b/Dockerfiles/logstash.Dockerfile @@ -1,14 +1,6 @@ FROM centos:7 AS build # Copyright (c) 2020 Battelle Energy 
Alliance, LLC. All rights reserved. -LABEL maintainer="malcolm.netsec@gmail.com" -LABEL org.opencontainers.image.authors='malcolm.netsec@gmail.com' -LABEL org.opencontainers.image.url='https://github.com/idaholab/Malcolm' -LABEL org.opencontainers.image.documentation='https://github.com/idaholab/Malcolm/blob/master/README.md' -LABEL org.opencontainers.image.source='https://github.com/idaholab/Malcolm' -LABEL org.opencontainers.image.vendor='Idaho National Laboratory' -LABEL org.opencontainers.image.title='malcolmnetsec/logstash-oss' -LABEL org.opencontainers.image.description='Malcolm container providing Logstash (the Apache-licensed variant)' RUN yum install -y epel-release && \ yum update -y && \ @@ -32,6 +24,25 @@ RUN /bin/bash -lc "command curl -sSL https://rvm.io/mpapis.asc | gpg2 --import - FROM docker.elastic.co/logstash/logstash-oss:7.6.2 +LABEL maintainer="malcolm.netsec@gmail.com" +LABEL org.opencontainers.image.authors='malcolm.netsec@gmail.com' +LABEL org.opencontainers.image.url='https://github.com/idaholab/Malcolm' +LABEL org.opencontainers.image.documentation='https://github.com/idaholab/Malcolm/blob/master/README.md' +LABEL org.opencontainers.image.source='https://github.com/idaholab/Malcolm' +LABEL org.opencontainers.image.vendor='Idaho National Laboratory' +LABEL org.opencontainers.image.title='malcolmnetsec/logstash-oss' +LABEL org.opencontainers.image.description='Malcolm container providing Logstash (the Apache-licensed variant)' + +ARG DEFAULT_UID=1000 +ARG DEFAULT_GID=1000 +ENV DEFAULT_UID $DEFAULT_UID +ENV DEFAULT_GID $DEFAULT_GID +ENV PUSER "logstash" +ENV PGROUP "logstash" +ENV PUSER_PRIV_DROP true + +ENV TERM xterm + ARG LOGSTASH_ENRICHMENT_PIPELINE=enrichment ARG LOGSTASH_PARSE_PIPELINE_ADDRESSES=zeek-parse ARG LOGSTASH_ELASTICSEARCH_PIPELINE_ADDRESS_INTERNAL=internal-es @@ -61,6 +72,7 @@ RUN yum install -y epel-release && \ logstash-plugin install /opt/logstash-filter-ieee_oui/logstash-filter-ieee_oui-1.0.6.gem && \ rm -rf 
/opt/logstash-filter-ieee_oui /root/.cache /root/.gem /root/.bundle +ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ ADD logstash/maps/*.yaml /etc/ ADD logstash/config/log4j2.properties /usr/share/logstash/config/ ADD logstash/config/logstash.yml /usr/share/logstash/config/ @@ -69,11 +81,11 @@ ADD logstash/scripts /usr/local/bin/ ADD logstash/supervisord.conf /etc/supervisord.conf RUN bash -c "chmod --silent 755 /usr/local/bin/*.sh /usr/local/bin/*.py || true" && \ - mkdir -p /var/log/supervisor && \ + usermod -a -G tty ${PUSER} && \ rm -f /usr/share/logstash/pipeline/logstash.conf && \ rmdir /usr/share/logstash/pipeline && \ mkdir /logstash-persistent-queue && \ - bash -c "chown --silent -R logstash:root /usr/share/logstash/malcolm-pipelines /logstash-persistent-queue" && \ + chown --silent -R ${PUSER}:root /usr/share/logstash/malcolm-pipelines /logstash-persistent-queue && \ curl -sSL -o /usr/share/logstash/config/oui.txt "https://raw.githubusercontent.com/wireshark/wireshark/master/manuf" && \ ( awk -F '\t' '{gsub(":", "", $1); if (length($1) == 6) {if ($3) {print $1"\t"$3} else if ($2) {print $1"\t"$2}}}' /usr/share/logstash/config/oui.txt > /usr/share/logstash/config/oui-logstash.txt) && \ python /usr/local/bin/ja3_build_list.py -o /etc/ja3.yaml @@ -90,7 +102,10 @@ EXPOSE 5044 EXPOSE 9001 EXPOSE 9600 -CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf", "-u", "root", "-n"] +ENTRYPOINT ["/usr/local/bin/docker-uid-gid-setup.sh"] + +CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf", "-n"] + # to be populated at build-time: ARG BUILD_DATE diff --git a/Dockerfiles/moloch.Dockerfile b/Dockerfiles/moloch.Dockerfile index 72851cd40..8822a857b 100644 --- a/Dockerfiles/moloch.Dockerfile +++ b/Dockerfiles/moloch.Dockerfile @@ -4,9 +4,11 @@ FROM debian:buster-slim AS build ENV DEBIAN_FRONTEND noninteractive -ENV MOLOCH_VERSION "2.3.1" +ENV MOLOCH_VERSION "2.3.2" ENV MOLOCHDIR "/data/moloch" ENV MOLOCH_URL 
"https://codeload.github.com/aol/moloch/tar.gz/v${MOLOCH_VERSION}" +ENV MOLOCH_LOCALELASTICSEARCH no +ENV MOLOCH_INET yes ADD moloch/scripts/bs4_remove_div.py /data/ ADD moloch/patch/* /data/patches/ @@ -78,13 +80,14 @@ RUN sed -i "s/buster main/buster main contrib non-free/g" /etc/apt/sources.list python3 /data/bs4_remove_div.py -i ./viewer/vueapp/src/components/users/Users.vue -o ./viewer/vueapp/src/components/users/Users.new -c "new-user-form" && \ mv -vf ./viewer/vueapp/src/components/users/Users.new ./viewer/vueapp/src/components/users/Users.vue && \ rm -rf ./viewer/vueapp/src/components/upload && \ + sed -i "s/^\(MOLOCH_LOCALELASTICSEARCH=\).*/\1"$MOLOCH_LOCALELASTICSEARCH"/" ./release/Configure && \ + sed -i "s/^\(MOLOCH_INET=\).*/\1"$MOLOCH_INET"/" ./release/Configure && \ ./easybutton-build.sh --install && \ npm cache clean --force && \ bash -c "file ${MOLOCHDIR}/bin/* ${MOLOCHDIR}/node-v*/bin/* | grep 'ELF 64-bit' | sed 's/:.*//' | xargs -l -r strip -v --strip-unneeded" FROM debian:buster-slim - LABEL maintainer="malcolm.netsec@gmail.com" LABEL org.opencontainers.image.authors='malcolm.netsec@gmail.com' LABEL org.opencontainers.image.url='https://github.com/idaholab/Malcolm' @@ -94,8 +97,16 @@ LABEL org.opencontainers.image.vendor='Idaho National Laboratory' LABEL org.opencontainers.image.title='malcolmnetsec/moloch' LABEL org.opencontainers.image.description='Malcolm container providing Moloch' +ARG DEFAULT_UID=1000 +ARG DEFAULT_GID=1000 +ENV DEFAULT_UID $DEFAULT_UID +ENV DEFAULT_GID $DEFAULT_GID +ENV PUSER "moloch" +ENV PGROUP "moloch" +ENV PUSER_PRIV_DROP true ENV DEBIAN_FRONTEND noninteractive +ENV TERM xterm ARG ES_HOST=elasticsearch ARG ES_PORT=9200 @@ -116,15 +127,12 @@ ARG MAXMIND_GEOIP_DB_LICENSE_KEY="" # Declare envs vars for each arg ENV ES_HOST $ES_HOST ENV ES_PORT $ES_PORT -ENV MOLOCH_LOCALELASTICSEARCH no -ENV MOLOCH_INET yes ENV MOLOCH_ELASTICSEARCH "http://"$ES_HOST":"$ES_PORT ENV MOLOCH_INTERFACE $MOLOCH_INTERFACE ENV MALCOLM_USERNAME 
$MALCOLM_USERNAME # this needs to be present, but is unused as nginx is going to handle auth for us ENV MOLOCH_PASSWORD "ignored" ENV MOLOCHDIR "/data/moloch" -ENV MOLOCHUSER "moloch" ENV MOLOCH_ANALYZE_PCAP_THREADS $MOLOCH_ANALYZE_PCAP_THREADS ENV WISE $WISE ENV VIEWER $VIEWER @@ -176,6 +184,7 @@ RUN sed -i "s/buster main/buster main contrib non-free/" /etc/apt/sources.list & rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # add configuration and scripts +ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ ADD moloch/scripts /data/ ADD shared/bin/pcap_moloch_and_zeek_processor.py /data/ ADD shared/bin/pcap_utils.py /data/ @@ -193,22 +202,23 @@ RUN [ ${#MAXMIND_GEOIP_DB_LICENSE_KEY} -gt 1 ] && for DB in ASN Country City; do cd /tmp && \ curl -s -S -L -o "GeoLite2-$DB.mmdb.tar.gz" "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-$DB&license_key=$MAXMIND_GEOIP_DB_LICENSE_KEY&suffix=tar.gz" && \ tar xf "GeoLite2-$DB.mmdb.tar.gz" --wildcards --no-anchored '*.mmdb' --strip=1 && \ - mkdir -p $MOLOCHDIR/etc/ && \ + mkdir -p $MOLOCHDIR/etc/ $MOLOCHDIR/logs/ && \ mv -v "GeoLite2-$DB.mmdb" $MOLOCHDIR/etc/; \ rm -f "GeoLite2-$DB*"; \ done; \ curl -s -S -L -o $MOLOCHDIR/etc/ipv4-address-space.csv "https://www.iana.org/assignments/ipv4-address-space/ipv4-address-space.csv" && \ curl -s -S -L -o $MOLOCHDIR/etc/oui.txt "https://raw.githubusercontent.com/wireshark/wireshark/master/manuf" -RUN groupadd --gid 1000 $MOLOCHUSER && \ - useradd -M --uid 1000 --gid 1000 --home $MOLOCHDIR $MOLOCHUSER && \ +RUN groupadd --gid $DEFAULT_GID $PGROUP && \ + useradd -M --uid $DEFAULT_UID --gid $DEFAULT_GID --home $MOLOCHDIR $PUSER && \ + usermod -a -G tty $PUSER && \ chmod 755 /data/*.sh && \ ln -sfr /data/pcap_moloch_and_zeek_processor.py /data/pcap_moloch_processor.py && \ cp -f /data/moloch_update_geo.sh $MOLOCHDIR/bin/moloch_update_geo.sh && \ - sed -i "s/^\(MOLOCH_LOCALELASTICSEARCH=\).*/\1"$MOLOCH_LOCALELASTICSEARCH"/" $MOLOCHDIR/bin/Configure && \ - sed -i 
"s/^\(MOLOCH_INET=\).*/\1"$MOLOCH_INET"/" $MOLOCHDIR/bin/Configure && \ chmod u+s $MOLOCHDIR/bin/moloch-capture && \ - chown -R 1000:1000 $MOLOCHDIR/logs + chown -R $PUSER:$PGROUP $MOLOCHDIR/logs && \ + mkdir -p /var/run/moloch && \ + chown -R $PUSER:$PGROUP /var/run/moloch #Update Path ENV PATH="/data:$MOLOCHDIR/bin:${PATH}" @@ -216,8 +226,9 @@ ENV PATH="/data:$MOLOCHDIR/bin:${PATH}" EXPOSE 8000 8005 8081 WORKDIR $MOLOCHDIR -# ENTRYPOINT ["/data/startmoloch.sh"] -CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf", "-u", "root", "-n"] +ENTRYPOINT ["/usr/local/bin/docker-uid-gid-setup.sh"] + +CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf", "-n"] # to be populated at build-time: diff --git a/Dockerfiles/name-map-ui.Dockerfile b/Dockerfiles/name-map-ui.Dockerfile index 4726cf9fd..b78d567bb 100644 --- a/Dockerfiles/name-map-ui.Dockerfile +++ b/Dockerfiles/name-map-ui.Dockerfile @@ -10,12 +10,23 @@ LABEL org.opencontainers.image.vendor='Idaho National Laboratory' LABEL org.opencontainers.image.title='malcolmnetsec/name-map-ui' LABEL org.opencontainers.image.description='Malcolm container providing a user interface for mapping names to network hosts and subnets' +ARG DEFAULT_UID=1000 +ARG DEFAULT_GID=1000 +ENV DEFAULT_UID $DEFAULT_UID +ENV DEFAULT_GID $DEFAULT_GID +ENV PUSER "nginxsrv" +ENV PGROUP "nginxsrv" +ENV PUSER_PRIV_DROP true +ENV PUSER_CHOWN "/var/www/html;/var/lib/nginx;/var/log/nginx" + +ENV TERM xterm + ENV JQUERY_VERSION 1.6.4 ENV LISTJS_VERSION v1.5.0 RUN apk --no-cache add bash php7 php7-fpm php7-mysqli php7-json php7-openssl php7-curl php7-fileinfo \ php7-zlib php7-xml php7-phar php7-intl php7-dom php7-xmlreader php7-ctype php7-session \ - php7-mbstring php7-gd nginx supervisor curl inotify-tools file psmisc + php7-mbstring php7-gd nginx supervisor curl inotify-tools file psmisc shadow COPY name-map-ui/config/nginx.conf /etc/nginx/nginx.conf COPY name-map-ui/config/fpm-pool.conf /etc/php7/php-fpm.d/www.conf @@ -32,29 +43,33 @@ RUN curl 
-sSL -o /tmp/jquery.min.js "https://code.jquery.com/jquery-${JQUERY_VER mv /tmp/jquery.min.js /tmp/list.min.js ./ && \ chmod 644 ./jquery.min.js ./list.min.js && \ ln -s . name-map-ui && \ - addgroup -g 1000 nginxsrv ; \ - adduser -D -H -u 1000 -h /var/www/html -s /sbin/nologin -G nginxsrv -g nginxsrv nginxsrv ; \ - addgroup nginxsrv nginx ; \ - addgroup nginxsrv shadow ; \ - chown -R nginxsrv.nginxsrv /var/www/html && \ - chown -R nginxsrv.nginxsrv /run && \ - chown -R nginxsrv.nginxsrv /var/lib/nginx && \ - chown -R nginxsrv.nginxsrv /var/log/nginx && \ + addgroup -g ${DEFAULT_GID} ${PGROUP} ; \ + adduser -D -H -u ${DEFAULT_UID} -h /var/www/html -s /sbin/nologin -G ${PGROUP} -g ${PUSER} ${PUSER} ; \ + addgroup ${PUSER} nginx ; \ + addgroup ${PUSER} shadow ; \ + addgroup ${PUSER} tty ; \ + addgroup nginx tty ; \ + chown -R ${PUSER}:${PGROUP} /var/www/html && \ + chown -R ${PUSER}:${PGROUP} /var/lib/nginx && \ + chown -R ${PUSER}:${PGROUP} /var/log/nginx && \ chmod 755 /usr/local/bin/*.sh VOLUME /var/www/html -USER nginxsrv - WORKDIR /var/www/html -COPY --chown=1000 name-map-ui/site/ /var/www/html/ -COPY --chown=1000 docs/images/logo/Malcolm_banner.png /var/www/html/ -COPY --chown=1000 docs/images/favicon/favicon.ico /var/www/html/ +ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ +COPY name-map-ui/site/ /var/www/html/ +COPY docs/images/logo/Malcolm_banner.png /var/www/html/ +COPY docs/images/favicon/favicon.ico /var/www/html/ EXPOSE 8080 -CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"] + +ENTRYPOINT ["/usr/local/bin/docker-uid-gid-setup.sh"] + +CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf", "-n"] + # to be populated at build-time: ARG BUILD_DATE diff --git a/Dockerfiles/nginx.Dockerfile b/Dockerfiles/nginx.Dockerfile index 8a2c275f6..10affc80f 100644 --- a/Dockerfiles/nginx.Dockerfile +++ b/Dockerfiles/nginx.Dockerfile @@ -12,6 +12,13 @@ FROM alpine:3.11 as stunnel_build +ARG DEFAULT_UID=1000 +ARG DEFAULT_GID=300 +ENV DEFAULT_UID 
$DEFAULT_UID +ENV DEFAULT_GID $DEFAULT_GID +ENV PUSER "builder" +ENV PGROUP "abuild" + ADD https://codeload.github.com/alpinelinux/aports/tar.gz/master /aports-master.tar.gz ADD nginx/src/*.patch /usr/src/patches/ @@ -20,11 +27,11 @@ USER root RUN set -x ; \ apk add --no-cache alpine-sdk patchutils sudo openssl-dev linux-headers; \ sed -i 's/^#\s*\(%wheel\s\+ALL=(ALL)\s\+NOPASSWD:\s\+ALL\)/\1/' /etc/sudoers ; \ - adduser -D -u 1000 -h /apkbuild -G abuild builder ; \ - addgroup builder wheel ; \ + adduser -D -u ${DEFAULT_UID} -h /apkbuild -G ${PGROUP} ${PUSER} ; \ + addgroup ${PUSER} wheel ; \ chmod 644 /aports-master.tar.gz -USER builder +USER ${PUSER} RUN set -x ; \ cd /apkbuild ; \ @@ -50,6 +57,20 @@ LABEL org.opencontainers.image.vendor='Idaho National Laboratory' LABEL org.opencontainers.image.title='malcolmnetsec/nginx-proxy' LABEL org.opencontainers.image.description='Malcolm container providing an NGINX reverse proxy for the other services' +ARG DEFAULT_UID=101 +ARG DEFAULT_GID=101 +ENV DEFAULT_UID $DEFAULT_UID +ENV DEFAULT_GID $DEFAULT_GID +ENV PUSER "nginx" +ENV PGROUP "nginx" +# not dropping privileges globally so nginx and stunnel can bind privileged ports internally. 
+# nginx itself will drop privileges to "nginx" user for worker processes +ENV PUSER_PRIV_DROP false + +ENV TERM xterm + +USER root + # authentication method: encrypted HTTP basic authentication ('true') vs nginx-auth-ldap ('false') ARG NGINX_BASIC_AUTH=true @@ -102,8 +123,8 @@ RUN set -x ; \ --http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp \ --http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp \ --http-scgi-temp-path=/var/cache/nginx/scgi_temp \ - --user=nginx \ - --group=nginx \ + --user=${PUSER} \ + --group=${PGROUP} \ --with-http_ssl_module \ --with-http_realip_module \ --with-http_addition_module \ @@ -135,12 +156,12 @@ RUN set -x ; \ --with-http_v2_module \ --add-module=/usr/src/nginx-auth-ldap \ " ; \ - addgroup -g 101 -S nginx ; \ - adduser -S -D -H -u 101 -h /var/cache/nginx -s /sbin/nologin -G nginx -g nginx nginx ; \ - addgroup nginx shadow ; \ + apk add --no-cache curl shadow; \ + addgroup -g ${DEFAULT_GID} -S ${PGROUP} ; \ + adduser -S -D -H -u ${DEFAULT_UID} -h /var/cache/nginx -s /sbin/nologin -G ${PGROUP} -g ${PUSER} ${PUSER} ; \ + addgroup ${PUSER} shadow ; \ mkdir -p /var/cache/nginx ; \ - chown nginx:nginx /var/cache/nginx ; \ - apk add --no-cache curl; \ + chown ${PUSER}:${PGROUP} /var/cache/nginx ; \ apk add --no-cache --virtual .nginx-build-deps \ gcc \ gd-dev \ @@ -209,7 +230,6 @@ RUN set -x ; \ apk del .nginx-build-deps ; \ apk del .gettext ; \ mv /tmp/envsubst /usr/local/bin/ ; \ - mkdir -p /var/log/supervisor ; \ rm -rf /usr/src/* /var/tmp/* /var/cache/apk/* /tmp/stunnel-*.apk /nginx.tar.gz /nginx-auth-ldap.tar.gz; \ touch /etc/nginx/nginx_ldap.conf /etc/nginx/nginx_blank.conf; @@ -217,6 +237,7 @@ COPY --from=jwilder/nginx-proxy:alpine /app/nginx.tmpl /etc/nginx/ COPY --from=jwilder/nginx-proxy:alpine /etc/nginx/network_internal.conf /etc/nginx/ COPY --from=jwilder/nginx-proxy:alpine /etc/nginx/conf.d/default.conf /etc/nginx/conf.d/ +ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ ADD nginx/scripts /usr/local/bin/ ADD 
nginx/*.conf /etc/nginx/ ADD nginx/supervisord.conf /etc/ @@ -226,10 +247,11 @@ EXPOSE 80 VOLUME ["/etc/nginx/certs", "/etc/nginx/dhparam"] -ENTRYPOINT ["/usr/local/bin/docker_entrypoint.sh"] +ENTRYPOINT ["/usr/local/bin/docker-uid-gid-setup.sh", "/usr/local/bin/docker_entrypoint.sh"] CMD ["supervisord", "-c", "/etc/supervisord.conf", "-u", "root", "-n"] + # to be populated at build-time: ARG BUILD_DATE ARG MALCOLM_VERSION diff --git a/Dockerfiles/pcap-capture.Dockerfile b/Dockerfiles/pcap-capture.Dockerfile index 71bbfa016..654a4c424 100644 --- a/Dockerfiles/pcap-capture.Dockerfile +++ b/Dockerfiles/pcap-capture.Dockerfile @@ -10,8 +10,22 @@ LABEL org.opencontainers.image.vendor='Idaho National Laboratory' LABEL org.opencontainers.image.title='malcolmnetsec/pcap-capture' LABEL org.opencontainers.image.description='Malcolm container providing network traffic capture capabilities via netsniff-ng and tcpdump' +ARG DEFAULT_UID=1000 +ARG DEFAULT_GID=1000 +ENV DEFAULT_UID $DEFAULT_UID +ENV DEFAULT_GID $DEFAULT_GID +ENV PUSER "pcap" +ENV PGROUP "pcap" +# not dropping privileges globally: supervisord will take care of it +# for all processes, but first we need root to make sure capabilities for +# traffic capturing tools are in-place before they are started.
+# despite doing setcap here in the Dockerfile, the chown in +# docker-uid-gid-setup.sh will cause them to be lost, so we need +# a final check in supervisor.sh before startup +ENV PUSER_PRIV_DROP false ENV DEBIAN_FRONTEND noninteractive +ENV TERM xterm ARG PCAP_ENABLE_TCPDUMP=false ARG PCAP_ENABLE_NETSNIFF=false @@ -24,7 +38,6 @@ ARG PCAP_ROTATE_MEGABYTES=500 ARG PCAP_PATH=/pcap ARG PCAP_FILTER= ARG PCAP_SNAPLEN=0 -ARG PCAP_USER=pcap ENV PCAP_ENABLE_TCPDUMP $PCAP_ENABLE_TCPDUMP ENV PCAP_ENABLE_NETSNIFF $PCAP_ENABLE_NETSNIFF @@ -36,8 +49,8 @@ ENV PCAP_ROTATE_MEGABYTES $PCAP_ROTATE_MEGABYTES ENV PCAP_PATH $PCAP_PATH ENV PCAP_FILTER $PCAP_FILTER ENV PCAP_SNAPLEN $PCAP_SNAPLEN -ENV PCAP_USER $PCAP_USER +ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ ADD pcap-capture/supervisord.conf /etc/supervisord.conf ADD pcap-capture/scripts/*.sh /usr/local/bin/ ADD pcap-capture/templates/*.template /etc/supervisor.d/ @@ -54,19 +67,23 @@ RUN apt-get update && \ tcpdump && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* && \ - mkdir -p /var/log/supervisor /etc/supervisor.d && \ - groupadd --gid 1000 $PCAP_USER && \ - useradd -M --uid 1000 --gid 1000 $PCAP_USER && \ - chown root:$PCAP_USER /sbin/ethtool && \ + groupadd --gid ${DEFAULT_GID} ${PGROUP} && \ + useradd -M --uid ${DEFAULT_UID} --gid ${DEFAULT_GID} ${PUSER} && \ + mkdir -p /etc/supervisor.d && \ + chown -R ${PUSER}:${PGROUP} /etc/supervisor.d && \ + chmod -R 750 /etc/supervisor.d && \ + chown root:${PGROUP} /sbin/ethtool && \ setcap 'CAP_NET_RAW+eip CAP_NET_ADMIN+eip' /sbin/ethtool && \ - chown root:$PCAP_USER /usr/sbin/tcpdump && \ + chown root:${PGROUP} /usr/sbin/tcpdump && \ setcap 'CAP_NET_RAW+eip CAP_NET_ADMIN+eip' /usr/sbin/tcpdump && \ - chown root:$PCAP_USER /usr/sbin/netsniff-ng && \ + chown root:${PGROUP} /usr/sbin/netsniff-ng && \ setcap 'CAP_NET_RAW+eip CAP_NET_ADMIN+eip CAP_IPC_LOCK+eip CAP_SYS_ADMIN+eip' /usr/sbin/netsniff-ng && \ chmod 755 /usr/local/bin/*.sh WORKDIR "$PCAP_PATH" +ENTRYPOINT 
["/usr/local/bin/docker-uid-gid-setup.sh"] + CMD ["/usr/local/bin/supervisor.sh"] diff --git a/Dockerfiles/pcap-monitor.Dockerfile b/Dockerfiles/pcap-monitor.Dockerfile index c02a981bf..12ef9333f 100644 --- a/Dockerfiles/pcap-monitor.Dockerfile +++ b/Dockerfiles/pcap-monitor.Dockerfile @@ -10,11 +10,21 @@ LABEL org.opencontainers.image.vendor='Idaho National Laboratory' LABEL org.opencontainers.image.title='malcolmnetsec/pcap-monitor' LABEL org.opencontainers.image.description='Malcolm container watching for captured or uploaded artifacts to be processed' +ARG DEFAULT_UID=1000 +ARG DEFAULT_GID=1000 +ENV DEFAULT_UID $DEFAULT_UID +ENV DEFAULT_GID $DEFAULT_GID +ENV PUSER "watcher" +ENV PGROUP "watcher" +# not dropping privileges globally: supervisord will take care of it +# on a case-by-case basis so that one script (watch-pcap-uploads-folder.sh) +# can chown uploaded files +ENV PUSER_PRIV_DROP false ENV DEBIAN_FRONTEND noninteractive +ENV TERM xterm ARG ELASTICSEARCH_URL="http://elasticsearch:9200" -ARG MONITOR_USER=watcher ARG PCAP_PATH=/pcap ARG PCAP_PIPELINE_DEBUG=false ARG PCAP_PIPELINE_DEBUG_EXTRA=false @@ -22,7 +32,6 @@ ARG PCAP_PIPELINE_IGNORE_PREEXISTING=false ARG ZEEK_PATH=/zeek ENV ELASTICSEARCH_URL $ELASTICSEARCH_URL -ENV MONITOR_USER $MONITOR_USER ENV PCAP_PATH $PCAP_PATH ENV PCAP_PIPELINE_DEBUG $PCAP_PIPELINE_DEBUG ENV PCAP_PIPELINE_DEBUG_EXTRA $PCAP_PIPELINE_DEBUG_EXTRA @@ -45,10 +54,10 @@ RUN apt-get update && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* && \ pip3 install --no-cache-dir elasticsearch elasticsearch_dsl pyzmq pyinotify python-magic && \ - mkdir -p /var/log/supervisor && \ - groupadd --gid 1000 $MONITOR_USER && \ - useradd -M --uid 1000 --gid 1000 $MONITOR_USER + groupadd --gid ${DEFAULT_GID} ${PGROUP} && \ + useradd -M --uid ${DEFAULT_UID} --gid ${DEFAULT_GID} ${PUSER} +ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ ADD pcap-monitor/supervisord.conf /etc/supervisord.conf ADD pcap-monitor/scripts/ /usr/local/bin/ ADD 
shared/bin/pcap_watcher.py /usr/local/bin/ @@ -56,8 +65,11 @@ ADD shared/bin/pcap_utils.py /usr/local/bin/ EXPOSE 30441 +ENTRYPOINT ["/usr/local/bin/docker-uid-gid-setup.sh"] + CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf", "-u", "root", "-n"] + # to be populated at build-time: ARG BUILD_DATE ARG MALCOLM_VERSION diff --git a/Dockerfiles/zeek.Dockerfile b/Dockerfiles/zeek.Dockerfile index 232a92955..46c5e01aa 100644 --- a/Dockerfiles/zeek.Dockerfile +++ b/Dockerfiles/zeek.Dockerfile @@ -98,7 +98,16 @@ LABEL org.opencontainers.image.vendor='Idaho National Laboratory' LABEL org.opencontainers.image.title='malcolmnetsec/zeek' LABEL org.opencontainers.image.description='Malcolm container providing Zeek' +ARG DEFAULT_UID=1000 +ARG DEFAULT_GID=1000 +ENV DEFAULT_UID $DEFAULT_UID +ENV DEFAULT_GID $DEFAULT_GID +ENV PUSER "zeek" +ENV PGROUP "zeek" +ENV PUSER_PRIV_DROP true + ENV DEBIAN_FRONTEND noninteractive +ENV TERM xterm ENV LLVM_VERSION "10" ENV ZEEK_DIR "/opt/zeek" @@ -144,6 +153,7 @@ RUN sed -i "s/buster main/buster main contrib non-free/g" /etc/apt/sources.list rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* # add configuration and scripts +ADD shared/bin/docker-uid-gid-setup.sh /usr/local/bin/ ADD shared/bin/pcap_moloch_and_zeek_processor.py /usr/local/bin/ ADD shared/bin/pcap_utils.py /usr/local/bin/ ADD shared/pcaps /tmp/pcaps @@ -165,7 +175,6 @@ RUN mkdir -p /tmp/logs && \ #Whether or not to auto-tag logs based on filename ARG AUTO_TAG=true #Whether or not to run "zeek -r XXXXX.pcap local" on each pcap file -ARG ZEEKUSER=zeek ARG ZEEK_AUTO_ANALYZE_PCAP_FILES=false ARG ZEEK_AUTO_ANALYZE_PCAP_THREADS=1 ARG ZEEK_EXTRACTOR_MODE=none @@ -175,7 +184,6 @@ ARG PCAP_PIPELINE_DEBUG_EXTRA=false ARG PCAP_MONITOR_HOST=pcap-monitor ENV AUTO_TAG $AUTO_TAG -ENV ZEEKUSER $ZEEKUSER ENV ZEEK_AUTO_ANALYZE_PCAP_FILES $ZEEK_AUTO_ANALYZE_PCAP_FILES ENV ZEEK_AUTO_ANALYZE_PCAP_THREADS $ZEEK_AUTO_ANALYZE_PCAP_THREADS ENV ZEEK_EXTRACTOR_MODE $ZEEK_EXTRACTOR_MODE @@ -184,14 
+192,17 @@ ENV PCAP_PIPELINE_DEBUG $PCAP_PIPELINE_DEBUG ENV PCAP_PIPELINE_DEBUG_EXTRA $PCAP_PIPELINE_DEBUG_EXTRA ENV PCAP_MONITOR_HOST $PCAP_MONITOR_HOST -RUN groupadd --gid 1000 ${ZEEKUSER} && \ - useradd -M --uid 1000 --gid 1000 --home /nonexistant ${ZEEKUSER} && \ +RUN groupadd --gid ${DEFAULT_GID} ${PUSER} && \ + useradd -M --uid ${DEFAULT_UID} --gid ${DEFAULT_GID} --home /nonexistant ${PUSER} && \ + usermod -a -G tty ${PUSER} && \ ln -sfr /usr/local/bin/pcap_moloch_and_zeek_processor.py /usr/local/bin/pcap_zeek_processor.py #Update Path ENV PATH "${ZEEK_DIR}/bin:${SPICY_DIR}/bin:${PATH}" -CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf", "-u", "root", "-n"] +ENTRYPOINT ["/usr/local/bin/docker-uid-gid-setup.sh"] + +CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf", "-n"] # to be populated at build-time: diff --git a/README.md b/README.md index 9df2a20a6..dab20d96b 100644 --- a/README.md +++ b/README.md @@ -131,22 +131,22 @@ You can then observe that the images have been retrieved by running `docker imag ``` $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE -malcolmnetsec/curator 2.1.0 xxxxxxxxxxxx 20 hours ago 246MB -malcolmnetsec/elastalert 2.1.0 xxxxxxxxxxxx 20 hours ago 408MB -malcolmnetsec/filebeat-oss 2.1.0 xxxxxxxxxxxx 20 hours ago 474MB -malcolmnetsec/file-monitor 2.1.0 xxxxxxxxxxxx 20 hours ago 386MB -malcolmnetsec/file-upload 2.1.0 xxxxxxxxxxxx 20 hours ago 199MB -malcolmnetsec/freq 2.1.0 xxxxxxxxxxxx 20 hours ago 390MB -malcolmnetsec/htadmin 2.1.0 xxxxxxxxxxxx 20 hours ago 180MB -malcolmnetsec/kibana-oss 2.1.0 xxxxxxxxxxxx 20 hours ago 1.07GB -malcolmnetsec/logstash-oss 2.1.0 xxxxxxxxxxxx 20 hours ago 1.05GB -malcolmnetsec/moloch 2.1.0 xxxxxxxxxxxx 20 hours ago 667MB -malcolmnetsec/name-map-ui 2.1.0 xxxxxxxxxxxx 20 hours ago 134MB -malcolmnetsec/nginx-proxy 2.1.0 xxxxxxxxxxxx 20 hours ago 118MB -malcolmnetsec/pcap-capture 2.1.0 xxxxxxxxxxxx 20 hours ago 111MB -malcolmnetsec/pcap-monitor 2.1.0 xxxxxxxxxxxx 20 hours ago 156MB 
-malcolmnetsec/zeek 2.1.0 xxxxxxxxxxxx 20 hours ago 442MB -docker.elastic.co/elasticsearch/elasticsearch-oss 7.6.2 xxxxxxxxxxxx 20 hours ago 693MB +malcolmnetsec/curator 2.1.1 xxxxxxxxxxxx 20 hours ago 246MB +malcolmnetsec/elastalert 2.1.1 xxxxxxxxxxxx 20 hours ago 408MB +malcolmnetsec/elasticsearch-oss 2.1.1 xxxxxxxxxxxx 20 hours ago 693MB +malcolmnetsec/filebeat-oss 2.1.1 xxxxxxxxxxxx 20 hours ago 474MB +malcolmnetsec/file-monitor 2.1.1 xxxxxxxxxxxx 20 hours ago 386MB +malcolmnetsec/file-upload 2.1.1 xxxxxxxxxxxx 20 hours ago 199MB +malcolmnetsec/freq 2.1.1 xxxxxxxxxxxx 20 hours ago 390MB +malcolmnetsec/htadmin 2.1.1 xxxxxxxxxxxx 20 hours ago 180MB +malcolmnetsec/kibana-oss 2.1.1 xxxxxxxxxxxx 20 hours ago 1.07GB +malcolmnetsec/logstash-oss 2.1.1 xxxxxxxxxxxx 20 hours ago 1.05GB +malcolmnetsec/moloch 2.1.1 xxxxxxxxxxxx 20 hours ago 667MB +malcolmnetsec/name-map-ui 2.1.1 xxxxxxxxxxxx 20 hours ago 134MB +malcolmnetsec/nginx-proxy 2.1.1 xxxxxxxxxxxx 20 hours ago 118MB +malcolmnetsec/pcap-capture 2.1.1 xxxxxxxxxxxx 20 hours ago 111MB +malcolmnetsec/pcap-monitor 2.1.1 xxxxxxxxxxxx 20 hours ago 156MB +malcolmnetsec/zeek 2.1.1 xxxxxxxxxxxx 20 hours ago 442MB ``` You must run [`auth_setup`](#AuthSetup) prior to running `docker-compose pull`. You should also ensure your system configuration and `docker-compose.yml` settings are tuned by running `./scripts/install.py` or `./scripts/install.py --configure` (see [System configuration and tuning](#ConfigAndTuning)). @@ -327,6 +327,7 @@ Then, go take a walk or something since it will be a while. 
When you're done, yo * `malcolmnetsec/curator` (based on `debian:buster-slim`) * `malcolmnetsec/elastalert` (based on `bitsensor/elastalert`) +* `malcolmnetsec/elasticsearch-oss` (based on `docker.elastic.co/elasticsearch/elasticsearch-oss`) * `malcolmnetsec/filebeat-oss` (based on `docker.elastic.co/beats/filebeat-oss`) * `malcolmnetsec/file-monitor` (based on `debian:buster-slim`) * `malcolmnetsec/file-upload` (based on `debian:buster-slim`) @@ -341,10 +342,6 @@ Then, go take a walk or something since it will be a while. When you're done, yo * `malcolmnetsec/pcap-monitor` (based on `debian:buster-slim`) * `malcolmnetsec/pcap-zeek` (based on `debian:buster-slim`) -Additionally, the command will pull from Docker Hub: - -* `docker.elastic.co/elasticsearch/elasticsearch-oss` - ## Pre-Packaged installation files ### Creating pre-packaged installation files @@ -441,6 +438,8 @@ Edit `docker-compose.yml` and search for the `ES_JAVA_OPTS` key. Edit the `-Xms4 Various other environment variables inside of `docker-compose.yml` can be tweaked to control aspects of how Malcolm behaves, particularly with regards to processing PCAP files and Zeek logs. The environment variables of particular interest are located near the top of that file under **Commonly tweaked configuration options**, which include: +* `PUID` and `PGID` - Docker runs all of its containers as the privileged `root` user by default. For better security, Malcolm immediately drops to non-privileged user accounts for executing internal processes wherever possible. The `PUID` (**p**rocess **u**ser **ID**) and `PGID` (**p**rocess **g**roup **ID**) environment variables allow Malcolm to map internal non-privileged user accounts to a corresponding [user account](https://en.wikipedia.org/wiki/User_identifier) on the host. 
+ * `NGINX_BASIC_AUTH` - if set to `true`, use [TLS-encrypted HTTP basic](#AuthBasicAccountManagement) authentication (default); if set to `false`, use [Lightweight Directory Access Protocol (LDAP)](#AuthLDAP) authentication * `NGINX_LOG_ACCESS_AND_ERRORS` - if set to `true`, all access to Malcolm via its [web interfaces](#UserInterfaceURLs) will be logged to Elasticsearch (default `false`) @@ -646,7 +645,7 @@ Installing and configuring Docker to run under Windows must be done manually, ra + `Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V -All` + `Enable-WindowsOptionalFeature -Online -FeatureName Containers –All` 1. If you have not yet done so after enabling the Windows features, reboot. -1. Install [Docker Desktop for Windows](https://hub.docker.com/editions/community/docker-ce-desktop-windows) either by downloading the installer from the official Docker site or installing it through [chocolatey](https://chocolatey.org/packages/docker-desktop/2.1.0.2). +1. Install [Docker Desktop for Windows](https://hub.docker.com/editions/community/docker-ce-desktop-windows) either by downloading the installer from the official Docker site or installing it through [chocolatey](https://chocolatey.org/packages/docker-desktop). 1. Run **Docker Desktop**, click the **Settings** option in the Docker system tray menu and make the following adjustments: + **General** * Ensure *Start Docker Desktop when you log in* is checked. @@ -1381,7 +1380,7 @@ Building the ISO may take 30 minutes or more depending on your system. As the bu ``` … -Finished, created "/malcolm-build/malcolm-iso/malcolm-2.1.0.iso" +Finished, created "/malcolm-build/malcolm-iso/malcolm-2.1.1.iso" … ``` @@ -1669,6 +1668,8 @@ user@host:~/Malcolm$ python3 scripts/install.py --configure Now that any necessary system configuration changes have been made, the local Malcolm instance will be configured: ``` +Malcolm processes will run as UID 1000 and GID 1000. Is this OK? 
(Y/n): + Setting 10g for Elasticsearch and 3g for Logstash. Is this OK? (Y/n): y Restart Malcolm upon system or Docker daemon restart? (y/N): y @@ -1772,22 +1773,22 @@ Pulling zeek ... done user@host:~/Malcolm$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE -malcolmnetsec/curator 2.1.0 xxxxxxxxxxxx 20 hours ago 246MB -malcolmnetsec/elastalert 2.1.0 xxxxxxxxxxxx 20 hours ago 408MB -malcolmnetsec/filebeat-oss 2.1.0 xxxxxxxxxxxx 20 hours ago 474MB -malcolmnetsec/file-monitor 2.1.0 xxxxxxxxxxxx 20 hours ago 386MB -malcolmnetsec/file-upload 2.1.0 xxxxxxxxxxxx 20 hours ago 199MB -malcolmnetsec/freq 2.1.0 xxxxxxxxxxxx 20 hours ago 390MB -malcolmnetsec/htadmin 2.1.0 xxxxxxxxxxxx 20 hours ago 180MB -malcolmnetsec/kibana-oss 2.1.0 xxxxxxxxxxxx 20 hours ago 1.07GB -malcolmnetsec/logstash-oss 2.1.0 xxxxxxxxxxxx 20 hours ago 1.05GB -malcolmnetsec/moloch 2.1.0 xxxxxxxxxxxx 20 hours ago 667MB -malcolmnetsec/name-map-ui 2.1.0 xxxxxxxxxxxx 20 hours ago 134MB -malcolmnetsec/nginx-proxy 2.1.0 xxxxxxxxxxxx 20 hours ago 118MB -malcolmnetsec/pcap-capture 2.1.0 xxxxxxxxxxxx 20 hours ago 111MB -malcolmnetsec/pcap-monitor 2.1.0 xxxxxxxxxxxx 20 hours ago 156MB -malcolmnetsec/zeek 2.1.0 xxxxxxxxxxxx 20 hours ago 442MB -docker.elastic.co/elasticsearch/elasticsearch-oss 7.6.2 xxxxxxxxxxxx 20 hours ago 693MB +malcolmnetsec/curator 2.1.1 xxxxxxxxxxxx 20 hours ago 246MB +malcolmnetsec/elastalert 2.1.1 xxxxxxxxxxxx 20 hours ago 408MB +malcolmnetsec/elasticsearch-oss 2.1.1 xxxxxxxxxxxx 20 hours ago 693MB +malcolmnetsec/filebeat-oss 2.1.1 xxxxxxxxxxxx 20 hours ago 474MB +malcolmnetsec/file-monitor 2.1.1 xxxxxxxxxxxx 20 hours ago 386MB +malcolmnetsec/file-upload 2.1.1 xxxxxxxxxxxx 20 hours ago 199MB +malcolmnetsec/freq 2.1.1 xxxxxxxxxxxx 20 hours ago 390MB +malcolmnetsec/htadmin 2.1.1 xxxxxxxxxxxx 20 hours ago 180MB +malcolmnetsec/kibana-oss 2.1.1 xxxxxxxxxxxx 20 hours ago 1.07GB +malcolmnetsec/logstash-oss 2.1.1 xxxxxxxxxxxx 20 hours ago 1.05GB +malcolmnetsec/moloch 2.1.1 xxxxxxxxxxxx 20 hours 
ago 667MB +malcolmnetsec/name-map-ui 2.1.1 xxxxxxxxxxxx 20 hours ago 134MB +malcolmnetsec/nginx-proxy 2.1.1 xxxxxxxxxxxx 20 hours ago 118MB +malcolmnetsec/pcap-capture 2.1.1 xxxxxxxxxxxx 20 hours ago 111MB +malcolmnetsec/pcap-monitor 2.1.1 xxxxxxxxxxxx 20 hours ago 156MB +malcolmnetsec/zeek 2.1.1 xxxxxxxxxxxx 20 hours ago 442MB ``` Finally, we can start Malcolm. When Malcolm starts it will stream informational and debug messages to the console. If you wish, you can safely close the console or use `Ctrl+C` to stop these messages; Malcolm will continue running in the background. diff --git a/curator/scripts/docker-entrypoint.sh b/curator/scripts/docker-entrypoint.sh new file mode 100755 index 000000000..98075344e --- /dev/null +++ b/curator/scripts/docker-entrypoint.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Copyright (c) 2020 Battelle Energy Alliance, LLC. All rights reserved. + +/usr/local/bin/elastic_search_status.sh -w && /usr/local/bin/register-elasticsearch-snapshot-repo.sh + +/usr/local/bin/supercronic -json "${SUPERCRONIC_CRONTAB:-/etc/crontab}" \ No newline at end of file diff --git a/docker-compose-standalone.yml b/docker-compose-standalone.yml index 9b8550eab..d3bc2889a 100644 --- a/docker-compose-standalone.yml +++ b/docker-compose-standalone.yml @@ -5,6 +5,11 @@ version: '3.7' ################################################################################ # Commonly tweaked configuration options #------------------------------------------------------------------------------- +x-process-variables: &process-variables + # docker containers will run processes as unprivileged user with UID:GID + PUID : 1000 + PGID : 1000 + x-auth-variables: &auth-variables # authentication method: encrypted HTTP basic authentication ('true') vs LDAP ('false') NGINX_BASIC_AUTH : 'true' @@ -103,10 +108,13 @@ x-pcap-capture-variables: &pcap-capture-variables services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.6.2 + image: 
malcolmnetsec/elasticsearch-oss:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: elasticsearch environment: + << : *process-variables logger.level : 'WARN' bootstrap.memory_lock : 'true' ES_JAVA_OPTS : '-Xms4g -Xmx4g -Xss256k -Djava.security.egd=file:/dev/./urandom' @@ -135,10 +143,13 @@ services: retries: 3 start_period: 180s kibana: - image: malcolmnetsec/kibana-oss:2.1.0 + image: malcolmnetsec/kibana-oss:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: kibana environment: + << : *process-variables << : *kibana-variables ELASTICSEARCH_URL : 'http://elasticsearch:9200' VIRTUAL_HOST : 'kibana.malcolm.local' @@ -158,10 +169,13 @@ services: retries: 3 start_period: 210s elastalert: - image: malcolmnetsec/elastalert:2.1.0 + image: malcolmnetsec/elastalert:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: elastalert environment: + << : *process-variables ELASTICSEARCH_URL : 'http://elasticsearch:9200' ES_HOST : 'elasticsearch' ES_PORT : 9200 @@ -183,26 +197,32 @@ services: retries: 3 start_period: 210s curator: - image: malcolmnetsec/curator:2.1.0 + image: malcolmnetsec/curator:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: curator environment: + << : *process-variables << : *curator-variables ES_HOST : 'elasticsearch' ES_PORT : 9200 depends_on: - elasticsearch healthcheck: - test: ["CMD", "pidof", "cron"] + test: ["CMD", "pidof", "supercronic"] interval: 30s timeout: 5s retries: 3 start_period: 30s logstash: - image: malcolmnetsec/logstash-oss:2.1.0 + image: malcolmnetsec/logstash-oss:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: logstash environment: + << : *process-variables << : *logstash-variables << : *common-beats-variables << : *common-lookup-variables @@ -229,10 +249,13 @@ services: retries: 3 start_period: 600s filebeat: - image: malcolmnetsec/filebeat-oss:2.1.0 + image: malcolmnetsec/filebeat-oss:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: filebeat environment: + << : 
*process-variables << : *nginx-variables << : *common-upload-variables << : *common-beats-variables @@ -263,15 +286,18 @@ services: retries: 3 start_period: 60s moloch: - image: malcolmnetsec/moloch:2.1.0 + image: malcolmnetsec/moloch:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: moloch env_file: - ./auth.env environment: + << : *process-variables << : *common-upload-variables << : *moloch-variables - MOLOCH_VERSION : '2.3.1' + MOLOCH_VERSION : '2.3.2' VIRTUAL_HOST : 'moloch.malcolm.local' ES_HOST : 'elasticsearch' ES_PORT : 9200 @@ -299,10 +325,13 @@ services: retries: 3 start_period: 210s zeek: - image: malcolmnetsec/zeek:2.1.0 + image: malcolmnetsec/zeek:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: zeek environment: + << : *process-variables << : *common-upload-variables << : *zeek-variables ulimits: @@ -322,10 +351,13 @@ services: retries: 3 start_period: 60s file-monitor: - image: malcolmnetsec/file-monitor:2.1.0 + image: malcolmnetsec/file-monitor:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: filemon environment: + << : *process-variables << : *zeek-variables expose: - 3310 @@ -339,8 +371,10 @@ services: retries: 3 start_period: 60s pcap-capture: - image: malcolmnetsec/pcap-capture:2.1.0 + image: malcolmnetsec/pcap-capture:2.1.1 restart: "no" + stdin_open: false + tty: true network_mode: host ulimits: memlock: @@ -352,6 +386,7 @@ services: - NET_RAW - SYS_ADMIN environment: + << : *process-variables << : *pcap-capture-variables volumes: - ./pcap/upload:/pcap @@ -362,10 +397,13 @@ services: retries: 3 start_period: 60s pcap-monitor: - image: malcolmnetsec/pcap-monitor:2.1.0 + image: malcolmnetsec/pcap-monitor:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: pcapmon environment: + << : *process-variables << : *common-upload-variables ELASTICSEARCH_URL : 'http://elasticsearch:9200' depends_on: @@ -382,12 +420,15 @@ services: retries: 3 start_period: 90s upload: - image: malcolmnetsec/file-upload:2.1.0 
+ image: malcolmnetsec/file-upload:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: upload env_file: - ./auth.env environment: + << : *process-variables SITE_NAME : 'Capture File and Log Archive Upload' VIRTUAL_HOST : 'upload.malcolm.local' depends_on: @@ -405,10 +446,13 @@ services: retries: 3 start_period: 60s htadmin: - image: malcolmnetsec/htadmin:2.1.0 + image: malcolmnetsec/htadmin:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: htadmin environment: + << : *process-variables << : *auth-variables VIRTUAL_HOST : 'htadmin.malcolm.local' expose: @@ -424,10 +468,13 @@ services: retries: 3 start_period: 60s freq: - image: malcolmnetsec/freq:2.1.0 + image: malcolmnetsec/freq:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: freq environment: + << : *process-variables << : *common-lookup-variables VIRTUAL_HOST : 'freq.malcolm.local' expose: @@ -439,10 +486,13 @@ services: retries: 3 start_period: 60s name-map-ui: - image: malcolmnetsec/name-map-ui:2.1.0 + image: malcolmnetsec/name-map-ui:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: name-map-ui environment: + << : *process-variables VIRTUAL_HOST : 'name-map-ui.malcolm.local' expose: - 8080 @@ -457,10 +507,13 @@ services: retries: 3 start_period: 60s nginx-proxy: - image: malcolmnetsec/nginx-proxy:2.1.0 + image: malcolmnetsec/nginx-proxy:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: nginx-proxy environment: + << : *process-variables << : *auth-variables << : *nginx-variables depends_on: diff --git a/docker-compose.yml b/docker-compose.yml index d39b9cc41..d56d10359 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -5,6 +5,11 @@ version: '3.7' ################################################################################ # Commonly tweaked configuration options #------------------------------------------------------------------------------- +x-process-variables: &process-variables + # docker containers will run processes as unprivileged 
user with UID:GID + PUID : 1000 + PGID : 1000 + x-auth-variables: &auth-variables # authentication method: encrypted HTTP basic authentication ('true') vs LDAP ('false') NGINX_BASIC_AUTH : 'true' @@ -103,10 +108,16 @@ x-pcap-capture-variables: &pcap-capture-variables services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.6.2 + build: + context: . + dockerfile: Dockerfiles/elasticsearch.Dockerfile + image: malcolmnetsec/elasticsearch-oss:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: elasticsearch environment: + << : *process-variables logger.level : 'INFO' bootstrap.memory_lock : 'true' ES_JAVA_OPTS : '-Xms4g -Xmx4g -Xss256k -Djava.security.egd=file:/dev/./urandom' @@ -138,10 +149,13 @@ services: build: context: . dockerfile: Dockerfiles/kibana.Dockerfile - image: malcolmnetsec/kibana-oss:2.1.0 + image: malcolmnetsec/kibana-oss:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: kibana environment: + << : *process-variables << : *kibana-variables ELASTICSEARCH_URL : 'http://elasticsearch:9200' VIRTUAL_HOST : 'kibana.malcolm.local' @@ -164,10 +178,13 @@ services: build: context: . dockerfile: Dockerfiles/elastalert.Dockerfile - image: malcolmnetsec/elastalert:2.1.0 + image: malcolmnetsec/elastalert:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: elastalert environment: + << : *process-variables ELASTICSEARCH_URL : 'http://elasticsearch:9200' ES_HOST : 'elasticsearch' ES_PORT : 9200 @@ -192,10 +209,13 @@ services: build: context: . 
dockerfile: Dockerfiles/curator.Dockerfile - image: malcolmnetsec/curator:2.1.0 + image: malcolmnetsec/curator:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: curator environment: + << : *process-variables << : *curator-variables ES_HOST : 'elasticsearch' ES_PORT : 9200 @@ -204,7 +224,7 @@ services: volumes: - ./curator/config/action_file.yml:/config/action_file.yml healthcheck: - test: ["CMD", "pidof", "cron"] + test: ["CMD", "pidof", "supercronic"] interval: 30s timeout: 5s retries: 3 @@ -213,10 +233,13 @@ services: build: context: . dockerfile: Dockerfiles/logstash.Dockerfile - image: malcolmnetsec/logstash-oss:2.1.0 + image: malcolmnetsec/logstash-oss:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: logstash environment: + << : *process-variables << : *logstash-variables << : *common-beats-variables << : *common-lookup-variables @@ -248,10 +271,13 @@ services: build: context: . dockerfile: Dockerfiles/filebeat.Dockerfile - image: malcolmnetsec/filebeat-oss:2.1.0 + image: malcolmnetsec/filebeat-oss:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: filebeat environment: + << : *process-variables << : *nginx-variables << : *common-upload-variables << : *common-beats-variables @@ -286,15 +312,18 @@ services: build: context: . dockerfile: Dockerfiles/moloch.Dockerfile - image: malcolmnetsec/moloch:2.1.0 + image: malcolmnetsec/moloch:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: moloch env_file: - ./auth.env environment: + << : *process-variables << : *common-upload-variables << : *moloch-variables - MOLOCH_VERSION : '2.3.1' + MOLOCH_VERSION : '2.3.2' VIRTUAL_HOST : 'moloch.malcolm.local' ES_HOST : 'elasticsearch' ES_PORT : 9200 @@ -328,10 +357,13 @@ services: build: context: . 
dockerfile: Dockerfiles/zeek.Dockerfile - image: malcolmnetsec/zeek:2.1.0 + image: malcolmnetsec/zeek:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: zeek environment: + << : *process-variables << : *common-upload-variables << : *zeek-variables ulimits: @@ -355,10 +387,13 @@ services: build: context: . dockerfile: Dockerfiles/file-monitor.Dockerfile - image: malcolmnetsec/file-monitor:2.1.0 + image: malcolmnetsec/file-monitor:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: filemon environment: + << : *process-variables << : *zeek-variables expose: - 3310 @@ -375,8 +410,10 @@ services: build: context: . dockerfile: Dockerfiles/pcap-capture.Dockerfile - image: malcolmnetsec/pcap-capture:2.1.0 + image: malcolmnetsec/pcap-capture:2.1.1 restart: "no" + stdin_open: false + tty: true network_mode: host ulimits: memlock: @@ -388,6 +425,7 @@ services: - NET_RAW - SYS_ADMIN environment: + << : *process-variables << : *pcap-capture-variables volumes: - ./pcap/upload:/pcap @@ -401,10 +439,13 @@ services: build: context: . dockerfile: Dockerfiles/pcap-monitor.Dockerfile - image: malcolmnetsec/pcap-monitor:2.1.0 + image: malcolmnetsec/pcap-monitor:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: pcapmon environment: + << : *process-variables << : *common-upload-variables ELASTICSEARCH_URL : 'http://elasticsearch:9200' depends_on: @@ -424,12 +465,15 @@ services: build: context: . dockerfile: Dockerfiles/file-upload.Dockerfile - image: malcolmnetsec/file-upload:2.1.0 + image: malcolmnetsec/file-upload:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: upload env_file: - ./auth.env environment: + << : *process-variables SITE_NAME : 'Capture File and Log Archive Upload' VIRTUAL_HOST : 'upload.malcolm.local' depends_on: @@ -447,13 +491,16 @@ services: retries: 3 start_period: 60s htadmin: - image: malcolmnetsec/htadmin:2.1.0 + image: malcolmnetsec/htadmin:2.1.1 build: context: . 
dockerfile: Dockerfiles/htadmin.Dockerfile restart: "no" + stdin_open: false + tty: true hostname: htadmin environment: + << : *process-variables << : *auth-variables VIRTUAL_HOST : 'htadmin.malcolm.local' expose: @@ -469,13 +516,16 @@ services: retries: 3 start_period: 60s freq: - image: malcolmnetsec/freq:2.1.0 + image: malcolmnetsec/freq:2.1.1 build: context: . dockerfile: Dockerfiles/freq.Dockerfile restart: "no" + stdin_open: false + tty: true hostname: freq environment: + << : *process-variables << : *common-lookup-variables VIRTUAL_HOST : 'freq.malcolm.local' expose: @@ -487,13 +537,16 @@ services: retries: 3 start_period: 60s name-map-ui: - image: malcolmnetsec/name-map-ui:2.1.0 + image: malcolmnetsec/name-map-ui:2.1.1 build: context: . dockerfile: Dockerfiles/name-map-ui.Dockerfile restart: "no" + stdin_open: false + tty: true hostname: name-map-ui environment: + << : *process-variables VIRTUAL_HOST : 'name-map-ui.malcolm.local' expose: - 8080 @@ -511,10 +564,13 @@ services: build: context: . dockerfile: Dockerfiles/nginx.Dockerfile - image: malcolmnetsec/nginx-proxy:2.1.0 + image: malcolmnetsec/nginx-proxy:2.1.1 restart: "no" + stdin_open: false + tty: true hostname: nginx-proxy environment: + << : *process-variables << : *auth-variables << : *nginx-variables depends_on: diff --git a/elastalert/elastalert-start.sh b/elastalert/elastalert-start.sh index 892beafef..1021a8456 100755 --- a/elastalert/elastalert-start.sh +++ b/elastalert/elastalert-start.sh @@ -4,6 +4,8 @@ set -e +export PATH="/usr/local/bin:$PATH" + echo "Giving Elasticsearch at $ELASTICSEARCH_URL time to start..." elastic_search_status.sh -w diff --git a/file-monitor/supervisord.conf b/file-monitor/supervisord.conf index 5cef37486..2d12a0763 100644 --- a/file-monitor/supervisord.conf +++ b/file-monitor/supervisord.conf @@ -1,20 +1,20 @@ ; Copyright (c) 2020 Battelle Energy Alliance, LLC. All rights reserved. 
[unix_http_server] -file=/var/run/supervisor.sock ; (the path to the socket file) +file=/tmp/supervisor.sock ; (the path to the socket file) chmod=0700 [supervisord] nodaemon=true -logfile=/var/log/supervisor/supervisord.log -pidfile=/var/run/supervisord.pid -childlogdir=/var/log/supervisor +logfile=/dev/null +logfile_maxbytes=0 +pidfile=/tmp/supervisord.pid [rpcinterface:supervisor] supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface [supervisorctl] -serverurl=unix:///var/run/supervisor.sock +serverurl=unix:///tmp/supervisor.sock [program:watcher] command=/usr/local/bin/zeek_carve_watcher.py @@ -25,7 +25,6 @@ command=/usr/local/bin/zeek_carve_watcher.py --min-bytes %(ENV_EXTRACTED_FILE_MIN_BYTES)s --max-bytes %(ENV_EXTRACTED_FILE_MAX_BYTES)s --directory "%(ENV_ZEEK_EXTRACTOR_PATH)s" -user=monitor autostart=true startsecs=%(ENV_EXTRACTED_FILE_WATCHER_START_SLEEP)s startretries=0 @@ -48,7 +47,6 @@ command=/usr/local/bin/zeek_carve_scanner.py --malass-limit %(ENV_MALASS_MAX_REQUESTS)s --clamav %(ENV_EXTRACTED_FILE_ENABLE_CLAMAV)s --clamav-socket "%(ENV_CLAMD_SOCKET_FILE)s" -user=monitor autostart=true startsecs=%(ENV_EXTRACTED_FILE_WATCHER_START_SLEEP)s startretries=0 @@ -67,7 +65,6 @@ command=/usr/local/bin/zeek_carve_logger.py --preserve %(ENV_EXTRACTED_FILE_PRESERVATION)s --directory "%(ENV_ZEEK_EXTRACTOR_PATH)s" --zeek-log "%(ENV_ZEEK_LOG_DIRECTORY)s" -user=monitor autostart=true startsecs=%(ENV_EXTRACTED_FILE_WATCHER_START_SLEEP)s startretries=0 @@ -79,8 +76,7 @@ stdout_logfile_maxbytes=0 redirect_stderr=true [program:freshclam] -command=/usr/bin/freshclam freshclam --user monitor --config-file=/etc/clamav/freshclam.conf --daemon -user=monitor +command=/usr/bin/freshclam freshclam --config-file=/etc/clamav/freshclam.conf --daemon autostart=%(ENV_EXTRACTED_FILE_ENABLE_FRESHCLAM)s autorestart=true startsecs=0 @@ -94,7 +90,6 @@ redirect_stderr=true [program:clamd] command=/usr/sbin/clamd -c /etc/clamav/clamd.conf -user=monitor 
autostart=%(ENV_EXTRACTED_FILE_ENABLE_CLAMAV)s autorestart=true startsecs=0 diff --git a/file-upload/docker-entrypoint.sh b/file-upload/docker-entrypoint.sh index 96251eeda..716035bd5 100755 --- a/file-upload/docker-entrypoint.sh +++ b/file-upload/docker-entrypoint.sh @@ -18,9 +18,9 @@ then rm -f /etc/ssh/ssh_host_* dpkg-reconfigure openssh-server - useradd -g www-data -d /var/www/upload/server/php/chroot -s /sbin/nologin "$MALCOLM_USERNAME" + useradd -g $PGROUP -d /var/www/upload/server/php/chroot -s /sbin/nologin "$MALCOLM_USERNAME" usermod --password "$MALCOLM_PASSWORD" "$MALCOLM_USERNAME" - chown "$MALCOLM_USERNAME:www-data" /var/www/upload/server/php/chroot/files + chown :$PGROUP /var/www/upload/server/php/chroot/files chmod 775 /var/www/upload/server/php/chroot/files # This will break if $SITE_NAME contains a slash... diff --git a/file-upload/supervisord.conf b/file-upload/supervisord.conf index ff0d303a2..d11d54feb 100644 --- a/file-upload/supervisord.conf +++ b/file-upload/supervisord.conf @@ -1,20 +1,20 @@ ; Copyright (c) 2020 Battelle Energy Alliance, LLC. All rights reserved. 
[unix_http_server] -file=/var/run/supervisor.sock ; (the path to the socket file) +file=/tmp/supervisor.sock ; (the path to the socket file) chmod=0700 [supervisord] nodaemon=true -logfile=/var/log/supervisor/supervisord.log -pidfile=/var/run/supervisord.pid -childlogdir=/var/log/supervisor +logfile=/dev/null +logfile_maxbytes=0 +pidfile=/tmp/supervisord.pid [rpcinterface:supervisor] supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface [supervisorctl] -serverurl=unix:///var/run/supervisor.sock +serverurl=unix:///tmp/supervisor.sock [program:sshd] command=/usr/sbin/sshd -D @@ -25,7 +25,7 @@ stdout_logfile_maxbytes=0 redirect_stderr=true [program:php] -command=php-fpm7.3 -F -R -g /var/run/php-fpm.pid +command=php-fpm7.3 -F -R -g /tmp/php-fpm.pid stdout_logfile=/dev/fd/1 stdout_logfile_maxbytes=0 redirect_stderr=true diff --git a/filebeat/scripts/filebeat-watch-zeeklogs-uploads-folder.sh b/filebeat/scripts/filebeat-watch-zeeklogs-uploads-folder.sh index df3c993e4..8203f7b5b 100755 --- a/filebeat/scripts/filebeat-watch-zeeklogs-uploads-folder.sh +++ b/filebeat/scripts/filebeat-watch-zeeklogs-uploads-folder.sh @@ -13,9 +13,10 @@ do FILEMIME=$(file -b --mime-type "$NEWFILE") if ( echo "$FILEMIME" | grep --quiet -P "(application/gzip|application/x-gzip|application/x-7z-compressed|application/x-bzip2|application/x-cpio|application/x-lzip|application/x-lzma|application/x-rar-compressed|application/x-tar|application/x-xz|application/zip)" ); then # looks like this is a compressed file, we're assuming it's a zeek log archive to be processed by filebeat - sleep 0.1 && chown 1000:1000 "$NEWFILE" && (>&2 mv -v "$NEWFILE" "$PROCESS_DIR/") + sleep 0.1 && chown ${PUID:-${DEFAULT_UID}}:${PGID:-${DEFAULT_GID}} "$NEWFILE" && (>&2 mv -v "$NEWFILE" "$PROCESS_DIR/") else # unhandled file type uploaded, delete it - sleep 0.1 && chown 1000:1000 "$NEWFILE" && (>&2 rm "$NEWFILE") && echo "Removed \"$NEWFILE\", unhandled file type \"$FILEMIME\"" + sleep 0.1 && chown 
${PUID:-${DEFAULT_UID}}:${PGID:-${DEFAULT_GID}} "$NEWFILE" && (>&2 rm "$NEWFILE") && echo "Removed \"$NEWFILE\", unhandled file type \"$FILEMIME\"" fi done + diff --git a/filebeat/supervisord.conf b/filebeat/supervisord.conf index 2650ded0e..a275edd42 100644 --- a/filebeat/supervisord.conf +++ b/filebeat/supervisord.conf @@ -1,24 +1,24 @@ ; Copyright (c) 2020 Battelle Energy Alliance, LLC. All rights reserved. [unix_http_server] -file=/var/run/supervisor.sock ; (the path to the socket file) +file=/tmp/supervisor.sock ; (the path to the socket file) chmod=0700 [supervisord] nodaemon=true -logfile=/var/log/supervisor/supervisord.log -pidfile=/var/run/supervisord.pid -childlogdir=/var/log/supervisor +logfile=/dev/null +logfile_maxbytes=0 +pidfile=/tmp/supervisord.pid [rpcinterface:supervisor] supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface [supervisorctl] -serverurl=unix:///var/run/supervisor.sock +serverurl=unix:///tmp/supervisor.sock [program:filebeat] command=/usr/local/bin/docker-entrypoint -e --strict.perms=false -user=filebeat +user=%(ENV_PUSER)s startsecs=0 startretries=0 stopasgroup=true @@ -35,7 +35,7 @@ command=bash -c "/data/elastic_search_status.sh && /usr/local/bin/docker-entrypo --path.data /usr/share/filebeat-nginx/data \ -c /usr/share/filebeat-nginx/filebeat-nginx.yml \ --modules nginx" -user=filebeat +user=%(ENV_PUSER)s autostart=%(ENV_NGINX_LOG_ACCESS_AND_ERRORS)s startsecs=30 startretries=2000000000 @@ -48,6 +48,7 @@ redirect_stderr=true [program:watch-upload] command=/bin/bash -c "sleep 30 && /data/filebeat-watch-zeeklogs-uploads-folder.sh" +user=root startsecs=35 startretries=1 stopasgroup=true @@ -58,8 +59,9 @@ stdout_logfile_maxbytes=0 redirect_stderr=true [program:cron] -autorestart=false -command=/data/cron_env_centos.sh +autorestart=true +command=/usr/local/bin/supercronic -json "%(ENV_SUPERCRONIC_CRONTAB)s" +user=%(ENV_PUSER)s stopasgroup=true killasgroup=true stdout_logfile=/dev/fd/1 diff --git 
a/freq-server/supervisord.conf b/freq-server/supervisord.conf index af652db7b..86b621107 100644 --- a/freq-server/supervisord.conf +++ b/freq-server/supervisord.conf @@ -1,24 +1,23 @@ ; Copyright (c) 2020 Battelle Energy Alliance, LLC. All rights reserved. [unix_http_server] -file=/var/run/supervisor.sock ; (the path to the socket file) +file=/tmp/supervisor.sock ; (the path to the socket file) chmod=0700 [supervisord] nodaemon=true -logfile=/var/log/supervisor/supervisord.log -pidfile=/var/run/supervisord.pid -childlogdir=/var/log/supervisor +logfile=/dev/null +logfile_maxbytes=0 +pidfile=/tmp/supervisord.pid [rpcinterface:supervisor] supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface [supervisorctl] -serverurl=unix:///var/run/supervisor.sock +serverurl=unix:///tmp/supervisor.sock [program:freq] command=/usr/bin/python3 /opt/freq_server/freq_server.py -ip 0.0.0.0 %(ENV_FREQ_PORT)s /opt/freq_server/freq_table.freq -user=%(ENV_FREQ_USER)s autostart=%(ENV_FREQ_LOOKUP)s startsecs=5 startretries=2000000000 diff --git a/htadmin/supervisord.conf b/htadmin/supervisord.conf index c81749341..4bca840b4 100644 --- a/htadmin/supervisord.conf +++ b/htadmin/supervisord.conf @@ -1,23 +1,23 @@ ; Copyright (c) 2020 Battelle Energy Alliance, LLC. All rights reserved. 
[unix_http_server] -file=/var/run/supervisor.sock ; (the path to the socket file) +file=/tmp/supervisor.sock ; (the path to the socket file) chmod=0700 [supervisord] nodaemon=true -logfile=/var/log/supervisor/supervisord.log -pidfile=/var/run/supervisord.pid -childlogdir=/var/log/supervisor +logfile=/dev/null +logfile_maxbytes=0 +pidfile=/tmp/supervisord.pid [rpcinterface:supervisor] supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface [supervisorctl] -serverurl=unix:///var/run/supervisor.sock +serverurl=unix:///tmp/supervisor.sock [program:php] -command=php-fpm7.3 -F -R -g /var/run/php-fpm.pid +command=php-fpm7.3 -F -R -g /tmp/php-fpm.pid stdout_logfile=/dev/fd/1 stdout_logfile_maxbytes=0 redirect_stderr=true diff --git a/kibana/scripts/kibana-create-moloch-sessions-index.sh b/kibana/scripts/kibana-create-moloch-sessions-index.sh index f6ce95c1e..24e3b4d93 100755 --- a/kibana/scripts/kibana-create-moloch-sessions-index.sh +++ b/kibana/scripts/kibana-create-moloch-sessions-index.sh @@ -22,48 +22,48 @@ INDEX_TIME_FIELD=${MOLOCH_INDEX_TIME_FIELD:-"firstPacket"} # is the argument to automatically create this index enabled? if [[ "$CREATE_ES_MOLOCH_SESSION_INDEX" = "true" ]] ; then - echo "Giving Elasticsearch time to start before configuring Kibana..." - /data/elastic_search_status.sh 2>&1 && echo "Elasticsearch is running!" + # give Elasticsearch time to start before configuring Kibana + /data/elastic_search_status.sh >/dev/null 2>&1 # is the kibana process server up and responding to requests? - if curl -f -XGET "$KIBANA_URL/api/saved_objects/index-pattern/" ; then + if curl --silent --output /dev/null --fail -XGET "$KIBANA_URL/api/saved_objects/index-pattern/" ; then # have we not not already created the index pattern? - if ! curl -f -XGET "$KIBANA_URL/api/saved_objects/index-pattern/$INDEX_PATTERN_ID" ; then + if ! 
curl --silent --output /dev/null --fail -XGET "$KIBANA_URL/api/saved_objects/index-pattern/$INDEX_PATTERN_ID" ; then - echo "Importing Kibana saved objects..." + echo "Elasticsearch is running! Importing Kibana saved objects..." # load zeek_template containing zeek field type mappings - curl -XPOST -H "Content-Type: application/json" "$ES_URL/_template/zeek_template?include_type_name=true" -d "@/data/zeek_template.json" + curl --silent --output /dev/null --show-error -XPOST -H "Content-Type: application/json" "$ES_URL/_template/zeek_template?include_type_name=true" -d "@/data/zeek_template.json" # From https://github.com/elastic/kibana/issues/3709 # Create index pattern - curl -f -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + curl --silent --output /dev/null --show-error --fail -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ "$KIBANA_URL/api/saved_objects/index-pattern/$INDEX_PATTERN_ID" \ -d"{\"attributes\":{\"title\":\"$INDEX_PATTERN\",\"timeFieldName\":\"$INDEX_TIME_FIELD\"}}" # Make it the default index - curl -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ + curl --silent --output /dev/null --show-error -XPOST -H "Content-Type: application/json" -H "kbn-xsrf: anything" \ "$KIBANA_URL/api/kibana/settings/defaultIndex" \ -d"{\"value\":\"$INDEX_PATTERN_ID\"}" # install default dashboards, index patterns, etc. 
for i in /opt/kibana/dashboards/*.json; do - curl -XPOST "$KIBANA_URL/api/kibana/dashboards/import?force=true" -H 'kbn-xsrf:true' -H 'Content-type:application/json' -d "@$i" + curl --silent --output /dev/null --show-error -XPOST "$KIBANA_URL/api/kibana/dashboards/import?force=true" -H 'kbn-xsrf:true' -H 'Content-type:application/json' -d "@$i" done # set dark theme - curl -XPOST "$KIBANA_URL/api/kibana/settings/theme:darkMode" -H 'kbn-xsrf:true' -H 'Content-type:application/json' -d '{"value":true}' + curl --silent --output /dev/null --show-error -XPOST "$KIBANA_URL/api/kibana/settings/theme:darkMode" -H 'kbn-xsrf:true' -H 'Content-type:application/json' -d '{"value":true}' # set default query time range - curl -XPOST "$KIBANA_URL/api/kibana/settings" -H 'kbn-xsrf:true' -H 'Content-type:application/json' -d \ + curl --silent --output /dev/null --show-error -XPOST "$KIBANA_URL/api/kibana/settings" -H 'kbn-xsrf:true' -H 'Content-type:application/json' -d \ '{"changes":{"timepicker:timeDefaults":"{\n \"from\": \"now-24h\",\n \"to\": \"now\",\n \"mode\": \"quick\"}"}}' # turn off telemetry - curl -XPOST "$KIBANA_URL/api/telemetry/v2/optIn" -H 'kbn-xsrf:true' -H 'Content-type:application/json' -d '{"enabled":false}' + curl --silent --output /dev/null --show-error -XPOST "$KIBANA_URL/api/telemetry/v2/optIn" -H 'kbn-xsrf:true' -H 'Content-type:application/json' -d '{"enabled":false}' # pin filters by default - curl -XPOST "$KIBANA_URL/api/kibana/settings/filters:pinnedByDefault" -H 'kbn-xsrf:true' -H 'Content-type:application/json' -d '{"value":true}' + curl --silent --output /dev/null --show-error -XPOST "$KIBANA_URL/api/kibana/settings/filters:pinnedByDefault" -H 'kbn-xsrf:true' -H 'Content-type:application/json' -d '{"value":true}' fi fi fi diff --git a/kibana/supervisord.conf b/kibana/supervisord.conf index 940b05ef7..05710bd62 100644 --- a/kibana/supervisord.conf +++ b/kibana/supervisord.conf @@ -1,24 +1,23 @@ ; Copyright (c) 2020 Battelle Energy Alliance, LLC. 
All rights reserved. [unix_http_server] -file=/var/run/supervisor.sock ; (the path to the socket file) +file=/tmp/supervisor.sock ; (the path to the socket file) chmod=0700 [supervisord] nodaemon=true -logfile=/var/log/supervisor/supervisord.log -pidfile=/var/run/supervisord.pid -childlogdir=/var/log/supervisor +logfile=/dev/null +logfile_maxbytes=0 +pidfile=/tmp/supervisord.pid [rpcinterface:supervisor] supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface [supervisorctl] -serverurl=unix:///var/run/supervisor.sock +serverurl=unix:///tmp/supervisor.sock [program:kibana] command=/data/kibana.sh -user=kibana autostart=true startsecs=0 startretries=0 @@ -31,7 +30,6 @@ redirect_stderr=true [program:idxinit] command=bash -c "sleep 180 && /data/elastic_search_status.sh -w && /data/kibana_index_refresh.py -v" -user=kibana autostart=true autorestart=false startsecs=0 @@ -45,7 +43,6 @@ redirect_stderr=true [program:maps] command=/usr/bin/http-server /opt/maps --cors='*' -d false -i false --no-dotfiles -p %(ENV_KIBANA_OFFLINE_REGION_MAPS_PORT)s -user=kibana autostart=%(ENV_KIBANA_OFFLINE_REGION_MAPS)s startsecs=0 startretries=0 @@ -57,8 +54,8 @@ stdout_logfile_maxbytes=0 redirect_stderr=true [program:cron] -autorestart=false -command=/data/cron_env_centos.sh +autorestart=true +command=/usr/local/bin/supercronic -json "%(ENV_SUPERCRONIC_CRONTAB)s" stopasgroup=true killasgroup=true stdout_logfile=/dev/fd/1 diff --git a/logstash/supervisord.conf b/logstash/supervisord.conf index 7e23e2ee3..c90f652ad 100644 --- a/logstash/supervisord.conf +++ b/logstash/supervisord.conf @@ -5,9 +5,9 @@ port=0.0.0.0:9001 [supervisord] nodaemon=true -logfile=/var/log/supervisor/supervisord.log -pidfile=/var/run/supervisord.pid -childlogdir=/var/log/supervisor +logfile=/dev/null +logfile_maxbytes=0 +pidfile=/tmp/supervisord.pid [rpcinterface:supervisor] supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface @@ -17,7 +17,6 @@ 
serverurl=http://127.0.0.1:9001 [program:logstash] command=/usr/local/bin/logstash-start.sh -user=logstash autostart=true startsecs=0 startretries=0 diff --git a/moloch/patch/query_db_fields_pr1463.patch b/moloch/patch/query_db_fields_pr1463.patch deleted file mode 100644 index df2a6bf72..000000000 --- a/moloch/patch/query_db_fields_pr1463.patch +++ /dev/null @@ -1,264 +0,0 @@ -diff --git a/tests/general.t b/tests/general.t -index c9206ead..d7a991ab 100644 ---- a/tests/general.t -+++ b/tests/general.t -@@ -1,4 +1,4 @@ --use Test::More tests => 649; -+use Test::More tests => 659; - use Cwd; - use URI::Escape; - use MolochTest; -@@ -421,3 +421,11 @@ if (0) { - - # communityId - countTest(1, "date=-1&expression=" . uri_escape("(file=$pwd/socks-http-pass.pcap||file=$pwd/gre-sample.pcap)&&communityId=\"1:eMRxQSkNuVRbgi0elxmjkFvRujg=\"")); -+ -+# query DB field names by using db: prefix (#1461) -+ errTest("date=-1&expression=" . uri_escape("db:noSuchField=10.0.0.2")); -+ errTest("date=-1&expression=" . uri_escape("srcIp=10.0.0.2")); -+ countTest(1, "date=-1&expression=" . uri_escape("file=$pwd/bt-udp.pcap&&db:srcIp=10.0.0.2")); -+ countTest(3, "date=-1&expression=" . uri_escape("file=$pwd/bt-udp.pcap&&db:srcIp>=10.0.0.2")); -+ countTest(1, "date=-1&expression=" . uri_escape("(file=$pwd/dns-flags0110.pcap||file=$pwd/dns-dnskey.pcap)&&db:dstOui=Juniper*")); -+ countTest(24, "date=-1&expression=" . 
uri_escape("file=$pwd/wireshark-esp.pcap&&db:protocol=esp")); -diff --git a/viewer/molochparser.jison b/viewer/molochparser.jison -index 98038d81..56fa4bfa 100644 ---- a/viewer/molochparser.jison -+++ b/viewer/molochparser.jison -@@ -86,6 +86,26 @@ e - var util = require('util'); - var moment = require('moment'); - -+/* Given a field name, if prefixed with 'db:' return dbFieldsMap entry (i.e., looked up according to -+ * the Elasticsearch field name); otherwise return fieldsMap entry (see #1461) -+ */ -+function getFieldInfo(yy, field) -+{ -+ var info = null; -+ -+ if (field.startsWith('db:')) { -+ var dbField = field.substring(3); -+ if (yy.dbFieldsMap[dbField]) { -+ info = yy.dbFieldsMap[dbField]; -+ } -+ } else if (yy.fieldsMap[field]) { -+ info = yy.fieldsMap[field]; -+ } -+ -+ // console.log('getFieldInfo', field, info); -+ return info; -+} -+ - /* Build a list of all the field infos for ip field types. - * Can specify if a port field needs to be available for the type or not - */ -@@ -132,7 +152,7 @@ function getIpInfoList(yy, needPort) - * Arrays of all of the above - */ - function parseIpPort(yy, field, ipPortStr) { -- var dbField = yy.fieldsMap[field].dbField; -+ var dbField = getFieldInfo(yy, field).dbField; - - // Have just a single Ip, create obj for it - function singleIp(exp, dbField, ip, port) { -@@ -155,9 +175,10 @@ function parseIpPort(yy, field, ipPortStr) { - } - - if (port !== -1) { -- if (yy.fieldsMap[exp].portField) { -+ var expInfo = getFieldInfo(yy, exp); -+ if (expInfo.portField) { - obj = {bool: {must: [obj, {term: {}}]}}; -- obj.bool.must[1].term[yy.fieldsMap[exp].portField] = port; -+ obj.bool.must[1].term[expInfo.portField] = port; - } else { - throw exp + " doesn't support port"; - } -@@ -280,11 +301,10 @@ function stripQuotes (str) { - - function formatExists(yy, field, op) - { -- if (!yy.fieldsMap[field]) -+ var info = getFieldInfo(yy, field); -+ if (!info) - throw "Unknown field " + field; - -- var info = yy.fieldsMap[field]; -- - 
if (info.requiredRight && yy[info.requiredRight] !== true) { - throw field + " - permission denied"; - } -@@ -325,11 +345,10 @@ function formatQuery(yy, field, op, value) - checkRegex(value); - } - -- if (!yy.fieldsMap[field]) -+ var info = getFieldInfo(yy, field); -+ if (!info) - throw "Unknown field " + field; - -- var info = yy.fieldsMap[field]; -- - if (info.requiredRight && yy[info.requiredRight] !== true) { - throw field + " - permission denied"; - } -@@ -573,7 +592,7 @@ function checkRegex(str) { - } - - function field2Raw(yy, field) { -- var info = yy.fieldsMap[field]; -+ var info = getFieldInfo(yy, field); - var dbField = info.dbField; - if (info.rawField) - return info.rawField; -@@ -586,10 +605,9 @@ function field2Raw(yy, field) { - - function stringQuery(yy, field, str) { - -- var info = yy.fieldsMap[field]; -+ var info = getFieldInfo(yy, field); - var dbField = info.dbField; - -- - if (str[0] === "/" && str[str.length -1] === "/") { - checkRegex(str); - -diff --git a/viewer/molochparser.js b/viewer/molochparser.js -index 5b8e4912..8793b163 100644 ---- a/viewer/molochparser.js -+++ b/viewer/molochparser.js -@@ -84,7 +84,7 @@ performAction: function anonymous(yytext, yyleng, yylineno, yy, yystate /* actio - var $0 = $$.length - 1; - switch (yystate) { - case 1: -- return $$[$0-1]; -+ return $$[$0-1]; - break; - case 2: - this.$ = 'lt' -@@ -128,7 +128,7 @@ break; - case 19: - this.$ = formatQuery(yy, $$[$0-2], $$[$0-1], $$[$0]); - //console.log(util.inspect(this.$, false, 50)); -- -+ - break; - } - }, -@@ -285,6 +285,26 @@ parse: function parse(input) { - var util = require('util'); - var moment = require('moment'); - -+/* Given a field name, if prefixed with 'db:' return dbFieldsMap entry (i.e., looked up according to -+ * the Elasticsearch field name); otherwise return fieldsMap entry (see #1461) -+ */ -+function getFieldInfo(yy, field) -+{ -+ var info = null; -+ -+ if (field.startsWith('db:')) { -+ var dbField = field.substring(3); -+ if 
(yy.dbFieldsMap[dbField]) { -+ info = yy.dbFieldsMap[dbField]; -+ } -+ } else if (yy.fieldsMap[field]) { -+ info = yy.fieldsMap[field]; -+ } -+ -+ // console.log('getFieldInfo', field, info); -+ return info; -+} -+ - /* Build a list of all the field infos for ip field types. - * Can specify if a port field needs to be available for the type or not - */ -@@ -331,7 +351,7 @@ function getIpInfoList(yy, needPort) - * Arrays of all of the above - */ - function parseIpPort(yy, field, ipPortStr) { -- var dbField = yy.fieldsMap[field].dbField; -+ var dbField = getFieldInfo(yy, field).dbField; - - // Have just a single Ip, create obj for it - function singleIp(exp, dbField, ip, port) { -@@ -354,9 +374,10 @@ function parseIpPort(yy, field, ipPortStr) { - } - - if (port !== -1) { -- if (yy.fieldsMap[exp].portField) { -+ var expInfo = getFieldInfo(yy, exp); -+ if (expInfo.portField) { - obj = {bool: {must: [obj, {term: {}}]}}; -- obj.bool.must[1].term[yy.fieldsMap[exp].portField] = port; -+ obj.bool.must[1].term[expInfo.portField] = port; - } else { - throw exp + " doesn't support port"; - } -@@ -479,11 +500,10 @@ function stripQuotes (str) { - - function formatExists(yy, field, op) - { -- if (!yy.fieldsMap[field]) -+ var info = getFieldInfo(yy, field); -+ if (!info) - throw "Unknown field " + field; - -- var info = yy.fieldsMap[field]; -- - if (info.requiredRight && yy[info.requiredRight] !== true) { - throw field + " - permission denied"; - } -@@ -524,11 +544,10 @@ function formatQuery(yy, field, op, value) - checkRegex(value); - } - -- if (!yy.fieldsMap[field]) -+ var info = getFieldInfo(yy, field); -+ if (!info) - throw "Unknown field " + field; - -- var info = yy.fieldsMap[field]; -- - if (info.requiredRight && yy[info.requiredRight] !== true) { - throw field + " - permission denied"; - } -@@ -772,7 +791,7 @@ function checkRegex(str) { - } - - function field2Raw(yy, field) { -- var info = yy.fieldsMap[field]; -+ var info = getFieldInfo(yy, field); - var dbField = 
info.dbField; - if (info.rawField) - return info.rawField; -@@ -785,10 +804,9 @@ function field2Raw(yy, field) { - - function stringQuery(yy, field, str) { - -- var info = yy.fieldsMap[field]; -+ var info = getFieldInfo(yy, field); - var dbField = info.dbField; - -- - if (str[0] === "/" && str[str.length -1] === "/") { - checkRegex(str); - -diff --git a/viewer/viewer.js b/viewer/viewer.js -index ce2ae3bd..c9a60d82 100644 ---- a/viewer/viewer.js -+++ b/viewer/viewer.js -@@ -3026,6 +3026,7 @@ function buildSessionQuery (req, buildCb, queryOverride = null) { - molochparser.parser.yy = { - views: req.user.views, - fieldsMap: Config.getFieldsMap(), -+ dbFieldsMap: Config.getDBFieldsMap(), - prefix: internals.prefix, - emailSearch: req.user.emailSearch === true, - lookups: req.lookups, -@@ -9212,6 +9213,7 @@ function processCronQueries () { - molochparser.parser.yy = { - emailSearch: user.emailSearch === true, - fieldsMap: Config.getFieldsMap(), -+ dbFieldsMap: Config.getDBFieldsMap(), - prefix: internals.prefix, - lookups: lookups, - lookupTypeMap: internals.lookupTypeMap diff --git a/moloch/scripts/initmoloch.sh b/moloch/scripts/initmoloch.sh index 324f151f5..323d6f532 100755 --- a/moloch/scripts/initmoloch.sh +++ b/moloch/scripts/initmoloch.sh @@ -2,14 +2,14 @@ # Copyright (c) 2020 Battelle Energy Alliance, LLC. All rights reserved. -rm -f $MOLOCHDIR/initialized $MOLOCHDIR/runwise +rm -f /var/run/moloch/initialized /var/run/moloch/runwise echo "Giving Elasticsearch time to start..." /data/elastic_search_status.sh 2>&1 && echo "Elasticsearch is running!" #Configure Moloch to Run -if [ ! -f $MOLOCHDIR/configured ]; then - touch $MOLOCHDIR/configured +if [ ! -f /var/run/moloch/configured ]; then + touch /var/run/moloch/configured if [[ "$WISE" = "on" ]] ; then $MOLOCHDIR/bin/Configure --wise fi @@ -17,7 +17,7 @@ if [ ! 
-f $MOLOCHDIR/configured ]; then fi if [[ "$WISE" = "on" ]] ; then - touch $MOLOCHDIR/runwise + touch /var/run/moloch/runwise echo "Giving WISE time to start..." sleep 5 until curl -sS --output /dev/null "http://127.0.0.1:8081/fields?ver=1" @@ -72,6 +72,6 @@ if [[ -n $ES_MAX_SHARDS_PER_NODE ]]; then curl -sS -H'Content-Type: application/json' -XPUT http://$ES_HOST:$ES_PORT/_cluster/settings -d "{ \"persistent\": { \"cluster.max_shards_per_node\": \"$ES_MAX_SHARDS_PER_NODE\" } }" fi -touch $MOLOCHDIR/initialized +touch /var/run/moloch/initialized # the (viewer|wise)_service.sh scripts will start/restart those processes diff --git a/moloch/scripts/viewer_service.sh b/moloch/scripts/viewer_service.sh index eea39a825..482f44e76 100755 --- a/moloch/scripts/viewer_service.sh +++ b/moloch/scripts/viewer_service.sh @@ -4,7 +4,7 @@ while true; do - if [[ -e $MOLOCHDIR/configured && -f $MOLOCHDIR/initialized && "$VIEWER" == "on" ]]; then + if [[ -e /var/run/moloch/configured && -f /var/run/moloch/initialized && "$VIEWER" == "on" ]]; then echo "Launch viewer..." cd $MOLOCHDIR/viewer $MOLOCHDIR/bin/node viewer.js -c $MOLOCHDIR/etc/config.ini | tee -a $MOLOCHDIR/logs/viewer.log 2>&1 diff --git a/moloch/scripts/wise_service.sh b/moloch/scripts/wise_service.sh index ef4380dd4..a333f2dae 100755 --- a/moloch/scripts/wise_service.sh +++ b/moloch/scripts/wise_service.sh @@ -4,7 +4,7 @@ while true; do - if [[ ("$WISE" == "on") && (-f $MOLOCHDIR/runwise) && (-f $MOLOCHDIR/etc/wise.ini) ]]; then + if [[ ("$WISE" == "on") && (-f /var/run/moloch/runwise) && (-f $MOLOCHDIR/etc/wise.ini) ]]; then echo "Launch wise..." pushd $MOLOCHDIR/wiseService >/dev/null 2>&1 $MOLOCHDIR/bin/node wiseService.js -c $MOLOCHDIR/etc/wise.ini diff --git a/moloch/supervisord.conf b/moloch/supervisord.conf index adccd3b80..880d6a9de 100644 --- a/moloch/supervisord.conf +++ b/moloch/supervisord.conf @@ -1,20 +1,20 @@ ; Copyright (c) 2020 Battelle Energy Alliance, LLC. All rights reserved. 
[unix_http_server] -file=/var/run/supervisor.sock ; (the path to the socket file) +file=/tmp/supervisor.sock ; (the path to the socket file) chmod=0700 [supervisord] nodaemon=true -logfile=/var/log/supervisor/supervisord.log -pidfile=/var/run/supervisord.pid -childlogdir=/var/log/supervisor +logfile=/dev/null +logfile_maxbytes=0 +pidfile=/tmp/supervisord.pid [rpcinterface:supervisor] supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface [supervisorctl] -serverurl=unix:///var/run/supervisor.sock +serverurl=unix:///tmp/supervisor.sock [program:initialize] command=/data/initmoloch.sh @@ -30,7 +30,6 @@ redirect_stderr=true [program:wise] command=/data/wise_service.sh -user=%(ENV_MOLOCHUSER)s startsecs=0 startretries=0 stopasgroup=true @@ -41,7 +40,6 @@ redirect_stderr=true [program:viewer] command=/data/viewer_service.sh -user=%(ENV_MOLOCHUSER)s startsecs=0 startretries=0 stopasgroup=true @@ -62,7 +60,6 @@ command=python3 /data/pcap_moloch_processor.py --moloch /data/moloch/bin/moloch-capture --autotag "%(ENV_AUTO_TAG)s" --managed "%(ENV_MANAGE_PCAP_FILES)s" -user=%(ENV_MOLOCHUSER)s startsecs=15 startretries=1 stopasgroup=true @@ -74,7 +71,6 @@ redirect_stderr=true [program:readme] command=python3 -m http.server 8000 -user=%(ENV_MOLOCHUSER)s directory=%(ENV_MOLOCHDIR)s/doc stopasgroup=true killasgroup=true diff --git a/name-map-ui/config/nginx.conf b/name-map-ui/config/nginx.conf index 97b316e81..b93d077a4 100644 --- a/name-map-ui/config/nginx.conf +++ b/name-map-ui/config/nginx.conf @@ -1,6 +1,6 @@ worker_processes 1; error_log stderr warn; -pid /run/nginx.pid; +pid /tmp/nginx.pid; events { worker_connections 1024; @@ -16,8 +16,8 @@ http { '"$http_user_agent" "$http_x_forwarded_for" ' '$request_time $upstream_response_time $pipe $upstream_cache_status'; - access_log /dev/stdout main_timed; - error_log /dev/stderr notice; + access_log /var/log/nginx/access.log main_timed; + error_log /var/log/nginx/error.log notice; keepalive_timeout 65; diff 
--git a/name-map-ui/config/supervisor_logstash_ctl.conf b/name-map-ui/config/supervisor_logstash_ctl.conf index de572adcf..56ba9af18 100644 --- a/name-map-ui/config/supervisor_logstash_ctl.conf +++ b/name-map-ui/config/supervisor_logstash_ctl.conf @@ -4,7 +4,7 @@ nodaemon=true logfile=/dev/null logfile_maxbytes=0 -pidfile=/run/supervisord.pid +pidfile=/tmp/supervisord-logstash.pid [supervisorctl] serverurl=http://logstash:9001 diff --git a/name-map-ui/config/supervisord.conf b/name-map-ui/config/supervisord.conf index c01f68e10..3096496b0 100644 --- a/name-map-ui/config/supervisord.conf +++ b/name-map-ui/config/supervisord.conf @@ -1,23 +1,23 @@ ; Copyright (c) 2020 Battelle Energy Alliance, LLC. All rights reserved. [unix_http_server] -file=/tmp/supervisor.sock ; (the path to the socket file) +file=/tmp/supervisor-main.sock ; (the path to the socket file) chmod=0700 [supervisord] nodaemon=true logfile=/dev/null logfile_maxbytes=0 -pidfile=/run/supervisord.pid +pidfile=/tmp/supervisor-main.pid [rpcinterface:supervisor] supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface [supervisorctl] -serverurl=unix:///tmp/supervisor.sock +serverurl=unix:///tmp/supervisor-main.sock [program:php-fpm] -command=php-fpm7 -F +command=/usr/sbin/php-fpm7 -F stopasgroup=true killasgroup=true stdout_logfile=/dev/fd/1 @@ -27,7 +27,7 @@ autorestart=false startretries=0 [program:nginx] -command=nginx -g 'daemon off;' +command=/usr/sbin/nginx -g 'daemon off;' stopasgroup=true killasgroup=true stdout_logfile=/dev/fd/1 @@ -36,6 +36,30 @@ redirect_stderr=true autorestart=false startretries=0 +[program:logaccess] +command=/usr/bin/tail -F /var/log/nginx/access.log +startsecs=10 +startretries=2000000000 +stopasgroup=true +killasgroup=true +stdout_logfile=/dev/fd/1 +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/null +stdout_logfile_maxbytes=0 +redirect_stderr=false + +[program:logerrors] +command=/usr/bin/tail -F /var/log/nginx/error.log +startsecs=10 
+startretries=2000000000 +stopasgroup=true +killasgroup=true +stdout_logfile=/dev/fd/1 +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/null +stdout_logfile_maxbytes=0 +redirect_stderr=false + [program:watch-upload] command=/bin/bash -c "sleep 15 && /usr/local/bin/name-map-save-watch.sh" startsecs=20 diff --git a/name-map-ui/scripts/name-map-save-watch.sh b/name-map-ui/scripts/name-map-save-watch.sh index 666bf8437..5ca9b7cae 100755 --- a/name-map-ui/scripts/name-map-save-watch.sh +++ b/name-map-ui/scripts/name-map-save-watch.sh @@ -16,7 +16,7 @@ do (>/dev/null tee "$DEST_MAP" < "$NEWFILE") && echo "\"$NEWFILE\" -> \"$DEST_MAP\"" rm -f "$NEWFILE" else - # invalud or unhandled file type uploaded, delete it + # invalid or unhandled file type uploaded, delete it (>&2 rm -f "$NEWFILE") && echo "Removed \"$NEWFILE\" (\"$FILEMIME\"): invalid file type or format" fi done diff --git a/nginx/supervisord.conf b/nginx/supervisord.conf index 0382d9163..82275f5b7 100644 --- a/nginx/supervisord.conf +++ b/nginx/supervisord.conf @@ -1,20 +1,20 @@ ; Copyright (c) 2020 Battelle Energy Alliance, LLC. All rights reserved. 
[unix_http_server] -file=/var/run/supervisor.sock ; (the path to the socket file) +file=/tmp/supervisor.sock ; (the path to the socket file) chmod=0700 [supervisord] nodaemon=true -logfile=/var/log/supervisor/supervisord.log -pidfile=/var/run/supervisord.pid -childlogdir=/var/log/supervisor +logfile=/dev/null +logfile_maxbytes=0 +pidfile=/tmp/supervisord.pid [rpcinterface:supervisor] supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface [supervisorctl] -serverurl=unix:///var/run/supervisor.sock +serverurl=unix:///tmp/supervisor.sock [program:nginx] command=/usr/sbin/nginx @@ -28,6 +28,7 @@ redirect_stderr=true [program:logaccess] command=/usr/bin/tail -F /var/log/nginx/access.log +user=%(ENV_PUSER)s startsecs=10 startretries=2000000000 stopasgroup=true @@ -40,6 +41,7 @@ redirect_stderr=false [program:logerrors] command=/usr/bin/tail -F /var/log/nginx/error.log +user=%(ENV_PUSER)s startsecs=10 startretries=2000000000 stopasgroup=true diff --git a/pcap-capture/scripts/supervisor.sh b/pcap-capture/scripts/supervisor.sh index c17490f1e..ad1c84676 100755 --- a/pcap-capture/scripts/supervisor.sh +++ b/pcap-capture/scripts/supervisor.sh @@ -58,10 +58,17 @@ function CreateCaptureConfigs() { fi # config dir exists } +function SetCaptureCapabilities() { + setcap 'CAP_NET_RAW+eip CAP_NET_ADMIN+eip' /sbin/ethtool || true + setcap 'CAP_NET_RAW+eip CAP_NET_ADMIN+eip' /usr/sbin/tcpdump || true + setcap 'CAP_NET_RAW+eip CAP_NET_ADMIN+eip CAP_IPC_LOCK+eip CAP_SYS_ADMIN+eip' /usr/sbin/netsniff-ng || true +} + CreateCaptureConfigs +SetCaptureCapabilities if [[ -z $PCAP_ROTATE_SECONDS ]] && [[ -n $PCAP_ROTATE_MINUTES ]]; then export PCAP_ROTATE_SECONDS=$(echo "$PCAP_ROTATE_MINUTES * 60" | bc) fi -supervisord -c "$CONFIG_FILE" +supervisord -c "$CONFIG_FILE" -n diff --git a/pcap-capture/supervisord.conf b/pcap-capture/supervisord.conf index e779e8b99..5eec3d81f 100644 --- a/pcap-capture/supervisord.conf +++ b/pcap-capture/supervisord.conf @@ -1,24 +1,26 @@ ; 
Copyright (c) 2020 Battelle Energy Alliance, LLC. All rights reserved. [unix_http_server] -file=/var/run/supervisor.sock ; (the path to the socket file) +file=/tmp/supervisor.sock ; (the path to the socket file) chmod=0700 [supervisord] nodaemon=true -logfile=/var/log/supervisor/supervisord.log -pidfile=/var/run/supervisord.pid -childlogdir=/var/log/supervisor +user=root +logfile=/dev/null +logfile_maxbytes=0 +pidfile=/tmp/supervisord.pid [rpcinterface:supervisor] supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface [supervisorctl] -serverurl=unix:///var/run/supervisor.sock +serverurl=unix:///tmp/supervisor.sock [program:netsniff-roll] startsecs=15 command=/usr/local/bin/netsniff-roll.sh +user=%(ENV_PUSER)s stopasgroup=true killasgroup=true stdout_logfile=/dev/fd/1 @@ -26,7 +28,6 @@ stdout_logfile_maxbytes=0 redirect_stderr=true autostart=%(ENV_PCAP_ENABLE_NETSNIFF)s directory=%(ENV_PCAP_PATH)s -user=%(ENV_PCAP_USER)s [include] files = /etc/supervisor.d/*.conf diff --git a/pcap-capture/templates/netsniff.template b/pcap-capture/templates/netsniff.template index 8dd32f17b..b69c083e3 100644 --- a/pcap-capture/templates/netsniff.template +++ b/pcap-capture/templates/netsniff.template @@ -1,9 +1,9 @@ [program:netsniff-$IFACE] command=/usr/sbin/netsniff-ng -i "$IFACE" -T "%(ENV_PCAP_NETSNIFF_MAGIC)s" -o "%(ENV_PCAP_PATH)s" -P "netsniff-$IFACE_" -F "%(ENV_PCAP_ROTATE_MEGABYTES)sMiB" --silent "%(ENV_PCAP_FILTER)s" +user=%(ENV_PUSER)s startsecs=5 startretries=3 stopasgroup=true killasgroup=true autostart=%(ENV_PCAP_ENABLE_NETSNIFF)s directory=%(ENV_PCAP_PATH)s -user=%(ENV_PCAP_USER)s diff --git a/pcap-capture/templates/tcpdump.template b/pcap-capture/templates/tcpdump.template index aa9f7c3d3..7061fdf42 100644 --- a/pcap-capture/templates/tcpdump.template +++ b/pcap-capture/templates/tcpdump.template @@ -1,9 +1,9 @@ [program:tcpdump-$IFACE] command=/usr/sbin/tcpdump -i "$IFACE" -s %(ENV_PCAP_SNAPLEN)s -w 
"tcpdump-$IFACE_%(ENV_PCAP_TCPDUMP_FILENAME_PATTERN)s" -G %(ENV_PCAP_ROTATE_SECONDS)s -C %(ENV_PCAP_ROTATE_MEGABYTES)s -K -n "%(ENV_PCAP_FILTER)s" +user=%(ENV_PUSER)s startsecs=5 startretries=3 stopasgroup=true killasgroup=true autostart=%(ENV_PCAP_ENABLE_TCPDUMP)s directory=%(ENV_PCAP_PATH)s -user=%(ENV_PCAP_USER)s diff --git a/pcap-monitor/scripts/watch-pcap-uploads-folder.sh b/pcap-monitor/scripts/watch-pcap-uploads-folder.sh index e751eef22..b071480e0 100755 --- a/pcap-monitor/scripts/watch-pcap-uploads-folder.sh +++ b/pcap-monitor/scripts/watch-pcap-uploads-folder.sh @@ -17,11 +17,11 @@ do FILEMIME=$(file -b --mime-type "$NEWFILE") if [[ "$FILEMIME" == 'application/vnd.tcpdump.pcap' ]] || [[ "$FILEMIME" == 'application/x-pcapng' ]] || [[ "$FILEMAGIC" == *"pcap-ng"* ]]; then # a pcap file to be processed by dropping it into $PROCESS_DIR - sleep 0.1 && chown 1000:1000 "$NEWFILE" && (>&2 mv -v "$NEWFILE" "$PROCESS_DIR/") + sleep 0.1 && chown ${PUID:-${DEFAULT_UID}}:${PGID:-${DEFAULT_GID}} "$NEWFILE" && (>&2 mv -v "$NEWFILE" "$PROCESS_DIR/") elif [[ -d "$ZEEK_UPLOAD_DIR" ]] && ( echo "$FILEMIME" | grep --quiet -P "(application/gzip|application/x-gzip|application/x-7z-compressed|application/x-bzip2|application/x-cpio|application/x-lzip|application/x-lzma|application/x-rar-compressed|application/x-tar|application/x-xz|application/zip)" ); then # looks like this is a compressed file, we're assuming it's a zeek log archive to be processed by filebeat - sleep 0.1 && chown 1000:1000 "$NEWFILE" && (>&2 mv -v "$NEWFILE" "$ZEEK_UPLOAD_DIR/") + sleep 0.1 && chown ${PUID:-${DEFAULT_UID}}:${PGID:-${DEFAULT_GID}} "$NEWFILE" && (>&2 mv -v "$NEWFILE" "$ZEEK_UPLOAD_DIR/") else # unhandled file type uploaded, delete it diff --git a/pcap-monitor/supervisord.conf b/pcap-monitor/supervisord.conf index 50a95152c..975602ddf 100644 --- a/pcap-monitor/supervisord.conf +++ b/pcap-monitor/supervisord.conf @@ -1,20 +1,20 @@ ; Copyright (c) 2020 Battelle Energy Alliance, LLC. 
All rights reserved. [unix_http_server] -file=/var/run/supervisor.sock ; (the path to the socket file) +file=/tmp/supervisor.sock ; (the path to the socket file) chmod=0700 [supervisord] nodaemon=true -logfile=/var/log/supervisor/supervisord.log -pidfile=/var/run/supervisord.pid -childlogdir=/var/log/supervisor +logfile=/dev/null +logfile_maxbytes=0 +pidfile=/tmp/supervisord.pid [rpcinterface:supervisor] supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface [supervisorctl] -serverurl=unix:///var/run/supervisor.sock +serverurl=unix:///tmp/supervisor.sock [program:watch-upload] command=/bin/bash -c "sleep 30 && /usr/local/bin/watch-pcap-uploads-folder.sh" @@ -36,7 +36,7 @@ command=python3 /usr/local/bin/pcap_watcher.py --ignore-existing "%(ENV_PCAP_PIPELINE_IGNORE_PREEXISTING)s" --start-sleep 60 --directory "%(ENV_PCAP_PATH)s"/processed -user=%(ENV_MONITOR_USER)s +user=%(ENV_PUSER)s startsecs=65 startretries=1 stopasgroup=true diff --git a/scripts/control.py b/scripts/control.py index f8f164800..9db3dabf7 100755 --- a/scripts/control.py +++ b/scripts/control.py @@ -7,7 +7,9 @@ import argparse import errno +import getpass import glob +import json import os import platform import re @@ -75,10 +77,16 @@ def logs(): ) """, re.VERBOSE | re.IGNORECASE) + serviceRegEx = re.compile(r'^(?P.+?\|)\s*(?P.*)$') + err, out = run_process([dockerComposeBin, '-f', args.composeFile, 'ps'], debug=args.debug) print("\n".join(out)) - process = Popen([dockerComposeBin, '-f', args.composeFile, 'logs', '-f'], stdout=PIPE) + # increase COMPOSE_HTTP_TIMEOUT to be ridiculously large so docker-compose never times out the TTY doing debug output + osEnv = os.environ.copy() + osEnv['COMPOSE_HTTP_TIMEOUT'] = '999999999' + + process = Popen([dockerComposeBin, '-f', args.composeFile, 'logs', '-f'], env=osEnv, stdout=PIPE) while True: output = process.stdout.readline() if (len(output) == 0) and (process.poll() is not None): @@ -86,11 +94,67 @@ def logs(): if output: outputStr 
= output.decode().strip() outputStrEscaped = EscapeAnsi(outputStr) - if not ignoreRegEx.match(outputStrEscaped): - print(outputStr if coloramaImported else outputStrEscaped) + if ignoreRegEx.match(outputStrEscaped): + pass ### print('!!!!!!!: {}'.format(outputStr)) else: - pass - # print('!!!!!!!: {}'.format(outputStr)) + serviceMatch = serviceRegEx.search(outputStrEscaped) + serviceMatchFmt = serviceRegEx.search(outputStr) if coloramaImported else serviceMatch + serviceStr = serviceMatchFmt.group('service') if (serviceMatchFmt is not None) else '' + messageStr = serviceMatch.group('message') if (serviceMatch is not None) else '' + outputJson = LoadStrIfJson(messageStr) + if (outputJson is not None): + + # if there's a timestamp in the JSON, move it outside of the JSON to the beginning of the log string + timeKey = None + if 'time' in outputJson: + timeKey = 'time' + elif 'timestamp' in outputJson: + timeKey = 'timestamp' + elif '@timestamp' in outputJson: + timeKey = '@timestamp' + timeStr = '' + if timeKey is not None: + timeStr = outputJson[timeKey] + ' ' + outputJson.pop(timeKey, None) + + if ('job.schedule' in outputJson) and ('job.position' in outputJson) and ('job.command' in outputJson): + + # this is a status output line from supercronic, let's format and cleant it up so it fits in better with the rest of the logs + + # remove some clutter for the display + for noisyKey in ['level', 'channel', 'iteration', 'job.position', 'job.schedule']: + outputJson.pop(noisyKey, None) + + # if it's just command and message, format those NOT as JSON + jobCmd = outputJson['job.command'] + jobStatus = outputJson['msg'] + if (len(outputJson.keys()) == 2) and ('job.command' in outputJson) and ('msg' in outputJson): + # if it's the most common status (starting or job succeeded) then don't print unless debug mode + if args.debug or ((jobStatus != 'starting') and (jobStatus != 'job succeeded')): + print('{}{} {} {}: {}'.format(serviceStr, Style.RESET_ALL if coloramaImported 
else '', timeStr, jobCmd, jobStatus)) + else: + pass + + else: + # standardize and print the JSON output + print('{}{} {}{}'.format(serviceStr, Style.RESET_ALL if coloramaImported else '', timeStr, json.dumps(outputJson))) + + elif ('kibana' in serviceStr): + # this is an output line from kibana, let's clean it up a bit: remove some clutter for the display + for noisyKey in ['type', 'tags', 'pid', 'method', 'prevState', 'prevMsg']: + outputJson.pop(noisyKey, None) + + # standardize and print the JSON output + print('{}{} {}{}'.format(serviceStr, Style.RESET_ALL if coloramaImported else '', timeStr, json.dumps(outputJson))) + + else: + # standardize and print the JSON output + print('{}{} {}{}'.format(serviceStr, Style.RESET_ALL if coloramaImported else '', timeStr, json.dumps(outputJson))) + + else: + # just a regular non-JSON string, print as-is + print(outputStr if coloramaImported else outputStrEscaped) + else: time.sleep(0.5) process.poll() @@ -188,8 +252,12 @@ def start(): else: raise + # increase COMPOSE_HTTP_TIMEOUT to be ridiculously large so docker-compose never times out the TTY doing debug output + osEnv = os.environ.copy() + osEnv['COMPOSE_HTTP_TIMEOUT'] = '999999999' + # start docker - err, out = run_process([dockerComposeBin, '-f', args.composeFile, 'up', '--detach'], debug=args.debug) + err, out = run_process([dockerComposeBin, '-f', args.composeFile, 'up', '--detach'], env=osEnv, debug=args.debug) if (err == 0): eprint("Started Malcolm\n\n") eprint("In a few minutes, Malcolm services will be accessible via the following URLs:") @@ -541,6 +609,10 @@ def main(): os.chdir(MalcolmPath) + # don't run this as root + if (pyPlatform != PLATFORM_WINDOWS) and (('SUDO_UID' in os.environ.keys()) or (getpass.getuser() == 'root')): + raise Exception('{} should not be run as root'.format(ScriptName)) + # make sure docker/docker-compose is available dockerBin = 'docker.exe' if ((pyPlatform == PLATFORM_WINDOWS) and Which('docker.exe')) else 'docker' dockerComposeBin 
= 'docker-compose.exe' if ((pyPlatform == PLATFORM_WINDOWS) and Which('docker-compose.exe')) else 'docker-compose' diff --git a/scripts/install.py b/scripts/install.py index 30b02de77..0518e7eb3 100755 --- a/scripts/install.py +++ b/scripts/install.py @@ -31,7 +31,7 @@ from malcolm_common import * ################################################################################################### -DOCKER_COMPOSE_INSTALL_VERSION="1.25.1" +DOCKER_COMPOSE_INSTALL_VERSION="1.26.2" DEB_GPG_KEY_FINGERPRINT = '0EBFCD88' # used to verify GPG key for Docker Debian repository @@ -97,7 +97,7 @@ def run_process(self, command, stdout=True, stderr=True, stdin=None, privileged= if privileged and (len(self.sudoCmd) > 0): command = self.sudoCmd + command - return run_process(command, stdout, stderr, stdin, retry, retrySleepSec, self.debug) + return run_process(command, stdout=stdout, stderr=stderr, stdin=stdin, retry=retry, retrySleepSec=retrySleepSec, debug=self.debug) #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ def package_is_installed(self, package): @@ -214,10 +214,28 @@ def tweak_malcolm_runtime(self, malcolm_install_path, expose_logstash_default=Fa composeFiles = [os.path.realpath(args.configFile)] malcolm_install_path = os.path.dirname(composeFiles[0]) + # figure out what UID/GID to run non-rood processes under docker as + puid = '1000' + pgid = '1000' + try: + if (self.platform == PLATFORM_LINUX): + puid = str(os.getuid()) + pgid = str(os.getgid()) + if (puid == '0') or (pgid == '0'): + raise Exception('it is preferrable not to run Malcolm as root, prompting for UID/GID instead') + except: + puid = '1000' + pgid = '1000' + + while (not puid.isdigit()) or (not pgid.isdigit()) or (not InstallerYesOrNo('Malcolm processes will run as UID {} and GID {}. 
Is this OK?'.format(puid, pgid), default=True)): + puid = InstallerAskForString('Enter user ID (UID) for running non-root Malcolm processes') + pgid = InstallerAskForString('Enter group ID (GID) for running non-root Malcolm processes') + + # guestimate how much memory we should use based on total system memory + if self.debug: eprint("{} contains {}, system memory is {} GiB".format(malcolm_install_path, composeFiles, self.totalMemoryGigs)) - # guestimate how much memory we should use based on total system memory if self.totalMemoryGigs >= 63.0: esMemory = '30g' lsMemory = '6g' @@ -333,9 +351,9 @@ def tweak_malcolm_runtime(self, malcolm_install_path, expose_logstash_default=Fa autoZeek = InstallerYesOrNo('Automatically analyze all PCAP files with Zeek?', default=True) reverseDns = InstallerYesOrNo('Perform reverse DNS lookup locally for source and destination IP addresses in Zeek logs?', default=False) autoOui = InstallerYesOrNo('Perform hardware vendor OUI lookups for MAC addresses?', default=True) - autoFreq = InstallerYesOrNo('Perform string randomness scoring on some fields?', default=False) + autoFreq = InstallerYesOrNo('Perform string randomness scoring on some fields?', default=True) logstashOpen = InstallerYesOrNo('Expose Logstash port to external hosts?', default=expose_logstash_default) - logstashSsl = logstashOpen and InstallerYesOrNo('Should Logstash require SSL for Zeek logs? (Note: This requires the forwarder to be similarly configured and a corresponding copy of the client SSL files.)', default=False) + logstashSsl = logstashOpen and InstallerYesOrNo('Should Logstash require SSL for Zeek logs? 
(Note: This requires the forwarder to be similarly configured and a corresponding copy of the client SSL files.)', default=True) externalEsForward = InstallerYesOrNo('Forward Logstash logs to external Elasticstack instance?', default=False) if externalEsForward: externalEsHost = InstallerAskForString('Enter external Elasticstack host:port (e.g., 10.0.0.123:9200)') @@ -425,6 +443,12 @@ def tweak_malcolm_runtime(self, malcolm_install_path, expose_logstash_default=Fa if (currentService is not None) and (restartMode is not None) and re.match(r'^\s*restart\s*:.*$', line): # elasticsearch backup directory line = "{}restart: {}".format(serviceIndent * 2, restartMode) + elif 'PUID' in line: + # process UID + line = re.sub(r'(PUID\s*:\s*)(\S+)', r"\g<1>{}".format(puid), line) + elif 'PGID' in line: + # process GID + line = re.sub(r'(PGID\s*:\s*)(\S+)', r"\g<1>{}".format(pgid), line) elif 'NGINX_BASIC_AUTH' in line: # basic (useBasicAuth=true) vs ldap (useBasicAuth=false) line = re.sub(r'(NGINX_BASIC_AUTH\s*:\s*)(\S+)', r'\g<1>{}'.format("'true'" if useBasicAuth else "'false'"), line) diff --git a/scripts/malcolm_common.py b/scripts/malcolm_common.py index 645f69bc9..9a02d0e2d 100644 --- a/scripts/malcolm_common.py +++ b/scripts/malcolm_common.py @@ -6,6 +6,7 @@ from __future__ import print_function import getpass +import json import os import platform import re @@ -151,6 +152,14 @@ def SizeHumanFormat(num, suffix='B'): num /= 1024.0 return "%.1f%s%s" % (num, 'Yi', suffix) +################################################################################################### +# is this string valid json? 
if so, load and return it +def LoadStrIfJson(jsonStr): + try: + return json.loads(jsonStr) + except ValueError as e: + return None + ################################################################################################### # run command with arguments and return its exit code, stdout, and stderr def check_output_input(*popenargs, **kwargs): @@ -182,14 +191,16 @@ def check_output_input(*popenargs, **kwargs): return retcode, output, errput -#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -def run_process(command, stdout=True, stderr=True, stdin=None, retry=0, retrySleepSec=5, debug=False): +################################################################################################### +# run command with arguments and return its exit code, stdout, and stderr +def run_process(command, stdout=True, stderr=True, stdin=None, retry=0, retrySleepSec=5, cwd=None, env=None, debug=False): + retcode = -1 output = [] try: # run the command - retcode, cmdout, cmderr = check_output_input(command, input=stdin.encode() if (PY3 and stdin) else stdin) + retcode, cmdout, cmderr = check_output_input(command, input=stdin.encode() if (PY3 and stdin) else stdin, cwd=cwd, env=env) # split the output on newlines to return a list if PY3: @@ -209,7 +220,7 @@ def run_process(command, stdout=True, stderr=True, stdin=None, retry=0, retrySle if (retcode != 0) and retry and (retry > 0): # sleep then retry time.sleep(retrySleepSec) - return run_process(command, stdout, stderr, stdin, retry-1, retrySleepSec, debug) + return run_process(command, stdout, stderr, stdin, retry-1, retrySleepSec, cwd, env, debug) else: return retcode, output diff --git a/sensor-iso/README.md b/sensor-iso/README.md index 1ad05c959..59a10a48f 100644 --- a/sensor-iso/README.md +++ b/sensor-iso/README.md @@ -399,7 +399,7 @@ Building the ISO may take 90 minutes or more depending on your system. 
As the bu ``` … -Finished, created "/sensor-build/hedgehog-2.1.0.iso" +Finished, created "/sensor-build/hedgehog-2.1.1.iso" … ``` diff --git a/sensor-iso/docs/Notes.md b/sensor-iso/docs/Notes.md index 380fb0115..ca4bdbe4e 100644 --- a/sensor-iso/docs/Notes.md +++ b/sensor-iso/docs/Notes.md @@ -113,12 +113,12 @@ $ /usr/sbin/tcpdump \ ### Compiling Moloch from source -At the time of writing, the [current stable release](https://github.com/aol/moloch/blob/master/CHANGELOG) of Moloch is [v2.3.1](https://github.com/aol/moloch/releases/tag/v2.3.1). The following bash script was used to install Moloch's build dependencies, download Moloch, build a Debian .deb package using [fpm](https://github.com/jordansissel/fpm) and install it. In building Hedgehog Linux, the building of this .deb is done inside a Docker container dedicated to that purpose. +At the time of writing, the [current stable release](https://github.com/aol/moloch/blob/master/CHANGELOG) of Moloch is [v2.3.2](https://github.com/aol/moloch/releases/tag/v2.3.2). The following bash script was used to install Moloch's build dependencies, download Moloch, build a Debian .deb package using [fpm](https://github.com/jordansissel/fpm) and install it. In building Hedgehog Linux, the building of this .deb is done inside a Docker container dedicated to that purpose. 
```bash #!/bin/bash -MOLOCH_VERSION="2.3.1" +MOLOCH_VERSION="2.3.2" MOLOCHDIR="/opt/moloch" OUTPUT_DIR="/tmp" diff --git a/sensor-iso/moloch/Dockerfile b/sensor-iso/moloch/Dockerfile index 4aedfdaf4..8c0a3a836 100644 --- a/sensor-iso/moloch/Dockerfile +++ b/sensor-iso/moloch/Dockerfile @@ -6,7 +6,7 @@ LABEL maintainer="malcolm.netsec@gmail.com" ENV DEBIAN_FRONTEND noninteractive -ENV MOLOCH_VERSION "2.3.1" +ENV MOLOCH_VERSION "2.3.2" ENV MOLOCHDIR "/opt/moloch" RUN sed -i "s/buster main/buster main contrib non-free/g" /etc/apt/sources.list && \ diff --git a/shared/bin/configure-capture.py b/shared/bin/configure-capture.py index 04981a17d..e02068be5 100755 --- a/shared/bin/configure-capture.py +++ b/shared/bin/configure-capture.py @@ -788,7 +788,7 @@ def main(): # optionally, filebeat can use SSL if Logstash is configured for it logstash_ssl = "false" logstash_ssl_verify = "none" - if (d.yesno("Forward Zeek logs over SSL? (Note: This requires the destination to be similarly configured and a corresponding copy of the client SSL files.)", yes_label="Unencrypted", no_label="SSL") != Dialog.OK): + if (d.yesno("Forward Zeek logs over SSL? 
(Note: This requires the destination to be similarly configured and a corresponding copy of the client SSL files.)", yes_label="SSL", no_label="Unencrypted") == Dialog.OK): logstash_ssl = "true" if (d.yesno("Logstash SSL verification", yes_label="None", no_label="Force Peer") != Dialog.OK): logstash_ssl_verify = "force_peer" diff --git a/shared/bin/docker-uid-gid-setup.sh b/shared/bin/docker-uid-gid-setup.sh new file mode 100755 index 000000000..e2e64474d --- /dev/null +++ b/shared/bin/docker-uid-gid-setup.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +set -e + +unset ENTRYPOINT_CMD +unset ENTRYPOINT_ARGS +[ "$#" -ge 1 ] && ENTRYPOINT_CMD="$1" && [ "$#" -gt 1 ] && shift 1 && ENTRYPOINT_ARGS=( "$@" ) + +# modify the UID/GID for the default user/group (for example, 1000 -> 1001) +usermod --non-unique --uid ${PUID:-${DEFAULT_UID}} ${PUSER} +groupmod --non-unique --gid ${PGID:-${DEFAULT_GID}} ${PGROUP} + +# change user/group ownership of any files/directories belonging to the original IDs +if [[ -n ${PUID} ]] && [[ "${PUID}" != "${DEFAULT_UID}" ]]; then + find / -path /sys -prune -o -path /proc -prune -o -user ${DEFAULT_UID} -exec chown -f ${PUID} "{}" \; || true +fi +if [[ -n ${PGID} ]] && [[ "${PGID}" != "${DEFAULT_GID}" ]]; then + find / -path /sys -prune -o -path /proc -prune -o -group ${DEFAULT_GID} -exec chown -f :${PGID} "{}" \; || true +fi + +# if there are semicolon-separated PUSER_CHOWN entries explicitly specified, chown them too +if [[ -n ${PUSER_CHOWN} ]]; then + IFS=';' read -ra ENTITIES <<< "${PUSER_CHOWN}" + for ENTITY in "${ENTITIES[@]}"; do + chown -R ${PUSER}:${PGROUP} "${ENTITY}" || true + done +fi + +# determine if we are now dropping privileges to exec ENTRYPOINT_CMD +if [[ "$PUSER_PRIV_DROP" == "true" ]]; then + EXEC_USER="${PUSER}" + USER_HOME="$(getent passwd ${PUSER} | cut -d: -f6)" +else + EXEC_USER="${USER:-root}" + USER_HOME="${HOME:-/root}" +fi + +# execute the entrypoint command specified +su --shell /bin/bash --preserve-environment ${EXEC_USER} << 
EOF +export USER="${EXEC_USER}" +export HOME="${USER_HOME}" +whoami +id +if [ ! -z "${ENTRYPOINT_CMD}" ]; then + if [ -z "${ENTRYPOINT_ARGS}" ]; then + "${ENTRYPOINT_CMD}" + else + "${ENTRYPOINT_CMD}" $(printf "%q " "${ENTRYPOINT_ARGS[@]}") + fi +fi +EOF diff --git a/zeek/supervisord.conf b/zeek/supervisord.conf index db115cde3..1e96d264b 100644 --- a/zeek/supervisord.conf +++ b/zeek/supervisord.conf @@ -1,20 +1,20 @@ ; Copyright (c) 2020 Battelle Energy Alliance, LLC. All rights reserved. [unix_http_server] -file=/var/run/supervisor.sock ; (the path to the socket file) +file=/tmp/supervisor.sock ; (the path to the socket file) chmod=0700 [supervisord] nodaemon=true -logfile=/var/log/supervisor/supervisord.log -pidfile=/var/run/supervisord.pid -childlogdir=/var/log/supervisor +logfile=/dev/null +logfile_maxbytes=0 +pidfile=/tmp/supervisord.pid [rpcinterface:supervisor] supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface [supervisorctl] -serverurl=unix:///var/run/supervisor.sock +serverurl=unix:///tmp/supervisor.sock [program:pcap-zeek] command=python3 /usr/local/bin/pcap_zeek_processor.py @@ -29,7 +29,6 @@ command=python3 /usr/local/bin/pcap_zeek_processor.py --autozeek "%(ENV_ZEEK_AUTO_ANALYZE_PCAP_FILES)s" --extract "%(ENV_ZEEK_EXTRACTOR_MODE)s" --zeek-directory /zeek/upload -user=%(ENV_ZEEKUSER)s startsecs=15 startretries=1 stopasgroup=true