diff --git a/docs/en/getting-started/docker/.env b/docs/en/getting-started/docker/.env index 2d2edc9db..1ab47e8d5 100644 --- a/docs/en/getting-started/docker/.env +++ b/docs/en/getting-started/docker/.env @@ -1,3 +1,29 @@ -COMPOSE_PROJECT_NAME=es -CERTS_DIR=/usr/share/elasticsearch/config/certificates -VERSION={version} +# Password for the 'elastic' user (at least 6 characters) +ELASTIC_PASSWORD= + +# Password for the 'kibana_system' user (at least 6 characters) +KIBANA_PASSWORD= + +# Version of Elastic products +STACK_VERSION={version} + +# Set the cluster name +CLUSTER_NAME=docker-cluster + +# Set to 'basic' or 'trial' to automatically start the 30-day trial +LICENSE=basic +#LICENSE=trial + +# Port to expose Elasticsearch HTTP API to the host +ES_PORT=9200 +#ES_PORT=127.0.0.1:9200 + +# Port to expose Kibana to the host +KIBANA_PORT=5601 +#KIBANA_PORT=80 + +# Increase or decrease based on the available host memory (in bytes) +MEM_LIMIT=1073741824 + +# Project namespace (defaults to the current folder name if not set) +#COMPOSE_PROJECT_NAME=myproject \ No newline at end of file diff --git a/docs/en/getting-started/docker/create-certs.yml b/docs/en/getting-started/docker/create-certs.yml deleted file mode 100644 index 36c1573aa..000000000 --- a/docs/en/getting-started/docker/create-certs.yml +++ /dev/null @@ -1,29 +0,0 @@ -version: '2.2' - -services: - create_certs: - image: docker.elastic.co/elasticsearch/elasticsearch:${VERSION} - container_name: create_certs - command: > - bash -c ' - yum install -y -q -e 0 unzip; - if [[ ! -f /certs/bundle.zip ]]; then - bin/elasticsearch-certutil cert --silent --pem --in config/certificates/instances.yml -out /certs/bundle.zip; - unzip /certs/bundle.zip -d /certs; - fi; - chown -R 1000:0 /certs - ' - working_dir: /usr/share/elasticsearch - volumes: - - certs:/certs - - .:/usr/share/elasticsearch/config/certificates - networks: - - elastic - -volumes: - certs: - driver: local - -networks: - elastic: - driver: bridge diff --git a/docs/en/getting-started/docker/docker-compose.yml b/docs/en/getting-started/docker/docker-compose.yml index ab6ba579c..c783d4975 100644 --- a/docs/en/getting-started/docker/docker-compose.yml +++ b/docs/en/getting-started/docker/docker-compose.yml @@ -1,83 +1,230 @@ -version: "2.2" -services: - es-node01: - image: docker.elastic.co/elasticsearch/elasticsearch:{version} - container_name: es-node01 - environment: - - node.name=es-node01 - - cluster.name=es-docker-cluster - - discovery.seed_hosts=es-node02,es-node03 - - cluster.initial_master_nodes=es-node01,es-node02,es-node03 - - bootstrap.memory_lock=true - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - ulimits: - memlock: - soft: -1 - hard: -1 - volumes: - - data01:/usr/share/elasticsearch/data - ports: - - 9200:9200 - networks: - - elastic - - es-node02: - image: docker.elastic.co/elasticsearch/elasticsearch:{version} - container_name: es-node02 - environment: - - node.name=es-node02 - - cluster.name=es-docker-cluster - - discovery.seed_hosts=es-node01,es-node03 - - cluster.initial_master_nodes=es-node01,es-node02,es-node03 - - bootstrap.memory_lock=true - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - ulimits: - memlock: - soft: -1 - hard: -1 - volumes: - - data02:/usr/share/elasticsearch/data - networks: - - elastic - - es-node03: - image: docker.elastic.co/elasticsearch/elasticsearch:{version} - container_name: es-node03 - environment: - - node.name=es-node03 - - cluster.name=es-docker-cluster - - discovery.seed_hosts=es-node01,es-node02 - - 
cluster.initial_master_nodes=es-node01,es-node02,es-node03 - - bootstrap.memory_lock=true - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - ulimits: - memlock: - soft: -1 - hard: -1 - volumes: - - data03:/usr/share/elasticsearch/data - networks: - - elastic - - kib01: - image: docker.elastic.co/kibana/kibana:{version} - container_name: kib01 - ports: - - 5601:5601 - environment: - ELASTICSEARCH_URL: http://es-node01:9200 - ELASTICSEARCH_HOSTS: '["http://es-node01:9200","http://es-node02:9200","http://es-node03:9200"]' - networks: - - elastic - -volumes: - data01: - driver: local - data02: - driver: local - data03: - driver: local - -networks: - elastic: - driver: bridge +version: "2.2" + +services: + setup: + image: docker.elastic.co/elasticsearch/elasticsearch:{version} + volumes: + - certs:/usr/share/elasticsearch/config/certs + user: "0" + command: > + bash -c ' + if [ x${ELASTIC_PASSWORD} == x ]; then + echo "Set the ELASTIC_PASSWORD environment variable in the .env file"; + exit 1; + elif [ x${KIBANA_PASSWORD} == x ]; then + echo "Set the KIBANA_PASSWORD environment variable in the .env file"; + exit 1; + fi; + if [ ! -f certs/ca.zip ]; then + echo "Creating CA"; + bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip; + unzip config/certs/ca.zip -d config/certs; + fi; + if [ ! -f certs/certs.zip ]; then + echo "Creating certs"; + echo -ne \ + "instances:\n"\ + " - name: es01\n"\ + " dns:\n"\ + " - es01\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + " - name: es02\n"\ + " dns:\n"\ + " - es02\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + " - name: es03\n"\ + " dns:\n"\ + " - es03\n"\ + " - localhost\n"\ + " ip:\n"\ + " - 127.0.0.1\n"\ + > config/certs/instances.yml; + bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key; + unzip config/certs/certs.zip -d config/certs; + fi; + echo "Setting file permissions" + chown -R root:root config/certs; + find . -type d -exec chmod 750 \{\} \;; + find . 
-type f -exec chmod 640 \{\} \;; + echo "Waiting for Elasticsearch availability"; + until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done; + echo "Setting kibana_system password"; + until curl -s -X POST --cacert config/certs/ca/ca.crt -u elastic:${ELASTIC_PASSWORD} -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done; + echo "All done!"; + ' + healthcheck: + test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"] + interval: 1s + timeout: 5s + retries: 120 + + es01: + depends_on: + setup: + condition: service_healthy + image: docker.elastic.co/elasticsearch/elasticsearch:{version} + volumes: + - certs:/usr/share/elasticsearch/config/certs + - esdata01:/usr/share/elasticsearch/data + ports: + - ${ES_PORT}:9200 + environment: + - node.name=es01 + - cluster.name=${CLUSTER_NAME} + - cluster.initial_master_nodes=es01,es02,es03 + - discovery.seed_hosts=es02,es03 + - ELASTIC_PASSWORD=${ELASTIC_PASSWORD} + - bootstrap.memory_lock=true + - xpack.security.enabled=true + - xpack.security.http.ssl.enabled=true + - xpack.security.http.ssl.key=certs/es01/es01.key + - xpack.security.http.ssl.certificate=certs/es01/es01.crt + - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.http.ssl.verification_mode=certificate + - xpack.security.transport.ssl.enabled=true + - xpack.security.transport.ssl.key=certs/es01/es01.key + - xpack.security.transport.ssl.certificate=certs/es01/es01.crt + - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.license.self_generated.type=${LICENSE} + mem_limit: ${MEM_LIMIT} + ulimits: + memlock: + soft: -1 + hard: -1 + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'", + ] + interval: 10s + timeout: 10s + retries: 120 + + es02: + depends_on: + - es01 + image: docker.elastic.co/elasticsearch/elasticsearch:{version} + volumes: + - certs:/usr/share/elasticsearch/config/certs + - esdata02:/usr/share/elasticsearch/data + environment: + - node.name=es02 + - cluster.name=${CLUSTER_NAME} + - cluster.initial_master_nodes=es01,es02,es03 + - discovery.seed_hosts=es01,es03 + - bootstrap.memory_lock=true + - xpack.security.enabled=true + - xpack.security.http.ssl.enabled=true + - xpack.security.http.ssl.key=certs/es02/es02.key + - xpack.security.http.ssl.certificate=certs/es02/es02.crt + - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.http.ssl.verification_mode=certificate + - xpack.security.transport.ssl.enabled=true + - xpack.security.transport.ssl.key=certs/es02/es02.key + - xpack.security.transport.ssl.certificate=certs/es02/es02.crt + - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.license.self_generated.type=${LICENSE} + mem_limit: ${MEM_LIMIT} + ulimits: + memlock: + soft: -1 + hard: -1 + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'", + ] + interval: 10s + timeout: 10s + retries: 120 + + es03: + depends_on: + - es02 + image: docker.elastic.co/elasticsearch/elasticsearch:{version} + volumes: + - 
certs:/usr/share/elasticsearch/config/certs + - esdata03:/usr/share/elasticsearch/data + environment: + - node.name=es03 + - cluster.name=${CLUSTER_NAME} + - cluster.initial_master_nodes=es01,es02,es03 + - discovery.seed_hosts=es01,es02 + - bootstrap.memory_lock=true + - xpack.security.enabled=true + - xpack.security.http.ssl.enabled=true + - xpack.security.http.ssl.key=certs/es03/es03.key + - xpack.security.http.ssl.certificate=certs/es03/es03.crt + - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.http.ssl.verification_mode=certificate + - xpack.security.transport.ssl.enabled=true + - xpack.security.transport.ssl.key=certs/es03/es03.key + - xpack.security.transport.ssl.certificate=certs/es03/es03.crt + - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt + - xpack.security.transport.ssl.verification_mode=certificate + - xpack.license.self_generated.type=${LICENSE} + mem_limit: ${MEM_LIMIT} + ulimits: + memlock: + soft: -1 + hard: -1 + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'", + ] + interval: 10s + timeout: 10s + retries: 120 + + kibana: + depends_on: + es01: + condition: service_healthy + es02: + condition: service_healthy + es03: + condition: service_healthy + image: docker.elastic.co/kibana/kibana:{version} + volumes: + - certs:/usr/share/kibana/config/certs + - kibanadata:/usr/share/kibana/data + ports: + - ${KIBANA_PORT}:5601 + environment: + - SERVERNAME=kibana + - ELASTICSEARCH_HOSTS=https://es01:9200 + - ELASTICSEARCH_USERNAME=kibana_system + - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD} + - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt + mem_limit: ${MEM_LIMIT} + healthcheck: + test: + [ + "CMD-SHELL", + "curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'", + ] + interval: 10s + timeout: 10s + retries: 120 + +volumes: + certs: + driver: local + esdata01: + driver: local + esdata02: + driver: local + esdata03: + driver: local + kibanadata: + driver: local diff --git a/docs/en/getting-started/docker/elastic-docker-tls.yml b/docs/en/getting-started/docker/elastic-docker-tls.yml deleted file mode 100644 index 8ea9c68bd..000000000 --- a/docs/en/getting-started/docker/elastic-docker-tls.yml +++ /dev/null @@ -1,136 +0,0 @@ -version: "2.2" - -services: - es-node01: - image: docker.elastic.co/elasticsearch/elasticsearch:${VERSION} - container_name: es-node01 - environment: - - node.name=es-node01 - - cluster.name=es-docker-cluster - - discovery.seed_hosts=es-node02,es-node03 - - cluster.initial_master_nodes=es-node01,es-node02,es-node03 - - bootstrap.memory_lock=true - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - - xpack.license.self_generated.type=trial # <1> - - xpack.security.enabled=true - - xpack.security.http.ssl.enabled=true # <2> - - xpack.security.http.ssl.key=$CERTS_DIR/es-node01/es-node01.key - - xpack.security.http.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt - - xpack.security.http.ssl.certificate=$CERTS_DIR/es-node01/es-node01.crt - - xpack.security.transport.ssl.enabled=true # <3> - - xpack.security.transport.ssl.verification_mode=certificate # <4> - - xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt - - xpack.security.transport.ssl.certificate=$CERTS_DIR/es-node01/es-node01.crt - - xpack.security.transport.ssl.key=$CERTS_DIR/es-node01/es-node01.key - ulimits: - memlock: - soft: -1 - hard: -1 - volumes: - - data01:/usr/share/elasticsearch/data - - 
certs:$CERTS_DIR - ports: - - 9200:9200 - networks: - - elastic - - healthcheck: - test: curl --cacert $CERTS_DIR/ca/ca.crt -s https://localhost:9200 >/dev/null; if [[ $$? == 52 ]]; then echo 0; else echo 1; fi - interval: 30s - timeout: 10s - retries: 5 - - es-node02: - image: docker.elastic.co/elasticsearch/elasticsearch:${VERSION} - container_name: es-node02 - environment: - - node.name=es-node02 - - cluster.name=es-docker-cluster - - discovery.seed_hosts=es-node01,es-node03 - - cluster.initial_master_nodes=es-node01,es-node02,es-node03 - - bootstrap.memory_lock=true - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - - xpack.license.self_generated.type=trial - - xpack.security.enabled=true - - xpack.security.http.ssl.enabled=true - - xpack.security.http.ssl.key=$CERTS_DIR/es-node02/es-node02.key - - xpack.security.http.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt - - xpack.security.http.ssl.certificate=$CERTS_DIR/es-node02/es-node02.crt - - xpack.security.transport.ssl.enabled=true - - xpack.security.transport.ssl.verification_mode=certificate - - xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt - - xpack.security.transport.ssl.certificate=$CERTS_DIR/es-node02/es-node02.crt - - xpack.security.transport.ssl.key=$CERTS_DIR/es-node02/es-node02.key - ulimits: - memlock: - soft: -1 - hard: -1 - volumes: - - data02:/usr/share/elasticsearch/data - - certs:$CERTS_DIR - networks: - - elastic - - es-node03: - image: docker.elastic.co/elasticsearch/elasticsearch:${VERSION} - container_name: es-node03 - environment: - - node.name=es-node03 - - cluster.name=es-docker-cluster - - discovery.seed_hosts=es-node01,es-node02 - - cluster.initial_master_nodes=es-node01,es-node02,es-node03 - - bootstrap.memory_lock=true - - "ES_JAVA_OPTS=-Xms512m -Xmx512m" - - xpack.license.self_generated.type=trial - - xpack.security.enabled=true - - xpack.security.http.ssl.enabled=true - - xpack.security.http.ssl.key=$CERTS_DIR/es-node03/es-node03.key - - xpack.security.http.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt - - xpack.security.http.ssl.certificate=$CERTS_DIR/es-node03/es-node03.crt - - xpack.security.transport.ssl.enabled=true - - xpack.security.transport.ssl.verification_mode=certificate - - xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt - - xpack.security.transport.ssl.certificate=$CERTS_DIR/es-node03/es-node03.crt - - xpack.security.transport.ssl.key=$CERTS_DIR/es-node03/es-node03.key - ulimits: - memlock: - soft: -1 - hard: -1 - volumes: - - data03:/usr/share/elasticsearch/data - - certs:$CERTS_DIR - networks: - - elastic - kib01: - image: docker.elastic.co/kibana/kibana:${VERSION} - container_name: kib01 - depends_on: { "es-node01": { "condition": "service_healthy" } } - ports: - - 5601:5601 - environment: - SERVERNAME: localhost - ELASTICSEARCH_URL: https://es-node01:9200 - ELASTICSEARCH_HOSTS: https://es-node01:9200 - ELASTICSEARCH_USERNAME: kibana_system - ELASTICSEARCH_PASSWORD: CHANGEME - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES: $CERTS_DIR/ca/ca.crt - SERVER_SSL_ENABLED: "true" - SERVER_SSL_KEY: $CERTS_DIR/kib01/kib01.key - SERVER_SSL_CERTIFICATE: $CERTS_DIR/kib01/kib01.crt - volumes: - - certs:$CERTS_DIR - networks: - - elastic -volumes: - data01: - driver: local - data02: - driver: local - data03: - driver: local - certs: - driver: local - -networks: - elastic: - driver: bridge diff --git a/docs/en/getting-started/docker/instances.yml b/docs/en/getting-started/docker/instances.yml deleted file mode 100644 index c60e5f4a0..000000000 --- 
a/docs/en/getting-started/docker/instances.yml +++ /dev/null @@ -1,26 +0,0 @@ -instances: - - name: es-node01 - dns: - - es-node01 - - localhost - ip: - - 127.0.0.1 - - - name: es-node02 - dns: - - es-node02 - - localhost - ip: - - 127.0.0.1 - - - name: es-node03 - dns: - - es-node03 - - localhost - ip: - - 127.0.0.1 - - - name: "kib01" - dns: - - kib01 - - localhost diff --git a/docs/en/getting-started/get-started-docker.asciidoc b/docs/en/getting-started/get-started-docker.asciidoc index ef367e5ff..472260234 100644 --- a/docs/en/getting-started/get-started-docker.asciidoc +++ b/docs/en/getting-started/get-started-docker.asciidoc @@ -13,8 +13,7 @@ and configured by default. This option is great for quickly getting started with You can also <> to create a secured, multi-node cluster with a connected {kib} instance. This -option requires more manual steps, but results in a more resilient cluster with -greater capacity and reliability. +option results in a more resilient cluster with greater capacity and reliability. [[run-docker-secure]] [discrete] @@ -113,7 +112,7 @@ endif::[] ifeval::["{release-state}"!="unreleased"] [source,sh,subs="attributes"] ---- -docker run --name es-node01 --net elastic -p 9200:9200 -it docker.elastic.co/elasticsearch/elasticsearch:{version} +docker run --name es01 --net elastic -p 9200:9200 -it docker.elastic.co/elasticsearch/elasticsearch:{version} ---- endif::[] @@ -210,7 +209,7 @@ endif::[] ifeval::["{release-state}"!="unreleased"] [source,sh,subs="attributes"] ---- -docker run --name kib01 --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{version} +docker run --name kibana --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{version} ---- When you start {kib}, a unique link is output to your terminal. @@ -313,7 +312,7 @@ To remove the containers and their network, run: ---- docker network rm elastic docker rm es01 -docker rm kib01 +docker rm kibana ---- [discrete] @@ -323,12 +322,18 @@ docker rm kib01 To get a multi-node {es} cluster and {kib} up and running in Docker with security enabled, you can use Docker Compose. +This configuration provides a simple method of starting a secured cluster that +you can use for development before building a distributed deployment with +multiple hosts. + [discrete] ==== Prerequisites Install the appropriate https://docs.docker.com/get-docker/[Docker application] for your operating system. +If you're running on Linux, install https://docs.docker.com/compose/install/[Docker Compose]. + [NOTE] ==== Make sure that Docker is allotted at least 4GB of memory. In Docker Desktop, @@ -339,189 +344,121 @@ Settings (Windows). [discrete] ==== Prepare the environment -Create the following Docker Compose and configuration files. These files are also -available from the +Create the following configuration files in a new, empty directory. These files +are also available from the https://github.com/elastic/stack-docs/blob/master/docs/en/getting-started/docker/[elastic/stack-docs] repository on GitHub. -- ifeval::["{release-state}"=="unreleased"] NOTE: Version {version} of {es} has not been released, -so the sample compose and configuration files are not yet available for this version. -See the {stack-gs-current}/get-started-docker.html[current version] for the latest sample files. +so the sample Docker Compose and configuration files are not yet available for +this version. See the {stack-gs-current}/get-started-docker.html[current version] +for the latest sample files. 
endif::[] +-- -* `instances.yml` identifies the instances you need to create certificates for. -* `.env` sets environment variables to specify the {es} version and -the location where the {es} certificates will be created. -* `create-certs.yml` is a Docker Compose file that launches a container to generate the certificates -for {es} and {kib}. -* `elastic-docker-tls.yml` is a Docker Compose file that brings up a three-node {es} cluster -and a {kib} instance with TLS enabled so you can see how things work. -This all-in-one configuration is a handy way to bring up your first dev cluster before -you build a distributed deployment with multiple hosts. - +-- ifeval::["{release-state}"!="unreleased"] -[discrete] -===== `instances.yml` -["source","yaml"] ---- -include::docker/instances.yml[] ---- [discrete] +[[docker-env-file]] ===== `.env` + +The `.env` file sets environment variables that are used when you run the +`docker-compose.yml` configuration file. Ensure that you specify a strong +password for the `elastic` and `kibana_system` users with the +`ELASTIC_PASSWORD` and `KIBANA_PASSWORD` variables. These variables are +referenced by the `docker-compose.yml` file. + ["source","txt",subs="attributes"] ---- include::docker/.env[] ---- [discrete] -===== `create-certs.yml` -["source","txt"] ---- -include::docker/create-certs.yml[] ---- +[[docker-compose-file]] +===== `docker-compose.yml` -[discrete] -===== `elastic-docker-tls.yml` -["source","txt"] +This `docker-compose.yml` file creates a three-node secure {es} cluster with authentication and network encryption enabled, and a {kib} instance securely connected to it. + +.Exposing ports +**** +This configuration exposes port `9200` on all network interfaces. Because +of how Docker handles ports, a port that isn't bound to `localhost` leaves your +{es} cluster publicly accessible, potentially ignoring any firewall settings. +If you don't want to expose port `9200` to external hosts, set the value for +`ES_PORT` in the `.env` file to something like `127.0.0.1:9200`. {es} will +then only be accessible from the host machine itself. +**** + +[source,yaml,subs="attributes"] ---- -include::docker/elastic-docker-tls.yml[] +include::docker/docker-compose.yml[] ---- -<1> Generate and apply a trial license that supports Transport Layer Security. -<2> Enable Transport Layer Security to encrypt client communications. -<3> Enable Transport Layer Security to encrypt internode communications. -<4> Allow the use of self-signed certificates by not requiring hostname verification. endif::[] -- [discrete] ==== Start your cluster with security enabled and configured -. Generate certificates for {es} by bringing up the `create-certs` container: +. Modify the `.env` file and enter strong password values for both the +`ELASTIC_PASSWORD` and `KIBANA_PASSWORD` variables. + --- -["source","sh"] ---- -docker-compose -f create-certs.yml run --rm create_certs ---- - --- - -. Bring up the three-node {es} cluster: -+ --- -["source","sh"] ---- -docker-compose -f elastic-docker-tls.yml up -d ---- - -IMPORTANT: At this point, {kib} cannot connect to the {es} cluster. -You must generate a password for the built-in `kibana_system` user, update the `ELASTICSEARCH_PASSWORD` -in the compose file, and restart to enable {kib} to communicate with the secured cluster. +NOTE: You must use the `ELASTIC_PASSWORD` value for further interactions with +the cluster. The `KIBANA_PASSWORD` value is only used internally when +configuring {kib}. --- -. 
Run the `elasticsearch-setup-passwords` tool to generate passwords for all built-in users, -including the `kibana_system` user. If you don't use PowerShell on Windows, remove the trailing `\`characters -and join the lines before running this command. +. Create and start the three-node {es} cluster and {kib} instance: + --- ["source","sh"] ---- -docker exec es01 /bin/bash -c "bin/elasticsearch-setup-passwords \ -auto --batch --url https://es-node01:9200" +docker-compose up -d ---- -IMPORTANT: Make a note of the generated passwords. -You must configure the `kibana_system` user password in the compose file to enable {kib} to connect to {es}, -and you'll need the password for the `elastic` superuser to -log in to {kib} and submit requests to {es}. --- +. When the deployment has started, open a browser and navigate to http://localhost:5601[http://localhost:5601] to +access {kib}, where you can load sample data and interact with your cluster. -. Set `ELASTICSEARCH_PASSWORD` in the `elastic-docker-tls.yml` compose file to the password -generated for the `kibana_system` user. -+ --- -ifeval::["{release-state}"=="unreleased"] -NOTE: Version {version} of {es} has not been released, -so the sample compose file is not yet available for this version. -See the {stack-gs-current}/get-started-docker.html[current version] for the latest sample files. -endif::[] +[[docker-compose-remove-containers]] +[discrete] +==== Stop and remove the deployment -ifeval::["{release-state}"!="unreleased"] -["source","yaml",subs=+quotes] ----- - kib01: - image: docker.elastic.co/kibana/kibana:${VERSION} - container_name: kib01 - depends_on: {"es-node01": {"condition": "service_healthy"}} - ports: - - 5601:5601 - environment: - SERVERNAME: localhost - ELASTICSEARCH_URL: https://es-node01:9200 - ELASTICSEARCH_HOSTS: https://es-node01:9200 - ELASTICSEARCH_USERNAME: kibana_system - **ELASTICSEARCH_PASSWORD: CHANGEME** - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES: $CERTS_DIR/ca/ca.crt - SERVER_SSL_ENABLED: "true" - SERVER_SSL_KEY: $CERTS_DIR/kib01/kib01.key - SERVER_SSL_CERTIFICATE: $CERTS_DIR/kib01/kib01.crt - volumes: - - certs:$CERTS_DIR - networks: - - elastic ----- -endif::[] --- +When you're done experimenting, you can remove the network, containers, and +volumes: -. Use `docker-compose` to restart the cluster and {kib}: -+ --- -["source","sh"] +[source,sh] ---- -docker-compose stop -docker-compose -f elastic-docker-tls.yml up -d +docker-compose down -v ---- --- - -. Open {kib} to load sample data and interact with the cluster: -https://localhost:5601. -+ -NOTE: Because SSL is also enabled for communications between {kib} and client browsers, -you must access {kib} via the HTTPS protocol. - -When you're done experimenting, you can tear down the containers, network, and -volumes by running `docker-compose -f elastic-docker-tls.yml down -v`. [discrete] [[load-settings-file]] -==== Loading settings from a file +==== Load settings from a file -Specifying settings for {es} and {{kib}} directly in the compose file is a convenient way to get started, -but loading settings from a file is preferable once you get past the experimental stage. +Specifying settings for {es} and {kib} directly in the Docker Compose file is a +convenient way to get started, but loading settings from a file is preferable +after you get past the experimental stage. -For example, to use `es-node01.yml` as the configuration file for the `es-node01` {es} node, -you can create a bind mount in the volumes section. 
+For example, to use a custom `es01.yml` as the configuration file for the `es01` +{es} node, you can create a bind mount in the `volumes` section for the `es01` +service. ["source","yaml"] ---- volumes: - - data01:/usr/share/elasticsearch/data - - certs:$CERTS_DIR - - ./es-node01.yml:/usr/share/elasticsearch/config/elasticsearch.yml + - ./es01.yml:/usr/share/elasticsearch/config/elasticsearch.yml + - ... ---- -Similarly, to load {kib} settings from a file, you overwrite `/usr/share/kibana/config/kibana.yml`: +Similarly, to load {kib} settings from a file, you can add the following mount +in the `volumes` section for the `kibana` service. ["source","yaml"] ---- volumes: - - certs:$CERTS_DIR - ./kibana.yml:/usr/share/kibana/config/kibana.yml + - ... ---- [discrete] diff --git a/docs/en/getting-started/index.asciidoc b/docs/en/getting-started/index.asciidoc index 623a77cad..790e3b74a 100644 --- a/docs/en/getting-started/index.asciidoc +++ b/docs/en/getting-started/index.asciidoc @@ -13,6 +13,7 @@ :kib-repo-dir: {kibana-root}/docs :xes-repo-dir: {elasticsearch-root}/x-pack/docs/en +:es-repo-dir: {elasticsearch-root}/docs/reference include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[] include::{docs-root}/shared/attributes.asciidoc[]