Commit 036fad9603a873f4b7b620f717407a472ba7ed28
1 parent a39ce97f
Exists in master
Adding Dockerfiles for the common and intermediate artifacts
Showing 25 changed files with 1648 additions and 0 deletions
@@ -0,0 +1,16 @@
+FROM centos:7.2.1511
+
+MAINTAINER CAPGov-INFRA
+
+LABEL name="CentOS 7.2.1511" \
+      description="Imagem do CentOS 7.2.1511 com timezone definido para São Paulo" \
+      dockerfiles-version="1.0.0" \
+      vendor="CAPGov-INFRA <capgov@cos.ufrj.br>"
+
+ARG timezone="America/Sao_Paulo"
+
+ENV TZ=$timezone
+
+RUN rm -f /etc/localtime && \
+    ln -s /usr/share/zoneinfo/${TZ} /etc/localtime && \
+    yum update -y && yum autoremove -y && yum clean all
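The base image bakes the timezone in at build time through the timezone build argument. A minimal build sketch, assuming the image is tagged capgov/centos as the downstream Dockerfiles in this commit expect (the tag and build context are assumptions, not part of the commit):

# hedged example: tag name and context path are illustrative
docker build --build-arg timezone="America/Sao_Paulo" -t capgov/centos:7.2.1511 .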
@@ -0,0 +1,16 @@
+FROM centos:7
+
+MAINTAINER CAPGov-INFRA
+
+LABEL name="CentOS 7" \
+      description="Imagem do CentOS 7 com timezone definido para São Paulo" \
+      dockerfile-version="1.0.0" \
+      vendor="CAPGov-INFRA <capgov@cos.ufrj.br>"
+
+ARG timezone="America/Sao_Paulo"
+
+ENV TZ=$timezone
+
+RUN rm -f /etc/localtime && \
+    ln -s /usr/share/zoneinfo/${TZ} /etc/localtime && \
+    yum update -y && yum autoremove -y && yum clean all
@@ -0,0 +1,17 @@
+FROM centos:7
+
+MAINTAINER CAPGov-INFRA
+
+LABEL name="CentOS 7" \
+      description="Imagem do CentOS 7 com timezone definido para São Paulo" \
+      dockerfile-version="1.0.0" \
+      vendor="CAPGov-INFRA <capgov@cos.ufrj.br>"
+
+ARG timezone="America/Sao_Paulo"
+
+ENV TZ=$timezone
+
+RUN rm -f /etc/localtime && \
+    ln -s /usr/share/zoneinfo/${TZ} /etc/localtime && \
+    yum update -y && yum autoremove -y && yum clean all
+
@@ -0,0 +1,35 @@
+FROM capgov/centos:latest
+
+MAINTAINER CAPGov-INFRA
+
+LABEL name="MongoDB 3.4" \
+      description="Imagem do MongoDB 3.4" \
+      dockerfile-version="1.0.0" \
+      vendor="CAPGov-INFRA <capgov@cos.ufrj.br>"
+
+SHELL ["/bin/bash", "-c"]
+
+COPY ./FILES/mongodb-entrypoint.sh /
+
+RUN groupadd -r -g 5000 mongodb && \
+    useradd -Mr -c "MongoDB User" -g 5000 -u 5000 mongodb && \
+    echo -e '[mongodb-org-3.4]\nname=MongoDB Repository\nbaseurl=https://repo.mongodb.org/yum/amazon/2013.03/mongodb-org/3.4/x86_64/\ngpgcheck=1\nenabled=1\ngpgkey=https://www.mongodb.org/static/pgp/server-3.4.asc' > /etc/yum.repos.d/mongodb-org-3.4.repo && \
+    yum update -y && \
+    yum install -y mongodb-org && \
+    yum clean all && \
+    mkdir -p /var/lib/mongodb/{db,configdb} /var/run/mongodb && \
+    sed -i 's|dbPath: /var/lib/mongo|dbPath: /var/lib/mongodb|g' /etc/mongod.conf && \
+    sed -i 's|fork: true|fork: false|g' /etc/mongod.conf && \
+    sed -i 's| bindIp: 127.0.0.1|# bindIp: 127.0.0.1|g' /etc/mongod.conf && \
+    sed -i 's|systemLog:|#systemLog:|g' /etc/mongod.conf && \
+    sed -i 's| destination: file|# destination: file|g' /etc/mongod.conf && \
+    sed -i 's| logAppend: true|# logAppend: true|g' /etc/mongod.conf && \
+    sed -i 's| path: /var/log/mongodb/mongod.log|# path: /var/log/mongodb/mongod.log|g' /etc/mongod.conf && \
+    chmod +x /mongodb-entrypoint.sh && \
+    chown mongodb:mongodb -R /var/lib/mongodb /var/log/mongodb /var/run/mongodb /mongodb-entrypoint.sh
+
+EXPOSE 27017
+
+USER mongodb
+
+ENTRYPOINT ["/mongodb-entrypoint.sh"]
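The image runs MongoDB as the unprivileged mongodb user and keeps its data under /var/lib/mongodb; the mongodb-entrypoint.sh script itself is not shown in this diff. A hedged run sketch (image tag and volume name are illustrative assumptions):

# persist the database in a named volume and publish the default MongoDB port
docker run -d --name mongodb -p 27017:27017 \
    -v mongodb-data:/var/lib/mongodb \
    capgov/mongodb:3.4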
@@ -0,0 +1,30 @@
+FROM capgov/centos:latest
+MAINTAINER CAPGov-Infra
+
+COPY FILES/docker-entrypoint.sh /
+
+ENV MYSQL_CONFIG_FILE /usr/my.cnf
+
+RUN groupadd --system --gid 5000 mysql && \
+    useradd --create-home --system --home-dir "/home/mysql" --comment "MySQL User" --gid 5000 --uid 5000 mysql && \
+    rpm -Uvh http://dev.mysql.com/get/mysql57-community-release-el7-8.noarch.rpm && \
+    yum update -y && \
+    yum-config-manager --disable mysql57-community && \
+    yum-config-manager --enable mysql56-community && \
+    yum install -y mysql-community-server && \
+    yum clean all && \
+    touch $MYSQL_CONFIG_FILE && \
+    chmod 740 /docker-entrypoint.sh && \
+    chown mysql. /docker-entrypoint.sh $MYSQL_CONFIG_FILE && \
+    mkdir -p /docker-entrypoint-initdb.d && \
+    chown -R mysql: /docker-entrypoint-initdb.d
+
+EXPOSE 3306
+
+USER mysql
+
+WORKDIR /var/lib/mysql
+
+VOLUME ["/var/lib/mysql"]
+
+ENTRYPOINT ["/docker-entrypoint.sh"]
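The entrypoint script added further down in this commit reads MYSQL_USER, MYSQL_PASSWORD and MYSQL_DATABASE from the environment on first start, so a typical run passes them explicitly. A hedged sketch (image tag, credentials and volume name are illustrative):

docker run -d --name mysql -p 3306:3306 \
    -e MYSQL_USER=capgov -e MYSQL_PASSWORD=secret -e MYSQL_DATABASE=capgov \
    -v mysql-data:/var/lib/mysql \
    capgov/mysql:5.6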
@@ -0,0 +1,102 @@
+#!/bin/bash
+
+MYSQL_DATA='/var/lib/mysql'
+MYSQL_FILE_BEGIN="$MYSQL_DATA/MYSQL_BEGIN"
+
+function VerifyCredentials {
+    local USER=$1
+    local PASSWORD=$2
+    local DATABASE=$3
+
+    if [ -z "$USER" ] || [ -z "$PASSWORD" ]; then
+        echo "ERROR: MYSQL_USER and MYSQL_PASSWORD cannot be empty." > /dev/stderr
+        exit -1
+    fi
+
+    if [ "$USER" = "root" ]; then
+        echo "ERROR: MYSQL_USER cannot be the root account." > /dev/stderr
+        exit -1
+    fi
+
+    if [ -z "$DATABASE" ]; then
+        MYSQL_DATABASE="$USER"
+    fi
+}
+
+function CreateInitialDatabase {
+    local DATADIR=$1
+
+    mysql_install_db --datadir="$DATADIR" --user='mysql'
+
+    if [ $? -ne 0 ]; then
+        echo "ERROR: Could not create an initial database." > /dev/stderr
+        exit -1
+    fi
+}
+
+function CreateSuperuser {
+    local USER=$1
+    local PASSWORD=$2
+
+    mysql -u root --protocol=socket --wait -e "CREATE USER '$USER' IDENTIFIED BY '$PASSWORD';"
+    mysql -u root --protocol=socket --wait -e "GRANT ALL PRIVILEGES ON $MYSQL_DATABASE.* TO '$USER';"
+    mysql -u root --protocol=socket --wait -e "FLUSH PRIVILEGES;"
+}
+
+function CreateDatabase {
+    local DATABASE=$1
+
+    mysql -u root --protocol=socket --wait -e "CREATE DATABASE IF NOT EXISTS $DATABASE;"
+}
+
+function CreateFileBegin {
+    local FILE=$1
+
+    date +%c > $FILE
+}
+
+function StartDatabaseServer {
+    mysqld $@
+}
+
+function StartDatabaseServerBackground {
+    mysqld &
+    sleep 5
+}
+
+function StopDatabaseServer {
+    mysqladmin -u root shutdown
+    sleep 5
+}
+
+function RestoreDatabase {
+
+    mysqladmin -u root refresh
+
+    for file in `ls /docker-entrypoint-initdb.d/*`; do
+        case $file in
+            *.sh )
+                echo "Running '$file'..."
+                . $file ;;
+        esac
+    done
+
+}
+
+function Main {
+
+    if [ ! -f $MYSQL_FILE_BEGIN ]; then
+        VerifyCredentials $MYSQL_USER $MYSQL_PASSWORD $MYSQL_DATABASE
+        CreateInitialDatabase $MYSQL_DATA
+        StartDatabaseServerBackground
+        CreateDatabase $MYSQL_DATABASE
+        CreateSuperuser $MYSQL_USER $MYSQL_PASSWORD
+        RestoreDatabase
+        StopDatabaseServer
+        CreateFileBegin $MYSQL_FILE_BEGIN
+    fi
+
+    StartDatabaseServer $@
+}
+
+Main $@
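RestoreDatabase sources every *.sh file found under /docker-entrypoint-initdb.d while mysqld is already running in the background, so seed data can be shipped as small shell scripts. A hedged sketch of such a script (the file name 10-schema.sh and the table are illustrative, not part of this commit):

#!/bin/bash
# /docker-entrypoint-initdb.d/10-schema.sh (hypothetical example)
# Sourced on first start only, after CreateDatabase/CreateSuperuser; at this point
# root still connects over the local socket without a password.
mysql -u root --protocol=socket "$MYSQL_DATABASE" -e \
    "CREATE TABLE IF NOT EXISTS app_settings (k VARCHAR(64) PRIMARY KEY, v TEXT);"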
@@ -0,0 +1,39 @@
+FROM capgov/centos:latest
+
+MAINTAINER CAPGov-Infra
+
+LABEL name="MySQL 5.6.30" \
+      description="Imagem do MySQL 5.6.30" \
+      dockerfile-version="1.0.0" \
+      vendor="CAPGov-INFRA <capgov@cos.ufrj.br>"
+
+COPY FILES/docker-entrypoint.sh /
+
+ENV MYSQL_CONFIG_FILE /usr/my.cnf
+
+COPY ./FILES/mysql-community-server-5.6.30-2.el7.x86_64.rpm /tmp
+COPY ./FILES/mysql-community-common-5.6.30-2.el7.x86_64.rpm /tmp
+COPY ./FILES/mysql-community-client-5.6.30-2.el7.x86_64.rpm /tmp
+COPY ./FILES/mysql-community-libs-5.6.30-2.el7.x86_64.rpm /tmp
+
+RUN groupadd --system --gid 5000 mysql && \
+    useradd --create-home --system --home-dir "/home/mysql" --comment "MySQL User" --gid 5000 --uid 5000 mysql && \
+    yum install -y /tmp/mysql-community-server-5.6.30-2.el7.x86_64.rpm /tmp/mysql-community-common-5.6.30-2.el7.x86_64.rpm /tmp/mysql-community-client-5.6.30-2.el7.x86_64.rpm /tmp/mysql-community-libs-5.6.30-2.el7.x86_64.rpm && \
+    yum update -y && \
+    yum clean all && \
+    rm -f /tmp/*.rpm && \
+    touch $MYSQL_CONFIG_FILE && \
+    chmod 740 /docker-entrypoint.sh && \
+    chown mysql. /docker-entrypoint.sh $MYSQL_CONFIG_FILE && \
+    mkdir -p /docker-entrypoint-initdb.d && \
+    chown -R mysql: /docker-entrypoint-initdb.d
+
+EXPOSE 3306
+
+USER mysql
+
+WORKDIR /var/lib/mysql
+
+VOLUME ["/var/lib/mysql"]
+
+ENTRYPOINT ["/docker-entrypoint.sh"]
@@ -0,0 +1,102 @@
+#!/bin/bash
+
+MYSQL_DATA='/var/lib/mysql'
+MYSQL_FILE_BEGIN="$MYSQL_DATA/MYSQL_BEGIN"
+
+function VerifyCredentials {
+    local USER=$1
+    local PASSWORD=$2
+    local DATABASE=$3
+
+    if [ -z "$USER" ] || [ -z "$PASSWORD" ]; then
+        echo "ERROR: MYSQL_USER and MYSQL_PASSWORD cannot be empty." > /dev/stderr
+        exit -1
+    fi
+
+    if [ "$USER" = "root" ]; then
+        echo "ERROR: MYSQL_USER cannot be the root account." > /dev/stderr
+        exit -1
+    fi
+
+    if [ -z "$DATABASE" ]; then
+        MYSQL_DATABASE="$USER"
+    fi
+}
+
+function CreateInitialDatabase {
+    local DATADIR=$1
+
+    mysql_install_db --datadir="$DATADIR" --user='mysql'
+
+    if [ $? -ne 0 ]; then
+        echo "ERROR: Could not create an initial database." > /dev/stderr
+        exit -1
+    fi
+}
+
+function CreateSuperuser {
+    local USER=$1
+    local PASSWORD=$2
+
+    mysql -u root --protocol=socket --wait -e "CREATE USER '$USER' IDENTIFIED BY '$PASSWORD';"
+    mysql -u root --protocol=socket --wait -e "GRANT ALL PRIVILEGES ON $MYSQL_DATABASE.* TO '$USER';"
+    mysql -u root --protocol=socket --wait -e "FLUSH PRIVILEGES;"
+}
+
+function CreateDatabase {
+    local DATABASE=$1
+
+    mysql -u root --protocol=socket --wait -e "CREATE DATABASE IF NOT EXISTS $DATABASE;"
+}
+
+function CreateFileBegin {
+    local FILE=$1
+
+    date +%c > $FILE
+}
+
+function StartDatabaseServer {
+    mysqld $@
+}
+
+function StartDatabaseServerBackground {
+    mysqld &
+    sleep 5
+}
+
+function StopDatabaseServer {
+    mysqladmin -u root shutdown
+    sleep 5
+}
+
+function RestoreDatabase {
+
+    mysqladmin -u root refresh
+
+    for file in `ls /docker-entrypoint-initdb.d/*`; do
+        case $file in
+            *.sh )
+                echo "Running '$file'..."
+                . $file ;;
+        esac
+    done
+
+}
+
+function Main {
+
+    if [ ! -f $MYSQL_FILE_BEGIN ]; then
+        VerifyCredentials $MYSQL_USER $MYSQL_PASSWORD $MYSQL_DATABASE
+        CreateInitialDatabase $MYSQL_DATA
+        StartDatabaseServerBackground
+        CreateDatabase $MYSQL_DATABASE
+        CreateSuperuser $MYSQL_USER $MYSQL_PASSWORD
+        RestoreDatabase
+        StopDatabaseServer
+        CreateFileBegin $MYSQL_FILE_BEGIN
+    fi
+
+    StartDatabaseServer $@
+}
+
+Main $@
commons/mysql/5.6.30/FILES/mysql-community-client-5.6.30-2.el7.x86_64.rpm  0 → 100644  (binary file, no preview)
commons/mysql/5.6.30/FILES/mysql-community-common-5.6.30-2.el7.x86_64.rpm  0 → 100644  (binary file, no preview)
commons/mysql/5.6.30/FILES/mysql-community-embedded-5.6.30-2.el7.x86_64.rpm  0 → 100644  (binary file, no preview)
commons/mysql/5.6.30/FILES/mysql-community-libs-5.6.30-2.el7.x86_64.rpm  0 → 100644  (binary file, no preview)
commons/mysql/5.6.30/FILES/mysql-community-server-5.6.30-2.el7.x86_64.rpm  0 → 100644  (binary file, no preview)
commons/mysql/5.6.30/FILES/mysql-community-server-minimal-5.6.30-2.el7.x86_64.rpm  0 → 100644  (binary file, no preview)
@@ -0,0 +1,36 @@
+FROM capgov/centos:latest
+
+MAINTAINER CAPGov-Infra
+
+LABEL name="MySQL 5.6" \
+      description="Imagem do MySQL 5.6" \
+      dockerfile-version="1.0.0" \
+      vendor="CAPGov-INFRA <capgov@cos.ufrj.br>"
+
+COPY FILES/docker-entrypoint.sh /
+
+ENV MYSQL_CONFIG_FILE /usr/my.cnf
+
+RUN groupadd --system --gid 5000 mysql && \
+    useradd --create-home --system --home-dir "/home/mysql" --comment "MySQL User" --gid 5000 --uid 5000 mysql && \
+    rpm -Uvh http://dev.mysql.com/get/mysql57-community-release-el7-8.noarch.rpm && \
+    yum update -y && \
+    yum-config-manager --disable mysql57-community && \
+    yum-config-manager --enable mysql56-community && \
+    yum install -y mysql-community-server && \
+    yum clean all && \
+    touch $MYSQL_CONFIG_FILE && \
+    chmod 740 /docker-entrypoint.sh && \
+    chown mysql. /docker-entrypoint.sh $MYSQL_CONFIG_FILE && \
+    mkdir -p /docker-entrypoint-initdb.d && \
+    chown -R mysql: /docker-entrypoint-initdb.d
+
+EXPOSE 3306
+
+USER mysql
+
+WORKDIR /var/lib/mysql
+
+VOLUME ["/var/lib/mysql"]
+
+ENTRYPOINT ["/docker-entrypoint.sh"]
@@ -0,0 +1,102 @@
+#!/bin/bash
+
+MYSQL_DATA='/var/lib/mysql'
+MYSQL_FILE_BEGIN="$MYSQL_DATA/MYSQL_BEGIN"
+
+function VerifyCredentials {
+    local USER=$1
+    local PASSWORD=$2
+    local DATABASE=$3
+
+    if [ -z "$USER" ] || [ -z "$PASSWORD" ]; then
+        echo "ERROR: MYSQL_USER and MYSQL_PASSWORD cannot be empty." > /dev/stderr
+        exit -1
+    fi
+
+    if [ "$USER" = "root" ]; then
+        echo "ERROR: MYSQL_USER cannot be the root account." > /dev/stderr
+        exit -1
+    fi
+
+    if [ -z "$DATABASE" ]; then
+        MYSQL_DATABASE="$USER"
+    fi
+}
+
+function CreateInitialDatabase {
+    local DATADIR=$1
+
+    mysql_install_db --datadir="$DATADIR" --user='mysql'
+
+    if [ $? -ne 0 ]; then
+        echo "ERROR: Could not create an initial database." > /dev/stderr
+        exit -1
+    fi
+}
+
+function CreateSuperuser {
+    local USER=$1
+    local PASSWORD=$2
+
+    mysql -u root --protocol=socket --wait -e "CREATE USER '$USER' IDENTIFIED BY '$PASSWORD';"
+    mysql -u root --protocol=socket --wait -e "GRANT ALL PRIVILEGES ON $MYSQL_DATABASE.* TO '$USER';"
+    mysql -u root --protocol=socket --wait -e "FLUSH PRIVILEGES;"
+}
+
+function CreateDatabase {
+    local DATABASE=$1
+
+    mysql -u root --protocol=socket --wait -e "CREATE DATABASE IF NOT EXISTS $DATABASE;"
+}
+
+function CreateFileBegin {
+    local FILE=$1
+
+    date +%c > $FILE
+}
+
+function StartDatabaseServer {
+    exec mysqld $@
+}
+
+function StartDatabaseServerBackground {
+    mysqld &
+    sleep 5
+}
+
+function StopDatabaseServer {
+    mysqladmin -u root shutdown
+    sleep 5
+}
+
+function RestoreDatabase {
+
+    mysqladmin -u root refresh
+
+    for file in `ls /docker-entrypoint-initdb.d/*`; do
+        case $file in
+            *.sh )
+                echo "Running '$file'..."
+                . $file ;;
+        esac
+    done
+
+}
+
+function Main {
+
+    if [ ! -f $MYSQL_FILE_BEGIN ]; then
+        VerifyCredentials $MYSQL_USER $MYSQL_PASSWORD $MYSQL_DATABASE
+        CreateInitialDatabase $MYSQL_DATA
+        StartDatabaseServerBackground
+        CreateDatabase $MYSQL_DATABASE
+        CreateSuperuser $MYSQL_USER $MYSQL_PASSWORD
+        RestoreDatabase
+        StopDatabaseServer
+        CreateFileBegin $MYSQL_FILE_BEGIN
+    fi
+
+    StartDatabaseServer $@
+}
+
+Main $@
@@ -0,0 +1,33 @@
+FROM capgov/centos:latest
+
+MAINTAINER CAPGov-Infra
+
+LABEL name="MySQL 5.7" \
+      description="Imagem do MySQL 5.7" \
+      dockerfile-version="1.0.0" \
+      vendor="CAPGov-INFRA <capgov@cos.ufrj.br>"
+
+COPY ./FILES/docker-entrypoint.sh /
+
+ENV MYSQL_CONFIG_FILE /usr/my.cnf
+
+RUN groupadd -r --gid=5000 mysql && \
+    useradd -m -c "MySQL User" -r -g mysql --uid=5000 mysql && \
+    yum install -y https://dev.mysql.com/get/mysql57-community-release-el7-9.noarch.rpm && \
+    yum update -y && \
+    yum install -y mysql-community-server && \
+    yum clean all && \
+    touch $MYSQL_CONFIG_FILE && \
+    chmod 740 /docker-entrypoint.sh && \
+    mkdir /docker-entrypoint-initdb.d && \
+    chown -R mysql:mysql /docker-entrypoint.sh $MYSQL_CONFIG_FILE /docker-entrypoint-initdb.d
+
+EXPOSE 3306
+
+USER mysql
+
+WORKDIR /var/lib/mysql
+
+VOLUME ["/var/lib/mysql"]
+
+CMD ["bash","/docker-entrypoint.sh"]
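Unlike the 5.6 images, the 5.7 image launches its entrypoint script through CMD rather than ENTRYPOINT, so the script can be bypassed at run time. A hedged sketch (image tag, credentials and volume name are assumptions based on the repository layout):

# normal start: CMD runs the entrypoint script
docker run -d --name mysql57 -p 3306:3306 \
    -e MYSQL_USER=capgov -e MYSQL_PASSWORD=secret -e MYSQL_DATABASE=capgov \
    -v mysql57-data:/var/lib/mysql capgov/mysql:5.7

# because CMD (not ENTRYPOINT) is used, any command given here replaces the script entirely
docker run -it --rm capgov/mysql:5.7 bash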
@@ -0,0 +1,105 @@
+#!/bin/bash
+
+MYSQL_DATA='/var/lib/mysql'
+MYSQL_FILE_BEGIN="$MYSQL_DATA/MYSQL_BEGIN"
+
+function VerifyCredentials {
+    local USER=$1
+    local PASSWORD=$2
+    local DATABASE=$3
+
+    if [ -z "$USER" ] || [ -z "$PASSWORD" ]; then
+        echo "ERROR: MYSQL_USER and MYSQL_PASSWORD cannot be empty." > /dev/stderr
+        exit -1
+    fi
+
+    if [ "$USER" = "root" ]; then
+        echo "ERROR: MYSQL_USER cannot be the root account." > /dev/stderr
+        exit -1
+    fi
+
+    if [ -z "$DATABASE" ]; then
+        MYSQL_DATABASE="$USER"
+    fi
+}
+
+function CreateInitialDatabase {
+    local DATADIR=$1
+
+#   mysql_install_db --datadir="$DATADIR" --user='mysql' --insecure --verbose
+    mysqld --initialize-insecure --datadir="$DATADIR" --user="mysql"
+
+    if [ $? -ne 0 ]; then
+        echo "ERROR: Could not create an initial database." > /dev/stderr
+        exit -1
+    fi
+}
+
+function CreateSuperuser {
+    local USER=$1
+    local PASSWORD=$2
+
+    mysql -u root --protocol=socket --wait -e "CREATE USER '$USER' IDENTIFIED BY '$PASSWORD';"
+    mysql -u root --protocol=socket --wait -e "GRANT ALL PRIVILEGES ON $MYSQL_DATABASE.* TO '$USER';"
+    mysql -u root --protocol=socket --wait -e "FLUSH PRIVILEGES;"
+}
+
+function CreateDatabase {
+    local DATABASE=$1
+
+    mysql -u root --protocol=socket --wait -e "CREATE DATABASE IF NOT EXISTS $DATABASE;"
+}
+
+function CreateFileBegin {
+    local FILE=$1
+
+    date +%c > $FILE
+}
+
+function StartDatabaseServer {
+    mysqld $@
+}
+
+function StartDatabaseServerBackground {
+    mysqld &
+    sleep 5
+}
+
+function StopDatabaseServer {
+    mysqladmin -u root shutdown
+    sleep 5
+}
+
+function RestoreDatabase {
+
+    mysqladmin -u root refresh
+
+    for file in `ls /docker-entrypoint-initdb.d/*`; do
+        case $file in
+            *.sh )
+                echo "Running '$file'..."
+                . $file ;;
+        esac
+    done
+
+}
+
+function Main {
+
+    if [ ! -f $MYSQL_FILE_BEGIN ]; then
+        VerifyCredentials $MYSQL_USER $MYSQL_PASSWORD $MYSQL_DATABASE
+        CreateInitialDatabase $MYSQL_DATA
+        StartDatabaseServerBackground
+        CreateDatabase $MYSQL_DATABASE
+        CreateSuperuser $MYSQL_USER $MYSQL_PASSWORD
+        RestoreDatabase
+        StopDatabaseServer
+        CreateFileBegin $MYSQL_FILE_BEGIN
+        cat /var/log/mysqld.log
+    fi
+
+    StartDatabaseServerBackground
+    tail -f /var/log/mysqld.log
+}
+
+Main $@
@@ -0,0 +1,28 @@
+FROM capgov/centos
+
+MAINTAINER CAPGov-INFRA
+
+LABEL name="Rabbitmq 3.6.9-1" \
+      description="Imagem do Rabbitmq 3.6.9-1" \
+      version="1.0.0"
+
+COPY ./FILES/rabbitmq-server-3.6.9-1.el7.noarch.rpm /opt/
+
+RUN yum install -y epel-release && \
+    yum update -y && \
+    yum install -y erlang /opt/rabbitmq-server-3.6.9-1.el7.noarch.rpm && \
+    mkdir -p /var/lib/rabbitmq /etc/rabbitmq && \
+    echo '[ { rabbit, [ { loopback_users, [ ] } ] } ].' > /etc/rabbitmq/rabbitmq.config && \
+    rabbitmq-plugins enable --offline rabbitmq_management && \
+    chown -R rabbitmq. /var/lib/rabbit* /etc/rabbit*
+
+ENV RABBITMQ_LOGS=- \
+    RABBITMQ_SASL_LOGS=-
+
+USER rabbitmq
+
+EXPOSE 15671 15672 4369 5671 5672 25672
+
+VOLUME /var/lib/rabbitmq
+
+CMD ["rabbitmq-server"]
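With the management plugin enabled offline and loopback_users cleared in rabbitmq.config, the broker and its web console are reachable from outside the container. A hedged run sketch (the image tag is an assumption taken from the directory name commons/rabbitmq/3.6.9-1-management):

docker run -d --name rabbitmq \
    -p 5672:5672 -p 15672:15672 \
    -v rabbitmq-data:/var/lib/rabbitmq \
    capgov/rabbitmq:3.6.9-1-management
# management console at http://localhost:15672; the default guest account works over the
# network here only because loopback_users is emptied by this image's configuration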
commons/rabbitmq/3.6.9-1-management/FILES/rabbitmq-server-3.6.9-1.el7.noarch.rpm  0 → 100644  (binary file, no preview)
@@ -0,0 +1,41 @@
+FROM capgov/centos:7
+
+LABEL name="Redis 3.2.8" \
+      description="Imagem do redis 3.2.8" \
+      version="1.0.0"
+
+ENV redisPrefix=/usr/local \
+    redisLocation=/var/lib/redis \
+    redisVersion=3.2.8 \
+    redisInstallerSHA256="61b373c23d18e6cc752a69d5ab7f676c6216dc2853e46750a8c4ed791d68482c"
+
+ENV REDIS_VERSION=${redisVersion} \
+    REDIS_INSTALLER_URL="http://download.redis.io/releases/redis-${redisVersion}.tar.gz" \
+    REDIS_INSTALLER_SHA256SUM="61b373c23d18e6cc752a69d5ab7f676c6216dc2853e46750a8c4ed791d68482c"
+
+RUN groupadd --system --gid 5000 redis && \
+    useradd -Mr -c "Redis User" --gid 5000 --uid 5000 redis && \
+    yum install -y gcc make && \
+    curl --silent --output /tmp/redis.tgz ${REDIS_INSTALLER_URL} && \
+    echo -n "${REDIS_INSTALLER_SHA256SUM} /tmp/redis.tgz" | sha256sum --check && \
+    tar -C ${redisPrefix} -xzvf /tmp/redis.tgz && rm -f /tmp/redis.tgz && \
+    make --directory=${redisPrefix}/redis-${REDIS_VERSION} distclean install && \
+    yum remove -y gcc make && yum autoremove -y && \
+    rm -rf ${redisPrefix}/redis-${REDIS_VERSION} && \
+    mkdir -p ${redisLocation} /etc/redis /var/log/redis && \
+    touch /etc/redis/redis-server.log && \
+    chown -R redis:redis ${redisLocation} /opt /etc/redis /var/log/redis
+
+COPY ./FILES/redis.conf /etc/redis/redis.conf
+
+RUN chown -R redis:redis /etc/redis
+
+USER redis
+
+WORKDIR ${redisLocation}
+
+VOLUME ${redisLocation}
+
+EXPOSE 6379
+
+ENTRYPOINT ["redis-server"]
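The entrypoint is redis-server itself, and the redis.conf shipped below notes that the server reads a configuration file passed as its first argument, so the container is normally started with the copied config. A hedged run sketch (image tag and volume name are illustrative assumptions):

docker run -d --name redis -p 6379:6379 \
    -v redis-data:/var/lib/redis \
    capgov/redis:3.2.8 /etc/redis/redis.conf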
@@ -0,0 +1,943 @@
1 | +# Redis configuration file example. | ||
2 | +# | ||
3 | +# Note that in order to read the configuration file, Redis must be | ||
4 | +# started with the file path as first argument: | ||
5 | +# | ||
6 | +# ./redis-server /path/to/redis.conf | ||
7 | + | ||
8 | +# Note on units: when memory size is needed, it is possible to specify | ||
9 | +# it in the usual form of 1k 5GB 4M and so forth: | ||
10 | +# | ||
11 | +# 1k => 1000 bytes | ||
12 | +# 1kb => 1024 bytes | ||
13 | +# 1m => 1000000 bytes | ||
14 | +# 1mb => 1024*1024 bytes | ||
15 | +# 1g => 1000000000 bytes | ||
16 | +# 1gb => 1024*1024*1024 bytes | ||
17 | +# | ||
18 | +# units are case insensitive so 1GB 1Gb 1gB are all the same. | ||
19 | + | ||
20 | +################################## INCLUDES ################################### | ||
21 | + | ||
22 | +# Include one or more other config files here. This is useful if you | ||
23 | +# have a standard template that goes to all Redis servers but also need | ||
24 | +# to customize a few per-server settings. Include files can include | ||
25 | +# other files, so use this wisely. | ||
26 | +# | ||
27 | +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" | ||
28 | +# from admin or Redis Sentinel. Since Redis always uses the last processed | ||
29 | +# line as value of a configuration directive, you'd better put includes | ||
30 | +# at the beginning of this file to avoid overwriting config change at runtime. | ||
31 | +# | ||
32 | +# If instead you are interested in using includes to override configuration | ||
33 | +# options, it is better to use include as the last line. | ||
34 | +# | ||
35 | +# include /path/to/local.conf | ||
36 | +# include /path/to/other.conf | ||
37 | + | ||
38 | +################################ GENERAL ##################################### | ||
39 | + | ||
40 | +# By default Redis does not run as a daemon. Use 'yes' if you need it. | ||
41 | +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. | ||
42 | +daemonize no | ||
43 | + | ||
44 | +# When running daemonized, Redis writes a pid file in /var/run/redis.pid by | ||
45 | +# default. You can specify a custom pid file location here. | ||
46 | +pidfile /var/run/redis/redis-server.pid | ||
47 | + | ||
48 | +# Accept connections on the specified port, default is 6379. | ||
49 | +# If port 0 is specified Redis will not listen on a TCP socket. | ||
50 | +port 6379 | ||
51 | + | ||
52 | +# TCP listen() backlog. | ||
53 | +# | ||
54 | +# In high requests-per-second environments you need an high backlog in order | ||
55 | +# to avoid slow clients connections issues. Note that the Linux kernel | ||
56 | +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so | ||
57 | +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog | ||
58 | +# in order to get the desired effect. | ||
59 | +tcp-backlog 511 | ||
60 | + | ||
61 | +# By default Redis listens for connections from all the network interfaces | ||
62 | +# available on the server. It is possible to listen to just one or multiple | ||
63 | +# interfaces using the "bind" configuration directive, followed by one or | ||
64 | +# more IP addresses. | ||
65 | +# | ||
66 | +# Examples: | ||
67 | +# | ||
68 | +# bind 192.168.1.100 10.0.0.1 | ||
69 | +bind 0.0.0.0 | ||
70 | + | ||
71 | +# Specify the path for the Unix socket that will be used to listen for | ||
72 | +# incoming connections. There is no default, so Redis will not listen | ||
73 | +# on a unix socket when not specified. | ||
74 | +# | ||
75 | +# unixsocket /var/run/redis/redis.sock | ||
76 | +# unixsocketperm 700 | ||
77 | + | ||
78 | +# Close the connection after a client is idle for N seconds (0 to disable) | ||
79 | +timeout 0 | ||
80 | + | ||
81 | +# TCP keepalive. | ||
82 | +# | ||
83 | +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence | ||
84 | +# of communication. This is useful for two reasons: | ||
85 | +# | ||
86 | +# 1) Detect dead peers. | ||
87 | +# 2) Take the connection alive from the point of view of network | ||
88 | +# equipment in the middle. | ||
89 | +# | ||
90 | +# On Linux, the specified value (in seconds) is the period used to send ACKs. | ||
91 | +# Note that to close the connection the double of the time is needed. | ||
92 | +# On other kernels the period depends on the kernel configuration. | ||
93 | +# | ||
94 | +# A reasonable value for this option is 60 seconds. | ||
95 | +tcp-keepalive 0 | ||
96 | + | ||
97 | +# Specify the server verbosity level. | ||
98 | +# This can be one of: | ||
99 | +# debug (a lot of information, useful for development/testing) | ||
100 | +# verbose (many rarely useful info, but not a mess like the debug level) | ||
101 | +# notice (moderately verbose, what you want in production probably) | ||
102 | +# warning (only very important / critical messages are logged) | ||
103 | +loglevel notice | ||
104 | + | ||
105 | +# Specify the log file name. Also the empty string can be used to force | ||
106 | +# Redis to log on the standard output. Note that if you use standard | ||
107 | +# output for logging but daemonize, logs will be sent to /dev/null | ||
108 | +logfile /var/log/redis/redis-server.log | ||
109 | + | ||
110 | +# To enable logging to the system logger, just set 'syslog-enabled' to yes, | ||
111 | +# and optionally update the other syslog parameters to suit your needs. | ||
112 | +# syslog-enabled no | ||
113 | + | ||
114 | +# Specify the syslog identity. | ||
115 | +# syslog-ident redis | ||
116 | + | ||
117 | +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. | ||
118 | +# syslog-facility local0 | ||
119 | + | ||
120 | +# Set the number of databases. The default database is DB 0, you can select | ||
121 | +# a different one on a per-connection basis using SELECT <dbid> where | ||
122 | +# dbid is a number between 0 and 'databases'-1 | ||
123 | +databases 16 | ||
124 | + | ||
125 | +################################ SNAPSHOTTING ################################ | ||
126 | +# | ||
127 | +# Save the DB on disk: | ||
128 | +# | ||
129 | +# save <seconds> <changes> | ||
130 | +# | ||
131 | +# Will save the DB if both the given number of seconds and the given | ||
132 | +# number of write operations against the DB occurred. | ||
133 | +# | ||
134 | +# In the example below the behaviour will be to save: | ||
135 | +# after 900 sec (15 min) if at least 1 key changed | ||
136 | +# after 300 sec (5 min) if at least 10 keys changed | ||
137 | +# after 60 sec if at least 10000 keys changed | ||
138 | +# | ||
139 | +# Note: you can disable saving completely by commenting out all "save" lines. | ||
140 | +# | ||
141 | +# It is also possible to remove all the previously configured save | ||
142 | +# points by adding a save directive with a single empty string argument | ||
143 | +# like in the following example: | ||
144 | +# | ||
145 | +# save "" | ||
146 | + | ||
147 | +save 900 1 | ||
148 | +save 300 10 | ||
149 | +save 60 10000 | ||
150 | + | ||
151 | +# By default Redis will stop accepting writes if RDB snapshots are enabled | ||
152 | +# (at least one save point) and the latest background save failed. | ||
153 | +# This will make the user aware (in a hard way) that data is not persisting | ||
154 | +# on disk properly, otherwise chances are that no one will notice and some | ||
155 | +# disaster will happen. | ||
156 | +# | ||
157 | +# If the background saving process will start working again Redis will | ||
158 | +# automatically allow writes again. | ||
159 | +# | ||
160 | +# However if you have setup your proper monitoring of the Redis server | ||
161 | +# and persistence, you may want to disable this feature so that Redis will | ||
162 | +# continue to work as usual even if there are problems with disk, | ||
163 | +# permissions, and so forth. | ||
164 | +stop-writes-on-bgsave-error yes | ||
165 | + | ||
166 | +# Compress string objects using LZF when dump .rdb databases? | ||
167 | +# For default that's set to 'yes' as it's almost always a win. | ||
168 | +# If you want to save some CPU in the saving child set it to 'no' but | ||
169 | +# the dataset will likely be bigger if you have compressible values or keys. | ||
170 | +rdbcompression yes | ||
171 | + | ||
172 | +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. | ||
173 | +# This makes the format more resistant to corruption but there is a performance | ||
174 | +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it | ||
175 | +# for maximum performances. | ||
176 | +# | ||
177 | +# RDB files created with checksum disabled have a checksum of zero that will | ||
178 | +# tell the loading code to skip the check. | ||
179 | +rdbchecksum yes | ||
180 | + | ||
181 | +# The filename where to dump the DB | ||
182 | +dbfilename dump.rdb | ||
183 | + | ||
184 | +# The working directory. | ||
185 | +# | ||
186 | +# The DB will be written inside this directory, with the filename specified | ||
187 | +# above using the 'dbfilename' configuration directive. | ||
188 | +# | ||
189 | +# The Append Only File will also be created inside this directory. | ||
190 | +# | ||
191 | +# Note that you must specify a directory here, not a file name. | ||
192 | +dir /var/lib/redis | ||
193 | + | ||
194 | +################################# REPLICATION ################################# | ||
195 | + | ||
196 | +# Master-Slave replication. Use slaveof to make a Redis instance a copy of | ||
197 | +# another Redis server. A few things to understand ASAP about Redis replication. | ||
198 | +# | ||
199 | +# 1) Redis replication is asynchronous, but you can configure a master to | ||
200 | +# stop accepting writes if it appears to be not connected with at least | ||
201 | +# a given number of slaves. | ||
202 | +# 2) Redis slaves are able to perform a partial resynchronization with the | ||
203 | +# master if the replication link is lost for a relatively small amount of | ||
204 | +# time. You may want to configure the replication backlog size (see the next | ||
205 | +# sections of this file) with a sensible value depending on your needs. | ||
206 | +# 3) Replication is automatic and does not need user intervention. After a | ||
207 | +# network partition slaves automatically try to reconnect to masters | ||
208 | +# and resynchronize with them. | ||
209 | +# | ||
210 | +# slaveof <masterip> <masterport> | ||
211 | + | ||
212 | +# If the master is password protected (using the "requirepass" configuration | ||
213 | +# directive below) it is possible to tell the slave to authenticate before | ||
214 | +# starting the replication synchronization process, otherwise the master will | ||
215 | +# refuse the slave request. | ||
216 | +# | ||
217 | +# masterauth <master-password> | ||
218 | + | ||
219 | +# When a slave loses its connection with the master, or when the replication | ||
220 | +# is still in progress, the slave can act in two different ways: | ||
221 | +# | ||
222 | +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will | ||
223 | +# still reply to client requests, possibly with out of date data, or the | ||
224 | +# data set may just be empty if this is the first synchronization. | ||
225 | +# | ||
226 | +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with | ||
227 | +# an error "SYNC with master in progress" to all the kind of commands | ||
228 | +# but to INFO and SLAVEOF. | ||
229 | +# | ||
230 | +slave-serve-stale-data yes | ||
231 | + | ||
232 | +# You can configure a slave instance to accept writes or not. Writing against | ||
233 | +# a slave instance may be useful to store some ephemeral data (because data | ||
234 | +# written on a slave will be easily deleted after resync with the master) but | ||
235 | +# may also cause problems if clients are writing to it because of a | ||
236 | +# misconfiguration. | ||
237 | +# | ||
238 | +# Since Redis 2.6 by default slaves are read-only. | ||
239 | +# | ||
240 | +# Note: read only slaves are not designed to be exposed to untrusted clients | ||
241 | +# on the internet. It's just a protection layer against misuse of the instance. | ||
242 | +# Still a read only slave exports by default all the administrative commands | ||
243 | +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve | ||
244 | +# security of read only slaves using 'rename-command' to shadow all the | ||
245 | +# administrative / dangerous commands. | ||
246 | +slave-read-only yes | ||
247 | + | ||
248 | +# Replication SYNC strategy: disk or socket. | ||
249 | +# | ||
250 | +# ------------------------------------------------------- | ||
251 | +# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY | ||
252 | +# ------------------------------------------------------- | ||
253 | +# | ||
254 | +# New slaves and reconnecting slaves that are not able to continue the replication | ||
255 | +# process just receiving differences, need to do what is called a "full | ||
256 | +# synchronization". An RDB file is transmitted from the master to the slaves. | ||
257 | +# The transmission can happen in two different ways: | ||
258 | +# | ||
259 | +# 1) Disk-backed: The Redis master creates a new process that writes the RDB | ||
260 | +# file on disk. Later the file is transferred by the parent | ||
261 | +# process to the slaves incrementally. | ||
262 | +# 2) Diskless: The Redis master creates a new process that directly writes the | ||
263 | +# RDB file to slave sockets, without touching the disk at all. | ||
264 | +# | ||
265 | +# With disk-backed replication, while the RDB file is generated, more slaves | ||
266 | +# can be queued and served with the RDB file as soon as the current child producing | ||
267 | +# the RDB file finishes its work. With diskless replication instead once | ||
268 | +# the transfer starts, new slaves arriving will be queued and a new transfer | ||
269 | +# will start when the current one terminates. | ||
270 | +# | ||
271 | +# When diskless replication is used, the master waits a configurable amount of | ||
272 | +# time (in seconds) before starting the transfer in the hope that multiple slaves | ||
273 | +# will arrive and the transfer can be parallelized. | ||
274 | +# | ||
275 | +# With slow disks and fast (large bandwidth) networks, diskless replication | ||
276 | +# works better. | ||
277 | +repl-diskless-sync no | ||
278 | + | ||
279 | +# When diskless replication is enabled, it is possible to configure the delay | ||
280 | +# the server waits in order to spawn the child that transfers the RDB via socket | ||
281 | +# to the slaves. | ||
282 | +# | ||
283 | +# This is important since once the transfer starts, it is not possible to serve | ||
284 | +# new slaves arriving, that will be queued for the next RDB transfer, so the server | ||
285 | +# waits a delay in order to let more slaves arrive. | ||
286 | +# | ||
287 | +# The delay is specified in seconds, and by default is 5 seconds. To disable | ||
288 | +# it entirely just set it to 0 seconds and the transfer will start ASAP. | ||
289 | +repl-diskless-sync-delay 5 | ||
290 | + | ||
291 | +# Slaves send PINGs to server in a predefined interval. It's possible to change | ||
292 | +# this interval with the repl_ping_slave_period option. The default value is 10 | ||
293 | +# seconds. | ||
294 | +# | ||
295 | +# repl-ping-slave-period 10 | ||
296 | + | ||
297 | +# The following option sets the replication timeout for: | ||
298 | +# | ||
299 | +# 1) Bulk transfer I/O during SYNC, from the point of view of slave. | ||
300 | +# 2) Master timeout from the point of view of slaves (data, pings). | ||
301 | +# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). | ||
302 | +# | ||
303 | +# It is important to make sure that this value is greater than the value | ||
304 | +# specified for repl-ping-slave-period otherwise a timeout will be detected | ||
305 | +# every time there is low traffic between the master and the slave. | ||
306 | +# | ||
307 | +# repl-timeout 60 | ||
308 | + | ||
309 | +# Disable TCP_NODELAY on the slave socket after SYNC? | ||
310 | +# | ||
311 | +# If you select "yes" Redis will use a smaller number of TCP packets and | ||
312 | +# less bandwidth to send data to slaves. But this can add a delay for | ||
313 | +# the data to appear on the slave side, up to 40 milliseconds with | ||
314 | +# Linux kernels using a default configuration. | ||
315 | +# | ||
316 | +# If you select "no" the delay for data to appear on the slave side will | ||
317 | +# be reduced but more bandwidth will be used for replication. | ||
318 | +# | ||
319 | +# By default we optimize for low latency, but in very high traffic conditions | ||
320 | +# or when the master and slaves are many hops away, turning this to "yes" may | ||
321 | +# be a good idea. | ||
322 | +repl-disable-tcp-nodelay no | ||
323 | + | ||
324 | +# Set the replication backlog size. The backlog is a buffer that accumulates | ||
325 | +# slave data when slaves are disconnected for some time, so that when a slave | ||
326 | +# wants to reconnect again, often a full resync is not needed, but a partial | ||
327 | +# resync is enough, just passing the portion of data the slave missed while | ||
328 | +# disconnected. | ||
329 | +# | ||
330 | +# The bigger the replication backlog, the longer the time the slave can be | ||
331 | +# disconnected and later be able to perform a partial resynchronization. | ||
332 | +# | ||
333 | +# The backlog is only allocated once there is at least a slave connected. | ||
334 | +# | ||
335 | +# repl-backlog-size 1mb | ||
336 | + | ||
337 | +# After a master has no longer connected slaves for some time, the backlog | ||
338 | +# will be freed. The following option configures the amount of seconds that | ||
339 | +# need to elapse, starting from the time the last slave disconnected, for | ||
340 | +# the backlog buffer to be freed. | ||
341 | +# | ||
342 | +# A value of 0 means to never release the backlog. | ||
343 | +# | ||
344 | +# repl-backlog-ttl 3600 | ||
345 | + | ||
346 | +# The slave priority is an integer number published by Redis in the INFO output. | ||
347 | +# It is used by Redis Sentinel in order to select a slave to promote into a | ||
348 | +# master if the master is no longer working correctly. | ||
349 | +# | ||
350 | +# A slave with a low priority number is considered better for promotion, so | ||
351 | +# for instance if there are three slaves with priority 10, 100, 25 Sentinel will | ||
352 | +# pick the one with priority 10, that is the lowest. | ||
353 | +# | ||
354 | +# However a special priority of 0 marks the slave as not able to perform the | ||
355 | +# role of master, so a slave with priority of 0 will never be selected by | ||
356 | +# Redis Sentinel for promotion. | ||
357 | +# | ||
358 | +# By default the priority is 100. | ||
359 | +slave-priority 100 | ||
360 | + | ||
361 | +# It is possible for a master to stop accepting writes if there are less than | ||
362 | +# N slaves connected, having a lag less or equal than M seconds. | ||
363 | +# | ||
364 | +# The N slaves need to be in "online" state. | ||
365 | +# | ||
366 | +# The lag in seconds, that must be <= the specified value, is calculated from | ||
367 | +# the last ping received from the slave, that is usually sent every second. | ||
368 | +# | ||
369 | +# This option does not GUARANTEE that N replicas will accept the write, but | ||
370 | +# will limit the window of exposure for lost writes in case not enough slaves | ||
371 | +# are available, to the specified number of seconds. | ||
372 | +# | ||
373 | +# For example to require at least 3 slaves with a lag <= 10 seconds use: | ||
374 | +# | ||
375 | +# min-slaves-to-write 3 | ||
376 | +# min-slaves-max-lag 10 | ||
377 | +# | ||
378 | +# Setting one or the other to 0 disables the feature. | ||
379 | +# | ||
380 | +# By default min-slaves-to-write is set to 0 (feature disabled) and | ||
381 | +# min-slaves-max-lag is set to 10. | ||
382 | + | ||
383 | +################################## SECURITY ################################### | ||
384 | + | ||
385 | +# Require clients to issue AUTH <PASSWORD> before processing any other | ||
386 | +# commands. This might be useful in environments in which you do not trust | ||
387 | +# others with access to the host running redis-server. | ||
388 | +# | ||
389 | +# This should stay commented out for backward compatibility and because most | ||
390 | +# people do not need auth (e.g. they run their own servers). | ||
391 | +# | ||
392 | +# Warning: since Redis is pretty fast an outside user can try up to | ||
393 | +# 150k passwords per second against a good box. This means that you should | ||
394 | +# use a very strong password otherwise it will be very easy to break. | ||
395 | +# | ||
396 | +# requirepass foobared | ||
397 | + | ||
398 | +# Command renaming. | ||
399 | +# | ||
400 | +# It is possible to change the name of dangerous commands in a shared | ||
401 | +# environment. For instance the CONFIG command may be renamed into something | ||
402 | +# hard to guess so that it will still be available for internal-use tools | ||
403 | +# but not available for general clients. | ||
404 | +# | ||
405 | +# Example: | ||
406 | +# | ||
407 | +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 | ||
408 | +# | ||
409 | +# It is also possible to completely kill a command by renaming it into | ||
410 | +# an empty string: | ||
411 | +# | ||
412 | +# rename-command CONFIG "" | ||
413 | +# | ||
414 | +# Please note that changing the name of commands that are logged into the | ||
415 | +# AOF file or transmitted to slaves may cause problems. | ||
416 | + | ||
417 | +################################### LIMITS #################################### | ||
418 | + | ||
419 | +# Set the max number of connected clients at the same time. By default | ||
420 | +# this limit is set to 10000 clients, however if the Redis server is not | ||
421 | +# able to configure the process file limit to allow for the specified limit | ||
422 | +# the max number of allowed clients is set to the current file limit | ||
423 | +# minus 32 (as Redis reserves a few file descriptors for internal uses). | ||
424 | +# | ||
425 | +# Once the limit is reached Redis will close all the new connections sending | ||
426 | +# an error 'max number of clients reached'. | ||
427 | +# | ||
428 | +# maxclients 10000 | ||
429 | + | ||
430 | +# Don't use more memory than the specified amount of bytes. | ||
431 | +# When the memory limit is reached Redis will try to remove keys | ||
432 | +# according to the eviction policy selected (see maxmemory-policy). | ||
433 | +# | ||
434 | +# If Redis can't remove keys according to the policy, or if the policy is | ||
435 | +# set to 'noeviction', Redis will start to reply with errors to commands | ||
436 | +# that would use more memory, like SET, LPUSH, and so on, and will continue | ||
437 | +# to reply to read-only commands like GET. | ||
438 | +# | ||
439 | +# This option is usually useful when using Redis as an LRU cache, or to set | ||
440 | +# a hard memory limit for an instance (using the 'noeviction' policy). | ||
441 | +# | ||
442 | +# WARNING: If you have slaves attached to an instance with maxmemory on, | ||
443 | +# the size of the output buffers needed to feed the slaves are subtracted | ||
444 | +# from the used memory count, so that network problems / resyncs will | ||
445 | +# not trigger a loop where keys are evicted, and in turn the output | ||
446 | +# buffer of slaves is full with DELs of keys evicted triggering the deletion | ||
447 | +# of more keys, and so forth until the database is completely emptied. | ||
448 | +# | ||
449 | +# In short... if you have slaves attached it is suggested that you set a lower | ||
450 | +# limit for maxmemory so that there is some free RAM on the system for slave | ||
451 | +# output buffers (but this is not needed if the policy is 'noeviction'). | ||
452 | +# | ||
453 | +# maxmemory <bytes> | ||
454 | + | ||
455 | +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory | ||
456 | +# is reached. You can select among five behaviors: | ||
457 | +# | ||
458 | +# volatile-lru -> remove the key with an expire set using an LRU algorithm | ||
459 | +# allkeys-lru -> remove any key according to the LRU algorithm | ||
460 | +# volatile-random -> remove a random key with an expire set | ||
461 | +# allkeys-random -> remove a random key, any key | ||
462 | +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) | ||
463 | +# noeviction -> don't expire at all, just return an error on write operations | ||
464 | +# | ||
465 | +# Note: with any of the above policies, Redis will return an error on write | ||
466 | +# operations, when there are no suitable keys for eviction. | ||
467 | +# | ||
468 | +# At the date of writing these commands are: set setnx setex append | ||
469 | +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd | ||
470 | +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby | ||
471 | +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby | ||
472 | +# getset mset msetnx exec sort | ||
473 | +# | ||
474 | +# The default is: | ||
475 | +# | ||
476 | +# maxmemory-policy noeviction | ||
477 | + | ||
478 | +# LRU and minimal TTL algorithms are not precise algorithms but approximated | ||
479 | +# algorithms (in order to save memory), so you can tune it for speed or | ||
480 | +# accuracy. For default Redis will check five keys and pick the one that was | ||
481 | +# used less recently, you can change the sample size using the following | ||
482 | +# configuration directive. | ||
483 | +# | ||
484 | +# The default of 5 produces good enough results. 10 Approximates very closely | ||
485 | +# true LRU but costs a bit more CPU. 3 is very fast but not very accurate. | ||
486 | +# | ||
487 | +# maxmemory-samples 5 | ||
488 | + | ||
489 | +############################## APPEND ONLY MODE ############################### | ||
490 | + | ||
491 | +# By default Redis asynchronously dumps the dataset on disk. This mode is | ||
492 | +# good enough in many applications, but an issue with the Redis process or | ||
493 | +# a power outage may result into a few minutes of writes lost (depending on | ||
494 | +# the configured save points). | ||
495 | +# | ||
496 | +# The Append Only File is an alternative persistence mode that provides | ||
497 | +# much better durability. For instance using the default data fsync policy | ||
498 | +# (see later in the config file) Redis can lose just one second of writes in a | ||
499 | +# dramatic event like a server power outage, or a single write if something | ||
500 | +# wrong with the Redis process itself happens, but the operating system is | ||
501 | +# still running correctly. | ||
502 | +# | ||
503 | +# AOF and RDB persistence can be enabled at the same time without problems. | ||
504 | +# If the AOF is enabled on startup Redis will load the AOF, that is the file | ||
505 | +# with the better durability guarantees. | ||
506 | +# | ||
507 | +# Please check http://redis.io/topics/persistence for more information. | ||
508 | + | ||
509 | +appendonly no | ||
510 | + | ||
511 | +# The name of the append only file (default: "appendonly.aof") | ||
512 | + | ||
513 | +appendfilename "appendonly.aof" | ||
514 | + | ||
515 | +# The fsync() call tells the Operating System to actually write data on disk | ||
516 | +# instead of waiting for more data in the output buffer. Some OS will really flush | ||
517 | +# data on disk, some other OS will just try to do it ASAP. | ||
518 | +# | ||
519 | +# Redis supports three different modes: | ||
520 | +# | ||
521 | +# no: don't fsync, just let the OS flush the data when it wants. Faster. | ||
522 | +# always: fsync after every write to the append only log. Slow, Safest. | ||
523 | +# everysec: fsync only one time every second. Compromise. | ||
524 | +# | ||
525 | +# The default is "everysec", as that's usually the right compromise between | ||
526 | +# speed and data safety. It's up to you to understand if you can relax this to | ||
527 | +# "no" that will let the operating system flush the output buffer when | ||
528 | +# it wants, for better performances (but if you can live with the idea of | ||
529 | +# some data loss consider the default persistence mode that's snapshotting), | ||
530 | +# or on the contrary, use "always" that's very slow but a bit safer than | ||
531 | +# everysec. | ||
532 | +# | ||
533 | +# More details please check the following article: | ||
534 | +# http://antirez.com/post/redis-persistence-demystified.html | ||
535 | +# | ||
536 | +# If unsure, use "everysec". | ||
537 | + | ||
538 | +# appendfsync always | ||
539 | +appendfsync everysec | ||
540 | +# appendfsync no | ||
541 | + | ||
542 | +# When the AOF fsync policy is set to always or everysec, and a background | ||
543 | +# saving process (a background save or AOF log background rewriting) is | ||
544 | +# performing a lot of I/O against the disk, in some Linux configurations | ||
545 | +# Redis may block too long on the fsync() call. Note that there is no fix for | ||
546 | +# this currently, as even performing fsync in a different thread will block | ||
547 | +# our synchronous write(2) call. | ||
548 | +# | ||
549 | +# In order to mitigate this problem it's possible to use the following option | ||
550 | +# that will prevent fsync() from being called in the main process while a | ||
551 | +# BGSAVE or BGREWRITEAOF is in progress. | ||
552 | +# | ||
553 | +# This means that while another child is saving, the durability of Redis is | ||
554 | +# the same as "appendfsync none". In practical terms, this means that it is | ||
555 | +# possible to lose up to 30 seconds of log in the worst scenario (with the | ||
556 | +# default Linux settings). | ||
557 | +# | ||
558 | +# If you have latency problems turn this to "yes". Otherwise leave it as | ||
559 | +# "no" that is the safest pick from the point of view of durability. | ||
560 | + | ||
561 | +no-appendfsync-on-rewrite no | ||
562 | + | ||
563 | +# Automatic rewrite of the append only file. | ||
564 | +# Redis is able to automatically rewrite the log file implicitly calling | ||
565 | +# BGREWRITEAOF when the AOF log size grows by the specified percentage. | ||
566 | +# | ||
567 | +# This is how it works: Redis remembers the size of the AOF file after the | ||
568 | +# latest rewrite (if no rewrite has happened since the restart, the size of | ||
569 | +# the AOF at startup is used). | ||
570 | +# | ||
571 | +# This base size is compared to the current size. If the current size is | ||
572 | +# bigger than the base by the specified percentage, the rewrite is triggered. Also | ||
573 | +# you need to specify a minimum size for the AOF file to be rewritten; this | ||
574 | +# is useful to avoid rewriting the AOF file even if the percentage increase | ||
575 | +# is reached but it is still pretty small. | ||
576 | +# | ||
577 | +# Specify a percentage of zero in order to disable the automatic AOF | ||
578 | +# rewrite feature. | ||
579 | + | ||
580 | +auto-aof-rewrite-percentage 100 | ||
581 | +auto-aof-rewrite-min-size 64mb | ||
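A worked example of how the two thresholds above interact, using the values set in this file:

    # base AOF size after the last rewrite: 80mb -> BGREWRITEAOF triggers once the
    #   file grows past 160mb (100% bigger than the base)
    # base AOF size after the last rewrite: 20mb -> the 100% growth point is 40mb,
    #   but no rewrite happens until the file also reaches the 64mb minimum size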
582 | + | ||
583 | +# An AOF file may be found to be truncated at the end during the Redis | ||
584 | +# startup process, when the AOF data gets loaded back into memory. | ||
585 | +# This may happen when the system where Redis is running | ||
586 | +# crashes, especially when an ext4 filesystem is mounted without the | ||
587 | +# data=ordered option (however this can't happen when Redis itself | ||
588 | +# crashes or aborts but the operating system still works correctly). | ||
589 | +# | ||
590 | +# Redis can either exit with an error when this happens, or load as much | ||
591 | +# data as possible (the default now) and start if the AOF file is found | ||
592 | +# to be truncated at the end. The following option controls this behavior. | ||
593 | +# | ||
594 | +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and | ||
595 | +# the Redis server starts emitting a log to inform the user of the event. | ||
596 | +# Otherwise if the option is set to no, the server aborts with an error | ||
597 | +# and refuses to start. When the option is set to no, the user is required | ||
598 | +# to fix the AOF file using the "redis-check-aof" utility before restarting | ||
599 | +# the server. | ||
600 | +# | ||
601 | +# Note that if the AOF file is found to be corrupted in the middle, | ||
602 | +# the server will still exit with an error. This option only applies when | ||
603 | +# Redis tries to read more data from the AOF file but not enough bytes | ||
604 | +# are found. | ||
605 | +aof-load-truncated yes | ||
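If the stricter behaviour is preferred (aof-load-truncated no) and the server then refuses to start, the truncated file can be repaired before restarting; a minimal sketch, assuming the default appendfilename configured above:

    redis-check-aof --fix appendonly.aof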
606 | + | ||
607 | +################################ LUA SCRIPTING ############################### | ||
608 | + | ||
609 | +# Max execution time of a Lua script in milliseconds. | ||
610 | +# | ||
611 | +# If the maximum execution time is reached Redis will log that a script is | ||
612 | +# still in execution after the maximum allowed time and will start to | ||
613 | +# reply to queries with an error. | ||
614 | +# | ||
615 | +# When a long running script exceeds the maximum execution time only the | ||
616 | +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be | ||
617 | +# used to stop a script that has not yet called write commands. The second | ||
618 | +# is the only way to shut down the server in the case a write command was | ||
619 | +# already issued by the script but the user doesn't want to wait for the natural | ||
620 | +# termination of the script. | ||
621 | +# | ||
622 | +# Set it to 0 or a negative value for unlimited execution without warnings. | ||
623 | +lua-time-limit 5000 | ||
624 | + | ||
625 | +################################ REDIS CLUSTER ############################### | ||
626 | +# | ||
627 | +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ | ||
628 | +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however | ||
629 | +# in order to mark it as "mature" we need to wait for a non trivial percentage | ||
630 | +# of users to deploy it in production. | ||
631 | +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ | ||
632 | +# | ||
633 | +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are | ||
634 | +# started as cluster nodes can. In order to start a Redis instance as a | ||
635 | +# cluster node, enable cluster support by uncommenting the following: | ||
636 | +# | ||
637 | +# cluster-enabled yes | ||
638 | + | ||
639 | +# Every cluster node has a cluster configuration file. This file is not | ||
640 | +# intended to be edited by hand. It is created and updated by Redis nodes. | ||
641 | +# Every Redis Cluster node requires a different cluster configuration file. | ||
642 | +# Make sure that instances running in the same system do not have | ||
643 | +# overlapping cluster configuration file names. | ||
644 | +# | ||
645 | +# cluster-config-file nodes-6379.conf | ||
646 | + | ||
647 | +# Cluster node timeout is the number of milliseconds a node must be unreachable | ||
648 | +# for it to be considered in a failure state. | ||
649 | +# Most other internal time limits are multiples of the node timeout. | ||
650 | +# | ||
651 | +# cluster-node-timeout 15000 | ||
652 | + | ||
653 | +# A slave of a failing master will avoid starting a failover if its data | ||
654 | +# looks too old. | ||
655 | +# | ||
656 | +# There is no simple way for a slave to actually have an exact measure of | ||
657 | +# its "data age", so the following two checks are performed: | ||
658 | +# | ||
659 | +# 1) If there are multiple slaves able to failover, they exchange messages | ||
660 | +# in order to try to give an advantage to the slave with the best | ||
661 | +# replication offset (more data from the master processed). | ||
662 | +# Slaves will try to get their rank by offset, and apply a delay | ||
663 | +# proportional to their rank to the start of the failover. | ||
664 | +# | ||
665 | +# 2) Every single slave computes the time of the last interaction with | ||
666 | +# its master. This can be the last ping or command received (if the master | ||
667 | +# is still in the "connected" state), or the time that elapsed since the | ||
668 | +# disconnection with the master (if the replication link is currently down). | ||
669 | +# If the last interaction is too old, the slave will not try to failover | ||
670 | +# at all. | ||
671 | +# | ||
672 | +# The point "2" can be tuned by the user. Specifically, a slave will not perform | ||
673 | +# the failover if, since the last interaction with the master, the time | ||
674 | +# elapsed is greater than: | ||
675 | +# | ||
676 | +# (node-timeout * slave-validity-factor) + repl-ping-slave-period | ||
677 | +# | ||
678 | +# So for example if node-timeout is 30 seconds, and the slave-validity-factor | ||
679 | +# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the | ||
680 | +# slave will not try to failover if it was not able to talk with the master | ||
681 | +# for longer than 310 seconds. | ||
682 | +# | ||
683 | +# A large slave-validity-factor may allow slaves with too old data to failover | ||
684 | +# a master, while a too small value may prevent the cluster from being able to | ||
685 | +# elect a slave at all. | ||
686 | +# | ||
687 | +# For maximum availability, it is possible to set the slave-validity-factor | ||
688 | +# to a value of 0, which means that slaves will always try to failover the | ||
689 | +# master regardless of the last time they interacted with the master. | ||
690 | +# (However they'll always try to apply a delay proportional to their | ||
691 | +# offset rank). | ||
692 | +# | ||
693 | +# Zero is the only value able to guarantee that when all the partitions heal | ||
694 | +# the cluster will always be able to continue. | ||
695 | +# | ||
696 | +# cluster-slave-validity-factor 10 | ||
697 | + | ||
698 | +# Cluster slaves are able to migrate to orphaned masters, that is, masters | ||
699 | +# that are left without working slaves. This improves the cluster's ability | ||
700 | +# to resist failures, as otherwise an orphaned master can't be failed over | ||
701 | +# in case of failure if it has no working slaves. | ||
702 | +# | ||
703 | +# Slaves migrate to orphaned masters only if there are still at least a | ||
704 | +# given number of other working slaves for their old master. This number | ||
705 | +# is the "migration barrier". A migration barrier of 1 means that a slave | ||
706 | +# will migrate only if there is at least 1 other working slave for its master | ||
707 | +# and so forth. It usually reflects the number of slaves you want for every | ||
708 | +# master in your cluster. | ||
709 | +# | ||
710 | +# Default is 1 (slaves migrate only if their masters remain with at least | ||
711 | +# one slave). To disable migration just set it to a very large value. | ||
712 | +# A value of 0 can be set but is useful only for debugging and dangerous | ||
713 | +# in production. | ||
714 | +# | ||
715 | +# cluster-migration-barrier 1 | ||
716 | + | ||
717 | +# By default Redis Cluster nodes stop accepting queries if they detect there | ||
718 | +# is at least one hash slot uncovered (no available node is serving it). | ||
719 | +# This way if the cluster is partially down (for example a range of hash slots | ||
720 | +# is no longer covered) the whole cluster eventually becomes unavailable. | ||
721 | +# It automatically becomes available again as soon as all the slots are covered. | ||
722 | +# | ||
723 | +# However sometimes you want the subset of the cluster which is working | ||
724 | +# to continue to accept queries for the part of the key space that is still | ||
725 | +# covered. In order to do so, just set the cluster-require-full-coverage | ||
726 | +# option to no. | ||
727 | +# | ||
728 | +# cluster-require-full-coverage yes | ||
729 | + | ||
730 | +# In order to set up your cluster make sure to read the documentation | ||
731 | +# available at the http://redis.io web site. | ||
732 | + | ||
733 | +################################## SLOW LOG ################################### | ||
734 | + | ||
735 | +# The Redis Slow Log is a system to log queries that exceeded a specified | ||
736 | +# execution time. The execution time does not include the I/O operations | ||
737 | +# like talking with the client, sending the reply and so forth, | ||
738 | +# but just the time needed to actually execute the command (this is the only | ||
739 | +# stage of command execution where the thread is blocked and can not serve | ||
740 | +# other requests in the meantime). | ||
741 | +# | ||
742 | +# You can configure the slow log with two parameters: one tells Redis | ||
743 | +# what is the execution time, in microseconds, to exceed in order for the | ||
744 | +# command to get logged, and the other parameter is the length of the | ||
745 | +# slow log. When a new command is logged the oldest one is removed from the | ||
746 | +# queue of logged commands. | ||
747 | + | ||
748 | +# The following time is expressed in microseconds, so 1000000 is equivalent | ||
749 | +# to one second. Note that a negative number disables the slow log, while | ||
750 | +# a value of zero forces the logging of every command. | ||
751 | +slowlog-log-slower-than 10000 | ||
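For reference, 10000 microseconds is 10 milliseconds. A minimal sketch of inspecting the slow log at runtime, assuming a local redis-cli:

    redis-cli SLOWLOG GET 10    # the 10 most recent slow commands
    redis-cli SLOWLOG LEN       # number of entries currently stored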
752 | + | ||
753 | +# There is no limit to this length. Just be aware that it will consume memory. | ||
754 | +# You can reclaim memory used by the slow log with SLOWLOG RESET. | ||
755 | +slowlog-max-len 128 | ||
756 | + | ||
757 | +################################ LATENCY MONITOR ############################## | ||
758 | + | ||
759 | +# The Redis latency monitoring subsystem samples different operations | ||
760 | +# at runtime in order to collect data related to possible sources of | ||
761 | +# latency of a Redis instance. | ||
762 | +# | ||
763 | +# Via the LATENCY command this information is available to the user, who can | ||
764 | +# print graphs and obtain reports. | ||
765 | +# | ||
766 | +# The system only logs operations that were performed in a time equal to or | ||
767 | +# greater than the number of milliseconds specified via the | ||
768 | +# latency-monitor-threshold configuration directive. When its value is set | ||
769 | +# to zero, the latency monitor is turned off. | ||
770 | +# | ||
771 | +# By default latency monitoring is disabled since it is mostly not needed | ||
772 | +# if you don't have latency issues, and collecting data has a performance | ||
773 | +# impact that, while very small, can be measured under heavy load. Latency | ||
774 | +# monitoring can easily be enabled at runtime using the command | ||
775 | +# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed. | ||
776 | +latency-monitor-threshold 0 | ||
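A minimal sketch of enabling the latency monitor at runtime and reading the collected samples, assuming a local redis-cli (the 100 ms threshold is only an example value):

    redis-cli CONFIG SET latency-monitor-threshold 100
    redis-cli LATENCY LATEST
    redis-cli LATENCY DOCTOR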
777 | + | ||
778 | +############################# EVENT NOTIFICATION ############################## | ||
779 | + | ||
780 | +# Redis can notify Pub/Sub clients about events happening in the key space. | ||
781 | +# This feature is documented at http://redis.io/topics/notifications | ||
782 | +# | ||
783 | +# For instance if keyspace event notifications are enabled, and a client | ||
784 | +# performs a DEL operation on key "foo" stored in the Database 0, two | ||
785 | +# messages will be published via Pub/Sub: | ||
786 | +# | ||
787 | +# PUBLISH __keyspace@0__:foo del | ||
788 | +# PUBLISH __keyevent@0__:del foo | ||
789 | +# | ||
790 | +# It is possible to select the events that Redis will notify among a set | ||
791 | +# of classes. Every class is identified by a single character: | ||
792 | +# | ||
793 | +# K Keyspace events, published with __keyspace@<db>__ prefix. | ||
794 | +# E Keyevent events, published with __keyevent@<db>__ prefix. | ||
795 | +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... | ||
796 | +# $ String commands | ||
797 | +# l List commands | ||
798 | +# s Set commands | ||
799 | +# h Hash commands | ||
800 | +# z Sorted set commands | ||
801 | +# x Expired events (events generated every time a key expires) | ||
802 | +# e Evicted events (events generated when a key is evicted for maxmemory) | ||
803 | +# A Alias for g$lshzxe, so that the "AKE" string means all the events. | ||
804 | +# | ||
805 | +# The "notify-keyspace-events" directive takes as its argument a string composed | ||
806 | +# of zero or more characters. The empty string means that notifications | ||
807 | +# are disabled. | ||
808 | +# | ||
809 | +# Example: to enable list and generic events, from the point of view of the | ||
810 | +# event name, use: | ||
811 | +# | ||
812 | +# notify-keyspace-events Elg | ||
813 | +# | ||
814 | +# Example 2: to get the stream of the expired keys subscribing to channel | ||
815 | +# name __keyevent@0__:expired use: | ||
816 | +# | ||
817 | +# notify-keyspace-events Ex | ||
818 | +# | ||
819 | +# By default all notifications are disabled because most users don't need | ||
820 | +# this feature and the feature has some overhead. Note that if you don't | ||
821 | +# specify at least one of K or E, no events will be delivered. | ||
822 | +notify-keyspace-events "" | ||
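A minimal sketch of the second example above (expired-key events on database 0), assuming a local redis-cli with one client left subscribed while another writes:

    redis-cli CONFIG SET notify-keyspace-events Ex
    redis-cli SUBSCRIBE __keyevent@0__:expired
    # from a second client:
    redis-cli SET foo bar PX 100    # an expired event for "foo" is published shortly after 100 ms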
823 | + | ||
824 | +############################### ADVANCED CONFIG ############################### | ||
825 | + | ||
826 | +# Hashes are encoded using a memory efficient data structure when they have a | ||
827 | +# small number of entries, and the biggest entry does not exceed a given | ||
828 | +# threshold. These thresholds can be configured using the following directives. | ||
829 | +hash-max-ziplist-entries 512 | ||
830 | +hash-max-ziplist-value 64 | ||
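A minimal sketch of observing this encoding, assuming a local redis-cli and a Redis version matching this configuration file (the key name is hypothetical):

    redis-cli HSET myhash field1 value1
    redis-cli OBJECT ENCODING myhash    # reports "ziplist" while under the limits above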
831 | + | ||
832 | +# Similarly to hashes, small lists are also encoded in a special way in order | ||
833 | +# to save a lot of space. The special representation is only used when | ||
834 | +# you are under the following limits: | ||
835 | +list-max-ziplist-entries 512 | ||
836 | +list-max-ziplist-value 64 | ||
837 | + | ||
838 | +# Sets have a special encoding in just one case: when a set is composed | ||
839 | +# of just strings that happen to be integers in radix 10 in the range | ||
840 | +# of 64 bit signed integers. | ||
841 | +# The following configuration setting sets the limit on the size of the | ||
842 | +# set in order to use this special memory saving encoding. | ||
843 | +set-max-intset-entries 512 | ||
844 | + | ||
845 | +# Similarly to hashes and lists, sorted sets are also specially encoded in | ||
846 | +# order to save a lot of space. This encoding is only used when the length and | ||
847 | +# elements of a sorted set are below the following limits: | ||
848 | +zset-max-ziplist-entries 128 | ||
849 | +zset-max-ziplist-value 64 | ||
850 | + | ||
851 | +# HyperLogLog sparse representation bytes limit. The limit includes the | ||
852 | +# 16-byte header. When a HyperLogLog using the sparse representation crosses | ||
853 | +# this limit, it is converted into the dense representation. | ||
854 | +# | ||
855 | +# A value greater than 16000 is totally useless, since at that point the | ||
856 | +# dense representation is more memory efficient. | ||
857 | +# | ||
858 | +# The suggested value is ~ 3000 in order to have the benefits of | ||
859 | +# the space-efficient encoding without slowing down PFADD too much, | ||
860 | +# which is O(N) with the sparse encoding. The value can be raised to | ||
861 | +# ~ 10000 when CPU is not a concern, but space is, and the data set is | ||
862 | +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. | ||
863 | +hll-sparse-max-bytes 3000 | ||
864 | + | ||
865 | +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in | ||
866 | +# order to help rehashing the main Redis hash table (the one mapping top-level | ||
867 | +# keys to values). The hash table implementation Redis uses (see dict.c) | ||
868 | +# performs a lazy rehashing: the more operations you run against a hash table | ||
869 | +# that is rehashing, the more rehashing "steps" are performed, so if the | ||
870 | +# server is idle the rehashing is never complete and some more memory is used | ||
871 | +# by the hash table. | ||
872 | +# | ||
873 | +# The default is to use this millisecond 10 times every second in order to | ||
874 | +# actively rehash the main dictionaries, freeing memory when possible. | ||
875 | +# | ||
876 | +# If unsure: | ||
877 | +# use "activerehashing no" if you have hard latency requirements and it is | ||
878 | +# not a good thing in your environment that Redis can reply from time to time | ||
879 | +# to queries with a 2 millisecond delay. | ||
880 | +# | ||
881 | +# use "activerehashing yes" if you don't have such hard requirements but | ||
882 | +# want to free memory asap when possible. | ||
883 | +activerehashing yes | ||
884 | + | ||
885 | +# The client output buffer limits can be used to force disconnection of clients | ||
886 | +# that are not reading data from the server fast enough for some reason (a | ||
887 | +# common reason is that a Pub/Sub client can't consume messages as fast as the | ||
888 | +# publisher can produce them). | ||
889 | +# | ||
890 | +# The limit can be set differently for the three different classes of clients: | ||
891 | +# | ||
892 | +# normal -> normal clients including MONITOR clients | ||
893 | +# slave -> slave clients | ||
894 | +# pubsub -> clients subscribed to at least one pubsub channel or pattern | ||
895 | +# | ||
896 | +# The syntax of every client-output-buffer-limit directive is the following: | ||
897 | +# | ||
898 | +# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds> | ||
899 | +# | ||
900 | +# A client is immediately disconnected once the hard limit is reached, or if | ||
901 | +# the soft limit is reached and remains reached for the specified number of | ||
902 | +# seconds (continuously). | ||
903 | +# So for instance if the hard limit is 32 megabytes and the soft limit is | ||
904 | +# 16 megabytes / 10 seconds, the client will get disconnected immediately | ||
905 | +# if the size of the output buffers reaches 32 megabytes, but will also get | ||
906 | +# disconnected if the client reaches 16 megabytes and continuously exceeds | ||
907 | +# the limit for 10 seconds. | ||
908 | +# | ||
909 | +# By default normal clients are not limited because they don't receive data | ||
910 | +# without asking (in a push way), but just after a request, so only | ||
911 | +# asynchronous clients may create a scenario where data is requested faster | ||
912 | +# than it can be read. | ||
913 | +# | ||
914 | +# Instead there is a default limit for pubsub and slave clients, since | ||
915 | +# subscribers and slaves receive data in a push fashion. | ||
916 | +# | ||
917 | +# Both the hard and the soft limit can be disabled by setting them to zero. | ||
918 | +client-output-buffer-limit normal 0 0 0 | ||
919 | +client-output-buffer-limit slave 256mb 64mb 60 | ||
920 | +client-output-buffer-limit pubsub 32mb 8mb 60 | ||
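Reading the three directives above under the semantics just described:

    # class    hard     soft    soft-seconds   effect
    # normal   0        0       0              never disconnected for buffer growth
    # slave    256mb    64mb    60             dropped at 256mb, or above 64mb for 60s
    # pubsub   32mb     8mb     60             dropped at 32mb, or above 8mb for 60s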
921 | + | ||
922 | +# Redis calls an internal function to perform many background tasks, like | ||
923 | +# closing connections of clients that have timed out, purging expired keys that are | ||
924 | +# never requested, and so forth. | ||
925 | +# | ||
926 | +# Not all tasks are performed with the same frequency, but Redis checks for | ||
927 | +# tasks to perform according to the specified "hz" value. | ||
928 | +# | ||
929 | +# By default "hz" is set to 10. Raising the value will use more CPU when | ||
930 | +# Redis is idle, but at the same time will make Redis more responsive when | ||
931 | +# there are many keys expiring at the same time, and timeouts may be | ||
932 | +# handled with more precision. | ||
933 | +# | ||
934 | +# The range is between 1 and 500, however a value over 100 is usually not | ||
935 | +# a good idea. Most users should use the default of 10 and raise this up to | ||
936 | +# 100 only in environments where very low latency is required. | ||
937 | +hz 10 | ||
938 | + | ||
939 | +# When a child rewrites the AOF file, if the following option is enabled | ||
940 | +# the file will be fsync-ed every 32 MB of data generated. This is useful | ||
941 | +# in order to commit the file to the disk more incrementally and avoid | ||
942 | +# big latency spikes. | ||
943 | +aof-rewrite-incremental-fsync yes |