aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.github/NPTest.cache (renamed from plugins/t/NPTest.cache.travis)48
-rwxr-xr-x.github/prepare_debian.sh101
-rw-r--r--.github/workflows/codeql-analysis.yml71
-rw-r--r--.github/workflows/test.yml47
-rw-r--r--.gitignore8
-rw-r--r--.travis.yml88
-rw-r--r--ACKNOWLEDGEMENTS8
-rw-r--r--Makefile.am1
-rw-r--r--NEWS47
-rwxr-xr-xNP-VERSION-GEN2
-rw-r--r--NPTest.pm252
-rw-r--r--README4
-rw-r--r--REQUIREMENTS16
-rw-r--r--THANKS.in25
-rw-r--r--configure.ac63
-rw-r--r--doc/RELEASING.md28
-rw-r--r--gl/fsusage.c5
-rw-r--r--gl/fsusage.h3
-rw-r--r--lib/utils_base.c32
-rw-r--r--lib/utils_base.h6
-rw-r--r--lib/utils_cmd.c42
-rw-r--r--lib/utils_cmd.h13
-rw-r--r--lib/utils_disk.c2
-rw-r--r--lib/utils_disk.h5
-rw-r--r--m4/libcurl.m4272
-rw-r--r--m4/np_mysqlclient.m428
-rw-r--r--m4/uriparser.m4140
-rw-r--r--plugins-root/Makefile.am3
-rw-r--r--plugins-root/check_dhcp.c3
-rw-r--r--plugins-root/check_icmp.c473
-rw-r--r--plugins-root/t/check_dhcp.t9
-rw-r--r--plugins-scripts/Makefile.am2
-rwxr-xr-xplugins-scripts/check_disk_smb.pl7
-rwxr-xr-xplugins-scripts/check_file_age.pl61
-rwxr-xr-xplugins-scripts/check_ifoperstatus.pl2
-rwxr-xr-xplugins-scripts/check_ircd.pl4
-rwxr-xr-xplugins-scripts/check_log.sh88
-rwxr-xr-xplugins-scripts/check_mailq.pl34
-rwxr-xr-xplugins-scripts/check_uptime.pl315
-rw-r--r--plugins-scripts/t/check_file_age.t75
-rw-r--r--plugins-scripts/t/check_uptime.t129
-rw-r--r--plugins-scripts/t/utils.t1
-rw-r--r--plugins/Makefile.am9
-rw-r--r--plugins/check_apt.c85
-rw-r--r--plugins/check_by_ssh.c2
-rw-r--r--plugins/check_cluster.c10
-rw-r--r--plugins/check_curl.c2467
-rw-r--r--plugins/check_dbi.c1
-rw-r--r--plugins/check_dig.c2
-rw-r--r--plugins/check_disk.c233
-rw-r--r--plugins/check_dns.c119
-rw-r--r--plugins/check_fping.c2
-rw-r--r--plugins/check_hpjd.c29
-rw-r--r--plugins/check_http.c62
-rw-r--r--plugins/check_ide_smart.c2
-rw-r--r--plugins/check_ldap.c7
-rw-r--r--plugins/check_load.c63
-rw-r--r--plugins/check_mysql.c3
-rw-r--r--plugins/check_mysql_query.c12
-rw-r--r--plugins/check_ntp.c4
-rw-r--r--plugins/check_ntp_peer.c2
-rw-r--r--plugins/check_pgsql.c5
-rw-r--r--plugins/check_ping.c14
-rw-r--r--plugins/check_procs.c57
-rw-r--r--plugins/check_radius.c2
-rw-r--r--plugins/check_real.c2
-rw-r--r--plugins/check_smtp.c21
-rw-r--r--plugins/check_snmp.c19
-rw-r--r--plugins/check_swap.c280
-rw-r--r--plugins/check_tcp.c23
-rw-r--r--plugins/check_ups.c5
-rw-r--r--plugins/common.h19
-rw-r--r--plugins/negate.c4
-rw-r--r--plugins/picohttpparser/Makefile.am3
-rw-r--r--plugins/picohttpparser/picohttpparser.c645
-rw-r--r--plugins/picohttpparser/picohttpparser.h87
-rw-r--r--plugins/popen.c85
-rw-r--r--plugins/popen.h1
-rw-r--r--plugins/runcmd.c23
-rw-r--r--plugins/sslutils.c33
-rw-r--r--plugins/t/check_by_ssh.t14
-rw-r--r--plugins/t/check_curl.t201
-rw-r--r--plugins/t/check_disk.t15
-rw-r--r--plugins/t/check_dns.t10
-rw-r--r--plugins/t/check_fping.t44
-rw-r--r--plugins/t/check_ftp.t11
-rw-r--r--plugins/t/check_http.t153
-rw-r--r--plugins/t/check_imap.t15
-rw-r--r--plugins/t/check_jabber.t20
-rw-r--r--plugins/t/check_ldap.t17
-rw-r--r--plugins/t/check_load.t4
-rw-r--r--plugins/t/check_mysql.t29
-rw-r--r--plugins/t/check_mysql_query.t11
-rw-r--r--plugins/t/check_snmp.t22
-rw-r--r--plugins/t/check_ssh.t14
-rw-r--r--plugins/t/check_swap.t6
-rw-r--r--plugins/t/check_tcp.t25
-rw-r--r--plugins/t/check_time.t11
-rw-r--r--plugins/t/check_udp.t6
-rw-r--r--plugins/tests/certs/expired-cert.pem41
-rw-r--r--plugins/tests/certs/expired-key.pem43
-rw-r--r--plugins/tests/certs/server-cert.pem41
-rw-r--r--plugins/tests/certs/server-key.pem43
-rwxr-xr-xplugins/tests/check_curl.t509
-rwxr-xr-xplugins/tests/check_http.t87
-rwxr-xr-xplugins/tests/check_procs.t26
-rwxr-xr-xplugins/tests/check_snmp.t127
-rw-r--r--plugins/tests/var/ps-axwo.debian219
-rw-r--r--plugins/tests/var/ps_axwo.debian84
-rw-r--r--plugins/utils.c175
-rw-r--r--plugins/utils.h20
-rw-r--r--po/de.po28
-rw-r--r--po/fr.po12
-rw-r--r--po/monitoring-plugins.pot12
-rw-r--r--tools/squid.conf7979
115 files changed, 15533 insertions, 1546 deletions
diff --git a/plugins/t/NPTest.cache.travis b/.github/NPTest.cache
index 38c0a6b2..232305a7 100644
--- a/plugins/t/NPTest.cache.travis
+++ b/.github/NPTest.cache
@@ -1,60 +1,54 @@
{
- 'MYSQL_LOGIN_DETAILS' => '-u root -d test',
'NP_ALLOW_SUDO' => 'yes',
'NP_DNS_SERVER' => '8.8.8.8',
'NP_GOOD_NTP_SERVICE' => '',
+ 'NP_HOST_DHCP_RESPONSIVE' => '',
+ 'NP_HOST_HPJD_PORT_INVALID' => '161',
+ 'NP_HOST_HPJD_PORT_VALID' => '',
+ 'NP_HOSTNAME_INVALID_CIDR' => '130.133.8.39/30',
'NP_HOSTNAME_INVALID' => 'nosuchhost',
- 'NP_HOSTNAME_VALID' => 'monitoring-plugins.org',
- 'NP_HOSTNAME_VALID_IP' => '130.133.8.40',
'NP_HOSTNAME_VALID_CIDR' => '130.133.8.41/30',
- 'NP_HOSTNAME_INVALID_CIDR' => '130.133.8.39/30',
+ 'NP_HOSTNAME_VALID_IP' => '130.133.8.40',
+ 'NP_HOSTNAME_VALID' => 'monitoring-plugins.org',
'NP_HOSTNAME_VALID_REVERSE' => 'orwell.monitoring-plugins.org.',
- 'NP_HOST_DHCP_RESPONSIVE' => '',
- 'NP_HOST_NONRESPONSIVE' => '10.0.0.1',
+ 'NP_HOST_NONRESPONSIVE' => '192.168.1.2',
'NP_HOST_RESPONSIVE' => 'localhost',
'NP_HOST_SMB' => '',
- 'NP_HOST_SNMP' => 'localhost',
+ 'NP_HOST_SNMP' => '',
'NP_HOST_TCP_FTP' => '',
'NP_HOST_TCP_HPJD' => '',
- 'NP_HOST_HPJD_PORT_INVALID' => '161',
- 'NP_HOST_HPJD_PORT_VALID' => '',
- 'NP_HOST_TCP_HTTP' => 'localhost',
'NP_HOST_TCP_HTTP2' => 'test.monitoring-plugins.org',
+ 'NP_HOST_TCP_HTTP' => 'localhost',
'NP_HOST_TCP_IMAP' => 'imap.web.de',
+ 'NP_HOST_TCP_JABBER' => 'jabber.org',
'NP_HOST_TCP_LDAP' => 'localhost',
'NP_HOST_TCP_POP' => 'pop.web.de',
+ 'NP_HOST_TCP_PROXY' => 'localhost',
'NP_HOST_TCP_SMTP' => 'localhost',
'NP_HOST_TCP_SMTP_NOTLS' => '',
'NP_HOST_TCP_SMTP_TLS' => '',
+ 'NP_HOST_TLS_CERT' => 'localhost',
+ 'NP_HOST_TLS_HTTP' => 'localhost',
+ 'NP_HOST_UDP_TIME' => 'none',
'NP_INTERNET_ACCESS' => 'yes',
- 'NP_LDAP_BASE_DN' => 'cn=admin,dc=nodomain',
- 'NP_MOUNTPOINT2_VALID' => '/media/ramdisk',
+ 'NP_LDAP_BASE_DN' => 'dc=nodomain',
+ 'NP_MOUNTPOINT2_VALID' => '/media/ramdisk1',
'NP_MOUNTPOINT_VALID' => '/',
+ 'NP_MYSQL_LOGIN_DETAILS' => '-u root -d test',
'NP_MYSQL_SERVER' => 'localhost',
- 'NP_HOST_UDP_TIME' => 'localhost',
'NP_MYSQL_SOCKET' => '/var/run/mysqld/mysqld.sock',
'NP_MYSQL_WITH_SLAVE' => '',
'NP_MYSQL_WITH_SLAVE_LOGIN' => '',
'NP_NO_NTP_SERVICE' => 'localhost',
+ 'NP_PORT_TCP_PROXY' => '3128',
'NP_SMB_SHARE' => '',
'NP_SMB_SHARE_DENY' => '',
'NP_SMB_SHARE_SPC' => '',
'NP_SMB_VALID_USER' => '',
'NP_SMB_VALID_USER_PASS' => '',
- 'NP_SNMP_COMMUNITY' => 'public',
+ 'NP_SNMP_COMMUNITY' => '',
+ 'NP_SNMP_USER' => '',
'NP_SSH_CONFIGFILE' => '~/.ssh/config',
'NP_SSH_HOST' => 'localhost',
- 'NP_SSH_IDENTITY' => '~/.ssh/id_dsa',
- 'NP_HOST_TCP_JABBER' => 'jabber.org',
- 'host_nonresponsive' => '10.0.0.1',
- 'host_responsive' => 'localhost',
- 'host_snmp' => '',
- 'host_tcp_ftp' => '',
- 'host_tcp_http' => 'localhost',
- 'host_tcp_imap' => 'imap.nierlein.de',
- 'host_tcp_smtp' => 'localhost',
- 'hostname_invalid' => 'nosuchhost',
- 'snmp_community' => '',
- 'user_snmp' => '',
- 'host_udp_time' => 'none',
+ 'NP_SSH_IDENTITY' => '~/.ssh/id_rsa'
}
diff --git a/.github/prepare_debian.sh b/.github/prepare_debian.sh
new file mode 100755
index 00000000..4021c104
--- /dev/null
+++ b/.github/prepare_debian.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+
+set -x
+set -e
+
+export DEBIAN_FRONTEND=noninteractive
+
+apt-get update
+apt-get -y install software-properties-common
+if [ $(lsb_release -is) = "Debian" ]; then
+ apt-add-repository non-free
+ apt-get update
+fi
+apt-get -y install perl autotools-dev libdbi-dev libldap2-dev libpq-dev libradcli-dev libnet-snmp-perl procps
+apt-get -y install libdbi0-dev libdbd-sqlite3 libssl-dev dnsutils snmp-mibs-downloader libsnmp-perl snmpd
+apt-get -y install fping snmp netcat-openbsd smbclient vsftpd apache2 ssl-cert postfix libhttp-daemon-ssl-perl
+apt-get -y install libdbd-sybase-perl libnet-dns-perl
+apt-get -y install slapd ldap-utils
+apt-get -y install gcc make autoconf automake gettext
+apt-get -y install faketime
+apt-get -y install libmonitoring-plugin-perl
+apt-get -y install libcurl4-openssl-dev
+apt-get -y install liburiparser-dev
+apt-get -y install squid
+apt-get -y install openssh-server
+apt-get -y install mariadb-server mariadb-client libmariadb-dev
+apt-get -y install cron iputils-ping
+apt-get -y install iproute2
+
+# remove ipv6 interface from hosts
+if [ $(ip addr show | grep "inet6 ::1" | wc -l) -eq "0" ]; then
+ sed '/^::1/d' /etc/hosts > /tmp/hosts
+ cp -f /tmp/hosts /etc/hosts
+fi
+
+ip addr show
+
+cat /etc/hosts
+
+
+# apache
+a2enmod ssl
+a2ensite default-ssl
+# replace snakeoil certs with openssl generated ones as the make-ssl-cert ones
+# seems to cause problems with our plugins
+rm /etc/ssl/certs/ssl-cert-snakeoil.pem
+rm /etc/ssl/private/ssl-cert-snakeoil.key
+openssl req -nodes -newkey rsa:2048 -x509 -sha256 -days 365 -nodes -keyout /etc/ssl/private/ssl-cert-snakeoil.key -out /etc/ssl/certs/ssl-cert-snakeoil.pem -subj "/C=GB/ST=London/L=London/O=Global Security/OU=IT Department/CN=$(hostname)"
+service apache2 restart
+
+# squid
+cp tools/squid.conf /etc/squid/squid.conf
+service squid start
+
+# mariadb
+service mariadb start
+mysql -e "create database IF NOT EXISTS test;" -uroot
+
+# ldap
+sed -e 's/cn=admin,dc=nodomain/'$(/usr/sbin/slapcat|grep ^dn:|awk '{print $2}')'/' -i .github/NPTest.cache
+service slapd start
+
+# sshd
+ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
+cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
+service ssh start
+sleep 1
+ssh-keyscan localhost >> ~/.ssh/known_hosts
+touch ~/.ssh/config
+
+# start one login session, required for check_users
+ssh -tt localhost </dev/null >/dev/null 2>/dev/null &
+disown %1
+
+# snmpd
+for DIR in /usr/share/snmp/mibs /usr/share/mibs; do
+ rm -f $DIR/ietf/SNMPv2-PDU \
+ $DIR/ietf/IPSEC-SPD-MIB \
+ $DIR/ietf/IPATM-IPMC-MIB \
+ $DIR/iana/IANA-IPPM-METRICS-REGISTRY-MIB
+done
+mkdir -p /var/lib/snmp/mib_indexes
+sed -e 's/^agentaddress.*/agentaddress 127.0.0.1/' -i /etc/snmp/snmpd.conf
+service snmpd start
+
+# start cron, will be used by check_nagios
+cron
+
+# start postfix
+service postfix start
+
+# start ftpd
+service vsftpd start
+
+# hostname
+sed "/NP_HOST_TLS_CERT/s/.*/'NP_HOST_TLS_CERT' => '$(hostname)',/" -i /src/.github/NPTest.cache
+
+# create some test files to lower inodes
+for i in $(seq 10); do
+ touch /media/ramdisk2/test.$1
+done
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
new file mode 100644
index 00000000..9de367e5
--- /dev/null
+++ b/.github/workflows/codeql-analysis.yml
@@ -0,0 +1,71 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ # The branches below must be a subset of the branches above
+ branches: [ master ]
+ schedule:
+ - cron: '15 18 * * 0'
+
+jobs:
+ analyze:
+ name: Analyze
+ runs-on: ubuntu-latest
+ permissions:
+ actions: read
+ contents: read
+ security-events: write
+
+ strategy:
+ fail-fast: false
+ matrix:
+ language: [ 'cpp' ]
+ # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
+ # Learn more:
+ # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v2
+
+ # Initializes the CodeQL tools for scanning.
+ - name: Initialize CodeQL
+ uses: github/codeql-action/init@v1
+ with:
+ languages: ${{ matrix.language }}
+ # If you wish to specify custom queries, you can do so here or in a config file.
+ # By default, queries listed here will override any specified in a config file.
+ # Prefix the list here with "+" to use these queries and those in the config file.
+ # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+ - name: Install packages
+ run: |
+ sudo apt-get install -y --no-install-recommends m4 gettext automake autoconf make build-essential
+ sudo apt-get install -y --no-install-recommends perl autotools-dev libdbi-dev libldap2-dev libpq-dev \
+ libmysqlclient-dev libradcli-dev libkrb5-dev libdbi0-dev \
+ libdbd-sqlite3 libssl-dev libcurl4-openssl-dev liburiparser-dev
+
+ - name: Configure build
+ run: |
+ ./tools/setup
+ ./configure --enable-libtap
+
+ - name: Build
+ run: |
+ make
+
+ - name: Perform CodeQL Analysis
+ uses: github/codeql-action/analyze@v1
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 00000000..0f93930a
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,47 @@
+name: Test
+
+on:
+ push:
+ branches:
+ - '*'
+ pull_request:
+
+jobs:
+# macos:
+# ...
+ linux:
+ runs-on: ubuntu-latest
+ name: Running tests on ${{ matrix.distro }}
+ strategy:
+ fail-fast: false
+ matrix:
+ distro:
+ - 'debian:testing'
+ #...
+ include:
+ - distro: 'debian:testing'
+ prepare: .github/prepare_debian.sh
+ #...
+ steps:
+ - name: Git clone repository
+ uses: actions/checkout@v2
+ #- name: Setup tmate session, see https://github.com/marketplace/actions/debugging-with-tmate
+ # uses: mxschmitt/action-tmate@v3
+ - name: Run the tests on ${{ matrix.distro }}
+ run: |
+ docker volume create --driver local --opt type=tmpfs --opt device=tmpfs --opt o=size=100m,uid=1000 tmp-vol
+ docker run \
+ -e NPTEST_ACCEPTDEFAULT=1 \
+ -e NPTEST_CACHE="/src/.github/NPTest.cache" \
+ -w /src -v ${PWD}:/src \
+ --tmpfs /media/ramdisk1 \
+ -v /var/run/utmp:/var/run/utmp \
+ --mount source=tmp-vol,destination=/src,target=/media/ramdisk2 \
+ ${{ matrix.distro }} \
+ /bin/sh -c '${{ matrix.prepare }} && \
+ tools/setup && \
+ ./configure --enable-libtap --with-ipv6=no && \
+ make && \
+ make test'
+ docker container prune -f
+ docker volume prune -f
diff --git a/.gitignore b/.gitignore
index 3093c6ea..c7b668e2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -142,6 +142,7 @@ NP-VERSION-FILE
/plugins/check_by_ssh
/plugins/check_clamd
/plugins/check_cluster
+/plugins/check_curl
/plugins/check_dbi
/plugins/check_dig
/plugins/check_disk
@@ -202,6 +203,12 @@ NP-VERSION-FILE
/plugins/stamp-h*
/plugins/urlize
+# /plugins/picohttpparser
+/plugins/picohttpparser/Makefile
+/plugins/picohttpparser/Makefile.in
+/plugins/picohttpparser/.deps
+/plugins/picohttpparser/libpicohttpparser.a
+
# /plugins/t/
/plugins/t/*.tmp
@@ -241,6 +248,7 @@ NP-VERSION-FILE
/plugins-scripts/check_sensors
/plugins-scripts/check_wave
/plugins-scripts/check_file_age
+/plugins-scripts/check_uptime
# /po/
/po/Makefile
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 78ebc30b..00000000
--- a/.travis.yml
+++ /dev/null
@@ -1,88 +0,0 @@
-sudo: required
-dist: trusty
-language: c
-
-env:
- global:
- # This is the encrypted COVERITY_SCAN_TOKEN, created via the "travis
- # encrypt" command using the project repository's public key.
- - secure: "ggJ9c/VfKcwtrwz/My+ne4My7D8g3qi3vz5Hh+yLiri0+oIXCy313ZD6ssIEY/5beQZEOnuHhBgBJd/Y3clSQNc2M9fRNc+wxOkIO992lgnY0MZJN3y9MLfpqUbTClhU9Fst0qXQqGpI6UI8yz1tj7yKi7DPrycJLRrjMpyTfyo="
-
-matrix:
- include:
- - compiler: "gcc"
- os: linux
- env:
- - PLATFORM=linux BITS=64 HOST=x86_64
- addons:
- coverity_scan:
- project:
- name: "monitoring-plugins/monitoring-plugins"
- description: "Monitoring Plugins"
- notification_email: team@monitoring-plugins.org
- build_command_prepend: tools/setup && ./configure
- build_command: make
- branch_pattern: coverity.*
- - compiler: "clang"
- os: linux
- env:
- - PLATFORM=linux BITS=64 HOST=x86_64
-
-before_install:
- # Trusty related fixed
- # multiverse is no on trusty activated (https://github.com/travis-ci/travis-ci/issues/4979)
- - sudo add-apt-repository "deb http://archive.ubuntu.com/ubuntu/ trusty multiverse" && sudo add-apt-repository "deb http://archive.ubuntu.com/ubuntu/ trusty-updates multiverse"
- # /etc/hosts has IPv6 hosts (https://github.com/travis-ci/travis-ci/issues/4978)
- - sudo [ $(ip addr show | grep "inet6 ::1" | wc -l) -lt "1" ] && sudo sed -i '/^::1/d' /etc/hosts
- # Trusty has running ntpd on localhost, but we don't like that for our tests
- - sudo killall -9 ntpd
- # Trusty has no swap, lets create some
- - sudo fallocate -l 20M /swapfile; sudo chmod 600 /swapfile; sudo mkswap /swapfile; sudo swapon /swapfile
- - sudo add-apt-repository -y ppa:waja/trusty-backports
- - sudo apt-get update -qq
- - sudo apt-get purge -qq gawk
- # http://docs.travis-ci.com/user/trusty-ci-environment/ indicates, no MySQL on Trusty (yet)
- # # ensure we have a test database in place for tests
- # - mysql -e "create database IF NOT EXISTS test;" -uroot
-
-install:
- - sudo apt-get install -qq --no-install-recommends perl autotools-dev libdbi-dev libldap2-dev libpq-dev libmysqlclient-dev libradcli-dev libkrb5-dev libnet-snmp-perl procps
- - sudo apt-get install -qq --no-install-recommends libdbi0-dev libdbd-sqlite3 libssl-dev dnsutils snmp-mibs-downloader libsnmp-perl snmpd
- - sudo apt-get install -qq --no-install-recommends fping snmp netcat smbclient fping pure-ftpd apache2 postfix libhttp-daemon-ssl-perl
- - sudo apt-get install -qq --no-install-recommends libdbd-sybase-perl libnet-dns-perl
- - sudo apt-get install -qq --no-install-recommends slapd ldap-utils
- - sudo apt-get install -qq --no-install-recommends autoconf automake
- - sudo apt-get install -qq --no-install-recommends faketime
- # Trusty related dependencies (not yet provided)
- - test "$(dpkg -l | grep -E "mysql-(client|server)-[0-9].[0-9]" | grep -c ^ii)" -gt 0 || sudo apt-get install -qq --no-install-recommends mariadb-client mariadb-server
-
-before_script:
- # ensure we have a test database in place for tests
- - mysql -e "create database IF NOT EXISTS test;" -uroot
- # Detect LDAP configuration (seems volatile on trusty env)
- - sed -e 's/cn=admin,dc=nodomain/'$(sudo /usr/sbin/slapcat|grep ^dn:|grep cn=|awk '{print $2}')'/' -i plugins/t/NPTest.cache.travis
- - tools/setup
- - ./configure --enable-libtap
- - make
- - export NPTEST_CACHE="$(pwd)/plugins/t/NPTest.cache.travis"
- - ssh-keygen -t dsa -N "" -f ~/.ssh/id_dsa
- - cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
- - ssh-keyscan localhost >> ~/.ssh/known_hosts
- - touch ~/.ssh/config
- - sudo rm -f /usr/share/mibs/ietf/SNMPv2-PDU /usr/share/mibs/ietf/IPSEC-SPD-MIB /usr/share/mibs/ietf/IPATM-IPMC-MIB /usr/share/mibs/iana/IANA-IPPM-METRICS-REGISTRY-MIB
- - sudo mkdir -p /var/lib/snmp/mib_indexes
- - sudo mkdir /media/ramdisk && sudo chmod 777 /media/ramdisk && sudo mount -t tmpfs -o size=20% none /media/ramdisk
-
-script:
- - if [ "$COVERITY_SCAN_BRANCH" != 1 ]; then make test; fi
-
-notifications:
- irc:
- channels:
- - "chat.freenode.net#Monitoring-Plugins"
- on_success: change
- on_failure: always
- skip_join: true
- email:
- # - team@monitoring-plugins.org
-
diff --git a/ACKNOWLEDGEMENTS b/ACKNOWLEDGEMENTS
index 50c714c3..d73be549 100644
--- a/ACKNOWLEDGEMENTS
+++ b/ACKNOWLEDGEMENTS
@@ -20,7 +20,7 @@ Using the DLPI support on SysV systems to get the host MAC address in check_dhcp
Stenberg, Daniel
Copyright (c) 1996 - 2004, Daniel Stenberg, <daniel@haxx.se>
http://curl.haxx.se/
-Use of duplication of macros in m4/np_curl.m4
+Use of duplication of macros in m4/np_curl.m4 (slighly adapted for m4/uriparser.m4 too)
Coreutils team
Copyright (C) 91, 1995-2004 Free Software Foundation, Inc.
@@ -31,3 +31,9 @@ Gnulib team
Copyright (C) 2001, 2003, 2004, 2006 Free Software Foundation, Inc
http://www.gnu.org/software/gnulib/
Use of lib files that originally were used from coreutils
+
+Copyright (c) 2009-2014 Kazuho Oku, Tokuhiro Matsuno, Daisuke Murase,
+ Shigeo Mitsunari
+picohttpparser
+https://github.com/h2o/picohttpparser
+Use of the library for HTTP header parsing in check_curl.
diff --git a/Makefile.am b/Makefile.am
index 7e0d4131..df1bcbb3 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -34,6 +34,7 @@ test test-debug:
if test "$(PERLMODS_DIR)" != ""; then cd perlmods && $(MAKE) $@; fi
cd plugins && $(MAKE) $@
cd plugins-scripts && $(MAKE) $@
+ cd plugins-root && $(MAKE) $@
# Solaris pkgmk
BUILDDIR = build-solaris
diff --git a/NEWS b/NEWS
index 7be8048f..3790e8a0 100644
--- a/NEWS
+++ b/NEWS
@@ -1,12 +1,57 @@
This file documents the major additions and syntax changes between releases.
-2.3 [...]
+2.4 [...]
+ ENHANCEMENTS
+
+ FIXES
+
+2.3 10th December 2020
ENHANCEMENTS
check_dns: allow 'expected address' (-a) to be specified in CIDR notation
(IPv4 only).
+ check_dns: allow for IPv6 RDNS
+ check_dns: Accept CIDR
+ check_dns: allow unsorted addresses
+ check_dns: allow forcing complete match of all addresses
+ check_dns: option to expect NXDOMAIN
+ check_apt: add --only-critical switch
+ check_apt: add -l/--list option to print packages
+ check_file_age: add range checking
+ check_file_age: enable to test for maximum file size
+ check_apt: adding packages-warning option
+ check_load: Adding top consuming processes option
+ check_http: Adding Proxy-Authorization and extra headers
+ check_snmp: make calcualtion of timeout value in help output more clear
+ check_uptime: new plugin for checking uptime to see how long the system is running
+ check_curl: check_http replacement based on libcurl
+ check_http: Allow user to specify HTTP method after proxy CONNECT
+ check_http: Add new flag --show-body/-B to print body
+ check_cluster: Added data argument validation
+ check_icmp: Add IPv6 support
+ check_icmp: Automatically detect IP protocol
+ check_icmp: emit error if multiple protocol version
+ check_disk: add support to display inodes usage in perfdata
+ check_hpjd: Added -D option to disable warning on 'out of paper'
+ check_http: support the --show-body/-B flag when --expect is used
+ check_mysql: allow mariadbclient to be used
+ check_tcp: add --sni
+ check_dns: detect unreachable dns service in nslookup output
FIXES
Fix regression where check_dhcp was rereading response in a tight loop
+ check_dns: fix error detection on sles nslookup
+ check_disk_smb: fix timeout issue
+ check_swap: repaired "-n" behaviour
+ check_icmp: Correctly set address_family on lookup
+ check_icmp: Do not overwrite -4,-6 on lookup
+ check_smtp: initializes n before it is used
+ check_dns: fix typo in parameter description
+ check_by_ssh: fix child process leak on timeouts
+ check_mysql: Allow sockets to be specified to -H
+ check_procs: improve command examples for 'at least' processes
+ check_swap: repaired "-n" behaviour
+ check_disk: include -P switch in help
+ check_mailq: restore accidentially removed options
2.2 29th November 2016
ENHANCEMENTS
diff --git a/NP-VERSION-GEN b/NP-VERSION-GEN
index cf78d69c..c353b1d1 100755
--- a/NP-VERSION-GEN
+++ b/NP-VERSION-GEN
@@ -6,7 +6,7 @@
SRC_ROOT=`dirname $0`
NPVF=NP-VERSION-FILE
-DEF_VER=2.2.git
+DEF_VER=2.3git
LF='
'
diff --git a/NPTest.pm b/NPTest.pm
index f72ed2df..4b2de39b 100644
--- a/NPTest.pm
+++ b/NPTest.pm
@@ -53,8 +53,8 @@ developer to interactively request test parameter information from the
user. The user can accept the developer's default value or reply "none"
which will then be returned as "" for the test to skip if appropriate.
-If a parameter needs to be entered and the test is run without a tty
-attached (such as a cronjob), the parameter will be assigned as if it
+If a parameter needs to be entered and the test is run without a tty
+attached (such as a cronjob), the parameter will be assigned as if it
was "none". Tests can check for the parameter and skip if not set.
Responses are stored in an external, file-based cache so subsequent test
@@ -62,17 +62,6 @@ runs will use these values. The user is able to change the values by
amending the values in the file /var/tmp/NPTest.cache, or by setting
the appropriate environment variable before running the test.
-The option exists to store parameters in a scoped means, allowing a
-test harness to a localise a parameter should the need arise. This
-allows a parameter of the same name to exist in a test harness
-specific scope, while not affecting the globally scoped parameter. The
-scoping identifier is the name of the test harness sans the trailing
-".t". All cache searches first look to a scoped parameter before
-looking for the parameter at global scope. Thus for a test harness
-called "check_disk.t" requesting the parameter "mountpoint_valid", the
-cache is first searched for "check_disk"/"mountpoint_valid", if this
-fails, then a search is conducted for "mountpoint_valid".
-
To facilitate quick testing setup, it is possible to accept all the
developer provided defaults by setting the environment variable
"NPTEST_ACCEPTDEFAULT" to "1" (or any other perl truth value). Note
@@ -249,26 +238,26 @@ sub checkCmd
{
if ( scalar( grep { $_ == $exitStatus } @{$desiredExitStatus} ) )
{
- $desiredExitStatus = $exitStatus;
+ $desiredExitStatus = $exitStatus;
}
else
{
- $desiredExitStatus = -1;
+ $desiredExitStatus = -1;
}
}
elsif ( ref $desiredExitStatus eq "HASH" )
{
if ( exists( ${$desiredExitStatus}{$exitStatus} ) )
{
- if ( defined( ${$desiredExitStatus}{$exitStatus} ) )
- {
- $testOutput = ${$desiredExitStatus}{$exitStatus};
- }
- $desiredExitStatus = $exitStatus;
+ if ( defined( ${$desiredExitStatus}{$exitStatus} ) )
+ {
+ $testOutput = ${$desiredExitStatus}{$exitStatus};
+ }
+ $desiredExitStatus = $exitStatus;
}
else
{
- $desiredExitStatus = -1;
+ $desiredExitStatus = -1;
}
}
@@ -327,78 +316,51 @@ sub skipMsg
return $testStatus;
}
-sub getTestParameter
-{
- my( $param, $envvar, $default, $brief, $scoped );
- my $new_style;
- if (scalar @_ <= 3) {
- ($param, $brief, $default) = @_;
- $envvar = $param;
- $new_style = 1;
- } else {
- ( $param, $envvar, $default, $brief, $scoped ) = @_;
- $new_style = 0;
- }
-
- # Apply default values for optional arguments
- $scoped = ( defined( $scoped ) && $scoped );
-
- my $testharness = basename( (caller(0))[1], ".t" ); # used for scoping
+sub getTestParameter {
+ my($param, $description, $default) = @_;
- if ( defined( $envvar ) && exists( $ENV{$envvar} ) && $ENV{$envvar} )
- {
- return $ENV{$envvar};
+ if($param !~ m/^NP_[A-Z0-9_]+$/mx) {
+ die("parameter should be all uppercase and start with NP_ (requested from ".(caller(0))[1].")");
}
- my $cachedValue = SearchCache( $param, $testharness );
- if ( defined( $cachedValue ) )
- {
- # This save required to convert to new style because the key required is
- # changing to the environment variable
- if ($new_style == 0) {
- SetCacheParameter( $envvar, undef, $cachedValue );
- }
+ return $ENV{$param} if $ENV{$param};
+
+ my $cachedValue = SearchCache($param);
+ if(defined $cachedValue) {
return $cachedValue;
}
- my $defaultValid = ( defined( $default ) && $default );
- my $autoAcceptDefault = ( exists( $ENV{'NPTEST_ACCEPTDEFAULT'} ) && $ENV{'NPTEST_ACCEPTDEFAULT'} );
-
- if ( $autoAcceptDefault && $defaultValid )
- {
- return $default;
+ if($ENV{'NPTEST_ACCEPTDEFAULT'}) {
+ return $default if $default;
+ return "";
}
# Set "none" if no terminal attached (eg, tinderbox build servers when new variables set)
return "" unless (-t STDIN);
my $userResponse = "";
-
- while ( $userResponse eq "" )
- {
+ while($userResponse eq "") {
print STDERR "\n";
- print STDERR "Test Harness : $testharness\n";
- print STDERR "Test Parameter : $param\n";
- print STDERR "Environment Variable : $envvar\n" if ($param ne $envvar);
- print STDERR "Brief Description : $brief\n";
- print STDERR "Enter value (or 'none') ", ($defaultValid ? "[${default}]" : "[]"), " => ";
+ print STDERR "Test File : ".(caller(0))[1]."\n";
+ print STDERR "Test Parameter : $param\n";
+ print STDERR "Description : $description\n";
+ print STDERR "Enter value (or 'none') ", ($default ? "[${default}]" : "[]"), " => ";
$userResponse = <STDIN>;
$userResponse = "" if ! defined( $userResponse ); # Handle EOF
- chomp( $userResponse );
- if ( $defaultValid && $userResponse eq "" )
- {
+ chomp($userResponse);
+ if($default && $userResponse eq "") {
$userResponse = $default;
}
}
print STDERR "\n";
- if ($userResponse =~ /^(na|none)$/) {
- $userResponse = "";
+ if($userResponse =~ /^(na|none)$/) {
+ $userResponse = "";
}
- # define all user responses at global scope
- SetCacheParameter( $param, ( $scoped ? $testharness : undef ), $userResponse );
+ # store user responses
+ SetCacheParameter($param, $userResponse);
return $userResponse;
}
@@ -407,37 +369,20 @@ sub getTestParameter
# Internal Cache Management Functions
#
-sub SearchCache
-{
- my( $param, $scope ) = @_;
+sub SearchCache {
+ my($param) = @_;
LoadCache();
- if ( exists( $CACHE{$scope} ) && exists( $CACHE{$scope}{$param} ) )
- {
- return $CACHE{$scope}{$param};
- }
-
- if ( exists( $CACHE{$param} ) )
- {
+ if(exists $CACHE{$param}) {
return $CACHE{$param};
}
- return undef; # Need this to say "nothing found"
+ return undef; # Need this to say "nothing found"
}
-sub SetCacheParameter
-{
- my( $param, $scope, $value ) = @_;
-
- if ( defined( $scope ) )
- {
- $CACHE{$scope}{$param} = $value;
- }
- else
- {
- $CACHE{$param} = $value;
- }
-
+sub SetCacheParameter {
+ my($param, $value) = @_;
+ $CACHE{$param} = $value;
SaveCache();
}
@@ -475,6 +420,11 @@ sub SaveCache
delete $CACHE{'_cache_loaded_'};
my $oldFileContents = delete $CACHE{'_original_cache'};
+ # clean up old style params
+ for my $key (keys %CACHE) {
+ delete $CACHE{$key} if $key !~ m/^NP_[A-Z0-9_]+$/mx;
+ }
+
my($dataDumper) = new Data::Dumper([\%CACHE]);
$dataDumper->Terse(1);
$dataDumper->Sortkeys(1);
@@ -486,7 +436,7 @@ sub SaveCache
if($oldFileContents ne $data) {
my($fileHandle) = new IO::File;
if (!$fileHandle->open( "> ${CACHEFILENAME}")) {
- print STDERR "NPTest::LoadCache() : Problem saving ${CACHEFILENAME} : $!\n";
+ print STDERR "NPTest::SaveCache() : Problem saving ${CACHEFILENAME} : $!\n";
return;
}
print $fileHandle $data;
@@ -542,10 +492,10 @@ sub DetermineTestHarnessDirectory
push ( @dirs, "./tests");
}
- if ( @dirs > 0 )
- {
- return @dirs;
- }
+ if ( @dirs > 0 )
+ {
+ return @dirs;
+ }
# To be honest I don't understand which case satisfies the
# original code in test.pl : when $tstdir == `pwd` w.r.t.
@@ -611,73 +561,73 @@ sub TestsFrom
# All the new object oriented stuff below
-sub new {
- my $type = shift;
- my $self = {};
- return bless $self, $type;
+sub new {
+ my $type = shift;
+ my $self = {};
+ return bless $self, $type;
}
# Accessors
sub return_code {
- my $self = shift;
- if (@_) {
- return $self->{return_code} = shift;
- } else {
- return $self->{return_code};
- }
+ my $self = shift;
+ if (@_) {
+ return $self->{return_code} = shift;
+ } else {
+ return $self->{return_code};
+ }
}
sub output {
- my $self = shift;
- if (@_) {
- return $self->{output} = shift;
- } else {
- return $self->{output};
- }
+ my $self = shift;
+ if (@_) {
+ return $self->{output} = shift;
+ } else {
+ return $self->{output};
+ }
}
sub perf_output {
- my $self = shift;
- $_ = $self->{output};
- /\|(.*)$/;
- return $1 || "";
+ my $self = shift;
+ $_ = $self->{output};
+ /\|(.*)$/;
+ return $1 || "";
}
sub only_output {
- my $self = shift;
- $_ = $self->{output};
- /(.*?)\|/;
- return $1 || "";
+ my $self = shift;
+ $_ = $self->{output};
+ /(.*?)\|/;
+ return $1 || "";
}
sub testCmd {
- my $class = shift;
- my $command = shift or die "No command passed to testCmd";
- my $timeout = shift || 120;
- my $object = $class->new;
-
- local $SIG{'ALRM'} = sub { die("timeout in command: $command"); };
- alarm($timeout); # no test should take longer than 120 seconds
-
- my $output = `$command`;
- $object->return_code($? >> 8);
- $_ = $? & 127;
- if ($_) {
- die "Got signal $_ for command $command";
- }
- chomp $output;
- $object->output($output);
-
- alarm(0);
-
- my ($pkg, $file, $line) = caller(0);
- print "Testing: $command", $/;
- if ($ENV{'NPTEST_DEBUG'}) {
- print "testCmd: Called from line $line in $file", $/;
- print "Output: ", $object->output, $/;
- print "Return code: ", $object->return_code, $/;
- }
-
- return $object;
+ my $class = shift;
+ my $command = shift or die "No command passed to testCmd";
+ my $timeout = shift || 120;
+ my $object = $class->new;
+
+ local $SIG{'ALRM'} = sub { die("timeout in command: $command"); };
+ alarm($timeout); # no test should take longer than 120 seconds
+
+ my $output = `$command`;
+ $object->return_code($? >> 8);
+ $_ = $? & 127;
+ if ($_) {
+ die "Got signal $_ for command $command";
+ }
+ chomp $output;
+ $object->output($output);
+
+ alarm(0);
+
+ my ($pkg, $file, $line) = caller(0);
+ print "Testing: $command", $/;
+ if ($ENV{'NPTEST_DEBUG'}) {
+ print "testCmd: Called from line $line in $file", $/;
+ print "Output: ", $object->output, $/;
+ print "Return code: ", $object->return_code, $/;
+ }
+
+ return $object;
}
# do we have ipv6
diff --git a/README b/README
index beb77690..71b4d37c 100644
--- a/README
+++ b/README
@@ -10,7 +10,7 @@ Monitoring Plugins
* For information on detailed changes that have been made or plugins
that have been added, read the `ChangeLog` file.
-* Some plugins require that you have additional programs and/or
+* Some plugins require that you have additional programs or
libraries installed on your system before they can be used. Plugins that
are dependent on other programs/libraries that are missing are usually not
compiled. Read the `REQUIREMENTS` file for more information.
@@ -19,7 +19,7 @@ Monitoring Plugins
the basic guidelines for development will provide detailed help when
invoked with the `-h` or `--help` options.
-You can check for the latest plugins at:
+You can check the latest plugins at:
* <https://www.monitoring-plugins.org/>
diff --git a/REQUIREMENTS b/REQUIREMENTS
index ac7b5935..f3b1c01d 100644
--- a/REQUIREMENTS
+++ b/REQUIREMENTS
@@ -11,6 +11,22 @@ check_ldaps, check_http --ssl, check_tcp --ssl, check_smtp --starttls
- Requires openssl or gnutls libraries for SSL connections
http://www.openssl.org, http://www.gnu.org/software/gnutls
+check_curl:
+ - Requires libcurl 7.15.2 or later
+ http://www.haxx.se
+ - --ssl/-S and -C requires OpenSSL for certificate checks, otherwise
+ libcurl must be quite new to support CURLINFO_CERTINFO with
+ GnuTLS and NSS libraries:
+ - 7.42.0 or newer for GnuTLS
+ - 7.34.0 or newer for NSS
+ GnuTLS is known to create problems on some distributions with
+ self-signed certificate chains
+ http://www.openssl.org, http://www.gnu.org/software/gnutls,
+ http://www.mozilla.org/projects/security/pki/nss/,
+ other SSL implementations are currently not supported
+ - uriparser 0.7.5 or later
+ https://uriparser.github.io/
+
check_fping:
- Requires the fping utility distributed with SATAN. Either
download and install SATAN or grab the fping program from
diff --git a/THANKS.in b/THANKS.in
index ebc81556..7d1d1ff0 100644
--- a/THANKS.in
+++ b/THANKS.in
@@ -356,3 +356,28 @@ Sven Geggus
Thomas Kurschel
Yannick Charton
Nicolai Søborg
+Rolf Eike Beer
+Bernd Arnold
+Andreas Baumann
+Tobias Wolf
+Lars Michelsen
+Vincent Danjean
+Kostyantyn Hushchyn
+Christian Tacke
+Alexander A. Klimov
+Vadim Zhukov
+Bernard Spil
+Christian Schmidt
+Guido Falsi
+Harald Koch
+Iustin Pop
+Jacob Hansen
+Jean-François Rameau
+Karol Babioch
+Lucas Bussey
+Marc Sánchez
+Markus Frosch
+Michael Kraus
+Patrick Rauscher
+Prathamesh Bhanuse
+Valentin Vidic
diff --git a/configure.ac b/configure.ac
index bf129956..dfc37b5e 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,6 +1,6 @@
dnl Process this file with autoconf to produce a configure script.
AC_PREREQ(2.59)
-AC_INIT(monitoring-plugins,2.2)
+AC_INIT(monitoring-plugins,2.3git)
AC_CONFIG_SRCDIR(NPTest.pm)
AC_CONFIG_FILES([gl/Makefile])
AC_CONFIG_AUX_DIR(build-aux)
@@ -385,6 +385,42 @@ if test "$ac_cv_header_wtsapi32_h" = "yes"; then
AC_SUBST(WTSAPI32LIBS)
fi
+_can_enable_check_curl=no
+dnl Check for cURL library
+LIBCURL_CHECK_CONFIG(yes, 7.15.2, [
+ _can_enable_check_curl=yes
+ LIBCURLINCLUDE="$LIBCURL_CPPFLAGS"
+ LIBCURLLIBS="$LIBCURL"
+ LIBCURLCFLAGS="$LIBCURL_CPPFLAGS"
+ AC_SUBST(LIBCURLINCLUDE)
+ AC_SUBST(LIBCURLLIBS)
+ AC_SUBST(LIBCURLCFLAGS)
+ ], [
+ _can_enable_check_curl=no
+ AC_MSG_WARN([Skipping curl plugin])
+ AC_MSG_WARN([install libcurl libs to compile this plugin (see REQUIREMENTS).])
+])
+
+dnl Check for uriparser library
+URIPARSER_CHECK(yes, 0.7.5, [
+ URIPARSERINCLUDE="$URIPARSER_CPPFLAGS"
+ URIPARSERLIBS="$URIPARSER"
+ URIPARSERCFLAGS="$URIPARSER_CPPFLAGS"
+ AC_SUBST(URIPARSERINCLUDE)
+ AC_SUBST(URIPARSERLIBS)
+ AC_SUBST(URIPARSERCFLAGS)
+ ], [
+ _can_enable_check_curl=no
+ AC_MSG_WARN([Skipping curl plugin])
+ AC_MSG_WARN([install the uriparser library to compile this plugin (see REQUIREMENTS).])
+])
+
+dnl prerequisites met, enable the plugin
+if test x$_can_enable_check_curl = xyes; then
+ EXTRAS="$EXTRAS check_curl\$(EXEEXT)"
+fi
+AC_CONFIG_FILES([plugins/picohttpparser/Makefile])
+
dnl Fallback to who(1) if the system doesn't provide an utmpx(5) interface
if test "$ac_cv_header_utmpx_h" = "no" -a "$ac_cv_header_wtsapi32_h" = "no"
then
@@ -747,6 +783,16 @@ dnl ac_cv_ps_format=["%*s %d %d %d %d %*d %*d %d %d%*[ 0123456789abcdef]%[OSRZT
dnl ac_cv_ps_cols=8
dnl AC_MSG_RESULT([$ac_cv_ps_command])
+dnl This one is the exact same test as the next one but includes etime
+elif ps axwo 'stat comm vsz rss user uid pid ppid etime args' 2>/dev/null | \
+ egrep -i ["^ *STAT +[UCOMAND]+ +VSZ +RSS +USER +UID +PID +PPID +ELAPSED +COMMAND"] > /dev/null
+then
+ ac_cv_ps_varlist="[procstat,&procuid,&procpid,&procppid,&procvsz,&procrss,&procpcpu,procetime,procprog,&pos]"
+ ac_cv_ps_command="$PATH_TO_PS axwo 'stat uid pid ppid vsz rss pcpu etime comm args'"
+ ac_cv_ps_format="%s %d %d %d %d %d %f %s %s %n"
+ ac_cv_ps_cols=10
+ AC_MSG_RESULT([$ac_cv_ps_command])
+
dnl Some gnu/linux systems (debian for one) don't like -axwo and need axwo.
dnl so test for this first...
elif ps axwo 'stat comm vsz rss user uid pid ppid args' 2>/dev/null | \
@@ -1016,6 +1062,10 @@ if test -n "$ac_cv_ps_varlist" ; then
AC_DEFINE(PS_USES_PROCETIME,"yes",
[Whether the ps utility uses the "procetime" field])
fi
+ if echo "$ac_cv_ps_varlist" | grep "procpcpu" >/dev/null; then
+ AC_DEFINE(PS_USES_PROCPCPU,"yes",
+ [Whether the ps utility uses the "procpcpu" field])
+ fi
fi
AC_PATH_PROG(PATH_TO_PING,ping)
@@ -1060,6 +1110,14 @@ then
ac_cv_ping_packets_first=yes
AC_MSG_RESULT([$with_ping_command])
+elif $PATH_TO_PING -4 -n -U -w 10 -c 1 127.0.0.1 2>/dev/null | \
+ egrep -i "^round-trip|^rtt" >/dev/null
+then
+ # check if -4 is supported - issue #1550
+ with_ping_command="$PATH_TO_PING -4 -n -U -w %d -c %d %s"
+ ac_cv_ping_packets_first=yes
+ ac_cv_ping_has_timeout=yes
+ AC_MSG_RESULT([$with_ping_command])
elif $PATH_TO_PING -n -U -w 10 -c 1 127.0.0.1 2>/dev/null | \
egrep -i "^round-trip|^rtt" >/dev/null
then
@@ -1882,4 +1940,5 @@ ACX_FEATURE([enable],[perl-modules])
ACX_FEATURE([with],[cgiurl])
ACX_FEATURE([with],[trusted-path])
ACX_FEATURE([enable],[libtap])
-
+ACX_FEATURE([with],[libcurl])
+ACX_FEATURE([with],[uriparser])
diff --git a/doc/RELEASING.md b/doc/RELEASING.md
index 1f9db078..bcd2c5ac 100644
--- a/doc/RELEASING.md
+++ b/doc/RELEASING.md
@@ -11,14 +11,14 @@ Before you start
- Check Travis CI status.
- Update local Git repository to the current `master` tip. For a
- maintenance release (e.g., version 2.2.2), update to the current
- `maint-2.2` tip, instead.
+ maintenance release (e.g., version 2.3.2), update to the current
+ `maint-2.3` tip, instead.
Prepare and commit files
------------------------
- Update `configure.ac` and `NP-VERSION-GEN` with new version.
-- Update `NEWS` from `git log --reverse v2.2.1..` output, and specify
+- Update `NEWS` from `git log --reverse v2.3.1..` output, and specify
the release version/date.
- Update `AUTHORS` if there are new team members.
- Update `THANKS.in` using `tools/update-thanks`.
@@ -29,27 +29,27 @@ Prepare and commit files
Create annotated tag
--------------------
- git tag -a -m 'Monitoring Plugins 2.3' v2.3
+ git tag -a -m 'Monitoring Plugins 2.4' v2.4
Push the code and tag to GitHub
-------------------------------
git push monitoring-plugins master
- git push monitoring-plugins v2.3
+ git push monitoring-plugins v2.4
Create new maintenance branch
-----------------------------
_Only necessary when creating a feature release._
- git checkout -b maint-2.3 v2.3
- git push -u monitoring-plugins maint-2.3
+ git checkout -b maint-2.4 v2.4
+ git push -u monitoring-plugins maint-2.4
Checkout new version
--------------------
rm -rf /tmp/plugins
- git archive --prefix=tmp/plugins/ v2.3 | (cd /; tar -xf -)
+ git archive --prefix=tmp/plugins/ v2.4 | (cd /; tar -xf -)
Build the tarball
-----------------
@@ -62,26 +62,26 @@ Build the tarball
Upload tarball to web site
--------------------------
- scp monitoring-plugins-2.3.tar.gz \
+ scp monitoring-plugins-2.4.tar.gz \
plugins@orwell.monitoring-plugins.org:web/download/
Generate SHA1 checksum file on web site
---------------------------------------
ssh plugins@orwell.monitoring-plugins.org \
- '(cd web/download; $HOME/bin/create-checksum monitoring-plugins-2.3.tar.gz)'
+ '(cd web/download; $HOME/bin/create-checksum monitoring-plugins-2.4.tar.gz)'
Announce new release
--------------------
- In the site.git repository:
- - Create `web/input/news/release-2-3.md`.
+ - Create `web/input/news/release-2-4.md`.
- Update the `plugins_release` version in `web/macros.py`.
- Commit and push the result:
- git add web/input/news/release-2-3.md
- git commit web/input/news/release-2-3.md web/macros.py
+ git add web/input/news/release-2-4.md
+ git commit web/input/news/release-2-4.md web/macros.py
git push origin master
- Post an announcement on (at least) the following mailing lists:
@@ -93,6 +93,6 @@ Announce new release
If you want to mention the number of contributors in the announcement:
- git shortlog -s v2.2.1..v2.3 | wc -l
+ git shortlog -s v2.3.1..v2.4 | wc -l
<!-- vim:set filetype=markdown textwidth=72: -->
diff --git a/gl/fsusage.c b/gl/fsusage.c
index 0657555f..6103ecf3 100644
--- a/gl/fsusage.c
+++ b/gl/fsusage.c
@@ -143,6 +143,7 @@ get_fs_usage (char const *file, char const *disk, struct fs_usage *fsp)
fsp->fsu_bavail_top_bit_set = EXTRACT_TOP_BIT (vfsd.f_bavail) != 0;
fsp->fsu_files = PROPAGATE_ALL_ONES (vfsd.f_files);
fsp->fsu_ffree = PROPAGATE_ALL_ONES (vfsd.f_ffree);
+ fsp->fsu_favail = PROPAGATE_ALL_ONES (vfsd.f_favail);
return 0;
}
@@ -174,6 +175,7 @@ get_fs_usage (char const *file, char const *disk, struct fs_usage *fsp)
fsp->fsu_bavail_top_bit_set = EXTRACT_TOP_BIT (fsd.fd_req.bfreen) != 0;
fsp->fsu_files = PROPAGATE_ALL_ONES (fsd.fd_req.gtot);
fsp->fsu_ffree = PROPAGATE_ALL_ONES (fsd.fd_req.gfree);
+ fsp->fsu_favail = PROPAGATE_ALL_ONES (fsd.fd_req.gfree);
#elif defined STAT_READ_FILSYS /* SVR2 */
# ifndef SUPERBOFF
@@ -209,6 +211,7 @@ get_fs_usage (char const *file, char const *disk, struct fs_usage *fsp)
? UINTMAX_MAX
: (fsd.s_isize - 2) * INOPB * (fsd.s_type == Fs2b ? 2 : 1));
fsp->fsu_ffree = PROPAGATE_ALL_ONES (fsd.s_tinode);
+ fsp->fsu_favail = PROPAGATE_ALL_ONES (fsd.s_tinode);
#elif defined STAT_STATFS3_OSF1 /* OSF/1 */
@@ -296,6 +299,7 @@ get_fs_usage (char const *file, char const *disk, struct fs_usage *fsp)
fsp->fsu_bavail_top_bit_set = EXTRACT_TOP_BIT (fsd.f_bavail) != 0;
fsp->fsu_files = PROPAGATE_ALL_ONES (fsd.f_files);
fsp->fsu_ffree = PROPAGATE_ALL_ONES (fsd.f_ffree);
+ fsp->fsu_favail = PROPAGATE_ALL_ONES (fsd.f_ffree);
#endif
@@ -323,6 +327,7 @@ statfs (char *file, struct statfs *fsb)
fsb->f_bavail = fsd.du_tfree;
fsb->f_files = (fsd.du_isize - 2) * fsd.du_inopb;
fsb->f_ffree = fsd.du_tinode;
+ fsb->f_favail = fsd.du_tinode;
fsb->f_fsid.val[0] = fsd.du_site;
fsb->f_fsid.val[1] = fsd.du_pckno;
return 0;
diff --git a/gl/fsusage.h b/gl/fsusage.h
index 7810fc01..e2654fd8 100644
--- a/gl/fsusage.h
+++ b/gl/fsusage.h
@@ -32,7 +32,8 @@ struct fs_usage
uintmax_t fsu_bavail; /* Free blocks available to non-superuser. */
bool fsu_bavail_top_bit_set; /* 1 if fsu_bavail represents a value < 0. */
uintmax_t fsu_files; /* Total file nodes. */
- uintmax_t fsu_ffree; /* Free file nodes. */
+ uintmax_t fsu_ffree; /* Free file nodes to superuser. */
+ uintmax_t fsu_favail; /* Free file nodes to non-superuser. */
};
int get_fs_usage (char const *file, char const *disk, struct fs_usage *fsp);
diff --git a/lib/utils_base.c b/lib/utils_base.c
index 3822bcf1..08fa215c 100644
--- a/lib/utils_base.c
+++ b/lib/utils_base.c
@@ -37,6 +37,9 @@
monitoring_plugin *this_monitoring_plugin=NULL;
+unsigned int timeout_state = STATE_CRITICAL;
+unsigned int timeout_interval = DEFAULT_SOCKET_TIMEOUT;
+
int _np_state_read_file(FILE *);
void np_init( char *plugin_name, int argc, char **argv ) {
@@ -87,10 +90,13 @@ void _get_monitoring_plugin( monitoring_plugin **pointer ){
void
die (int result, const char *fmt, ...)
{
- va_list ap;
- va_start (ap, fmt);
- vprintf (fmt, ap);
- va_end (ap);
+ if(fmt!=NULL) {
+ va_list ap;
+ va_start (ap, fmt);
+ vprintf (fmt, ap);
+ va_end (ap);
+ }
+
if(this_monitoring_plugin!=NULL) {
np_cleanup();
}
@@ -122,6 +128,7 @@ range
temp_range->end = 0;
temp_range->end_infinity = TRUE;
temp_range->alert_on = OUTSIDE;
+ temp_range->text = strdup(str);
if (str[0] == '@') {
temp_range->alert_on = INSIDE;
@@ -356,6 +363,22 @@ char *np_extract_value(const char *varlist, const char *name, char sep) {
return value;
}
+const char *
+state_text (int result)
+{
+ switch (result) {
+ case STATE_OK:
+ return "OK";
+ case STATE_WARNING:
+ return "WARNING";
+ case STATE_CRITICAL:
+ return "CRITICAL";
+ case STATE_DEPENDENT:
+ return "DEPENDENT";
+ default:
+ return "UNKNOWN";
+ }
+}
/*
* Read a string representing a state (ok, warning... or numeric: 0, 1) and
@@ -684,4 +707,3 @@ void np_state_write_string(time_t data_time, char *data_string) {
np_free(temp_file);
}
-
diff --git a/lib/utils_base.h b/lib/utils_base.h
index 42ae0c09..9482f23b 100644
--- a/lib/utils_base.h
+++ b/lib/utils_base.h
@@ -23,6 +23,7 @@ typedef struct range_struct {
double end;
int end_infinity;
int alert_on; /* OUTSIDE (default) or INSIDE */
+ char* text; /* original unparsed text input */
} range;
typedef struct thresholds_struct {
@@ -61,6 +62,10 @@ void print_thresholds(const char *, thresholds *);
int check_range(double, range *);
int get_status(double, thresholds *);
+/* Handle timeouts */
+extern unsigned int timeout_state;
+extern unsigned int timeout_interval;
+
/* All possible characters in a threshold range */
#define NP_THRESHOLDS_CHARS "-0123456789.:@~"
@@ -107,5 +112,6 @@ void np_state_write_string(time_t, char *);
void np_init(char *, int argc, char **argv);
void np_set_args(int argc, char **argv);
void np_cleanup();
+const char *state_text (int);
#endif /* _UTILS_BASE_ */
diff --git a/lib/utils_cmd.c b/lib/utils_cmd.c
index 7eb9a3a0..795840d3 100644
--- a/lib/utils_cmd.c
+++ b/lib/utils_cmd.c
@@ -40,6 +40,7 @@
/** includes **/
#include "common.h"
+#include "utils.h"
#include "utils_cmd.h"
#include "utils_base.h"
#include <fcntl.h>
@@ -65,31 +66,6 @@ extern char **environ;
# define SIG_ERR ((Sigfunc *)-1)
#endif
-/* This variable must be global, since there's no way the caller
- * can forcibly slay a dead or ungainly running program otherwise.
- * Multithreading apps and plugins can initialize it (via CMD_INIT)
- * in an async safe manner PRIOR to calling cmd_run() or cmd_run_array()
- * for the first time.
- *
- * The check for initialized values is atomic and can
- * occur in any number of threads simultaneously. */
-static pid_t *_cmd_pids = NULL;
-
-/* Try sysconf(_SC_OPEN_MAX) first, as it can be higher than OPEN_MAX.
- * If that fails and the macro isn't defined, we fall back to an educated
- * guess. There's no guarantee that our guess is adequate and the program
- * will die with SIGSEGV if it isn't and the upper boundary is breached. */
-#define DEFAULT_MAXFD 256 /* fallback value if no max open files value is set */
-#define MAXFD_LIMIT 8192 /* upper limit of open files */
-#ifdef _SC_OPEN_MAX
-static long maxfd = 0;
-#elif defined(OPEN_MAX)
-# define maxfd OPEN_MAX
-#else /* sysconf macro unavailable, so guess (may be wildly inaccurate) */
-# define maxfd DEFAULT_MAXFD
-#endif
-
-
/** prototypes **/
static int _cmd_open (char *const *, int *, int *)
__attribute__ ((__nonnull__ (1, 2, 3)));
@@ -406,3 +382,19 @@ cmd_file_read ( char *filename, output *out, int flags)
return 0;
}
+
+void
+timeout_alarm_handler (int signo)
+{
+ size_t i;
+ if (signo == SIGALRM) {
+ printf (_("%s - Plugin timed out after %d seconds\n"),
+ state_text(timeout_state), timeout_interval);
+
+ if(_cmd_pids) for(i = 0; i < maxfd; i++) {
+ if(_cmd_pids[i] != 0) kill(_cmd_pids[i], SIGKILL);
+ }
+
+ exit (timeout_state);
+ }
+}
diff --git a/lib/utils_cmd.h b/lib/utils_cmd.h
index ebaf15be..6f3aeb81 100644
--- a/lib/utils_cmd.h
+++ b/lib/utils_cmd.h
@@ -32,4 +32,17 @@ void cmd_init (void);
#define CMD_NO_ARRAYS 0x01 /* don't populate arrays at all */
#define CMD_NO_ASSOC 0x02 /* output.line won't point to buf */
+/* This variable must be global, since there's no way the caller
+ * can forcibly slay a dead or ungainly running program otherwise.
+ * Multithreading apps and plugins can initialize it (via CMD_INIT)
+ * in an async safe manner PRIOR to calling cmd_run() or cmd_run_array()
+ * for the first time.
+ *
+ * The check for initialized values is atomic and can
+ * occur in any number of threads simultaneously. */
+static pid_t *_cmd_pids = NULL;
+
+RETSIGTYPE timeout_alarm_handler (int);
+
+
#endif /* _UTILS_CMD_ */
diff --git a/lib/utils_disk.c b/lib/utils_disk.c
index efe35fc5..c7c9126e 100644
--- a/lib/utils_disk.c
+++ b/lib/utils_disk.c
@@ -69,6 +69,8 @@ np_add_parameter(struct parameter_list **list, const char *name)
new_path->dtotal_units = 0;
new_path->inodes_total = 0;
new_path->inodes_free = 0;
+ new_path->inodes_free_to_root = 0;
+ new_path->inodes_used = 0;
new_path->dused_inodes_percent = 0;
new_path->dfree_inodes_percent = 0;
diff --git a/lib/utils_disk.h b/lib/utils_disk.h
index 83a37639..bf52e4ce 100644
--- a/lib/utils_disk.h
+++ b/lib/utils_disk.h
@@ -24,9 +24,10 @@ struct parameter_list
char *group;
struct mount_entry *best_match;
struct parameter_list *name_next;
- uintmax_t total, available, available_to_root, used, inodes_free, inodes_total;
+ uintmax_t total, available, available_to_root, used,
+ inodes_free, inodes_free_to_root, inodes_used, inodes_total;
double dfree_pct, dused_pct;
- double dused_units, dfree_units, dtotal_units;
+ uint64_t dused_units, dfree_units, dtotal_units;
double dused_inodes_percent, dfree_inodes_percent;
};
diff --git a/m4/libcurl.m4 b/m4/libcurl.m4
new file mode 100644
index 00000000..53d694d0
--- /dev/null
+++ b/m4/libcurl.m4
@@ -0,0 +1,272 @@
+#***************************************************************************
+# _ _ ____ _
+# Project ___| | | | _ \| |
+# / __| | | | |_) | |
+# | (__| |_| | _ <| |___
+# \___|\___/|_| \_\_____|
+#
+# Copyright (C) 2006, David Shaw <dshaw@jabberwocky.com>
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at https://curl.haxx.se/docs/copyright.html.
+#
+# You may opt to use, copy, modify, merge, publish, distribute and/or sell
+# copies of the Software, and permit persons to whom the Software is
+# furnished to do so, under the terms of the COPYING file.
+#
+# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+# KIND, either express or implied.
+#
+###########################################################################
+# LIBCURL_CHECK_CONFIG ([DEFAULT-ACTION], [MINIMUM-VERSION],
+# [ACTION-IF-YES], [ACTION-IF-NO])
+# ----------------------------------------------------------
+# David Shaw <dshaw@jabberwocky.com> May-09-2006
+#
+# Checks for libcurl. DEFAULT-ACTION is the string yes or no to
+# specify whether to default to --with-libcurl or --without-libcurl.
+# If not supplied, DEFAULT-ACTION is yes. MINIMUM-VERSION is the
+# minimum version of libcurl to accept. Pass the version as a regular
+# version number like 7.10.1. If not supplied, any version is
+# accepted. ACTION-IF-YES is a list of shell commands to run if
+# libcurl was successfully found and passed the various tests.
+# ACTION-IF-NO is a list of shell commands that are run otherwise.
+# Note that using --without-libcurl does run ACTION-IF-NO.
+#
+# This macro #defines HAVE_LIBCURL if a working libcurl setup is
+# found, and sets @LIBCURL@ and @LIBCURL_CPPFLAGS@ to the necessary
+# values. Other useful defines are LIBCURL_FEATURE_xxx where xxx are
+# the various features supported by libcurl, and LIBCURL_PROTOCOL_yyy
+# where yyy are the various protocols supported by libcurl. Both xxx
+# and yyy are capitalized. See the list of AH_TEMPLATEs at the top of
+# the macro for the complete list of possible defines. Shell
+# variables $libcurl_feature_xxx and $libcurl_protocol_yyy are also
+# defined to 'yes' for those features and protocols that were found.
+# Note that xxx and yyy keep the same capitalization as in the
+# curl-config list (e.g. it's "HTTP" and not "http").
+#
+# Users may override the detected values by doing something like:
+# LIBCURL="-lcurl" LIBCURL_CPPFLAGS="-I/usr/myinclude" ./configure
+#
+# For the sake of sanity, this macro assumes that any libcurl that is
+# found is after version 7.7.2, the first version that included the
+# curl-config script. Note that it is very important for people
+# packaging binary versions of libcurl to include this script!
+# Without curl-config, we can only guess what protocols are available,
+# or use curl_version_info to figure it out at runtime.
+
+AC_DEFUN([LIBCURL_CHECK_CONFIG],
+[
+ AH_TEMPLATE([LIBCURL_FEATURE_SSL],[Defined if libcurl supports SSL])
+ AH_TEMPLATE([LIBCURL_FEATURE_KRB4],[Defined if libcurl supports KRB4])
+ AH_TEMPLATE([LIBCURL_FEATURE_IPV6],[Defined if libcurl supports IPv6])
+ AH_TEMPLATE([LIBCURL_FEATURE_LIBZ],[Defined if libcurl supports libz])
+ AH_TEMPLATE([LIBCURL_FEATURE_ASYNCHDNS],[Defined if libcurl supports AsynchDNS])
+ AH_TEMPLATE([LIBCURL_FEATURE_IDN],[Defined if libcurl supports IDN])
+ AH_TEMPLATE([LIBCURL_FEATURE_SSPI],[Defined if libcurl supports SSPI])
+ AH_TEMPLATE([LIBCURL_FEATURE_NTLM],[Defined if libcurl supports NTLM])
+
+ AH_TEMPLATE([LIBCURL_PROTOCOL_HTTP],[Defined if libcurl supports HTTP])
+ AH_TEMPLATE([LIBCURL_PROTOCOL_HTTPS],[Defined if libcurl supports HTTPS])
+ AH_TEMPLATE([LIBCURL_PROTOCOL_FTP],[Defined if libcurl supports FTP])
+ AH_TEMPLATE([LIBCURL_PROTOCOL_FTPS],[Defined if libcurl supports FTPS])
+ AH_TEMPLATE([LIBCURL_PROTOCOL_FILE],[Defined if libcurl supports FILE])
+ AH_TEMPLATE([LIBCURL_PROTOCOL_TELNET],[Defined if libcurl supports TELNET])
+ AH_TEMPLATE([LIBCURL_PROTOCOL_LDAP],[Defined if libcurl supports LDAP])
+ AH_TEMPLATE([LIBCURL_PROTOCOL_DICT],[Defined if libcurl supports DICT])
+ AH_TEMPLATE([LIBCURL_PROTOCOL_TFTP],[Defined if libcurl supports TFTP])
+ AH_TEMPLATE([LIBCURL_PROTOCOL_RTSP],[Defined if libcurl supports RTSP])
+ AH_TEMPLATE([LIBCURL_PROTOCOL_POP3],[Defined if libcurl supports POP3])
+ AH_TEMPLATE([LIBCURL_PROTOCOL_IMAP],[Defined if libcurl supports IMAP])
+ AH_TEMPLATE([LIBCURL_PROTOCOL_SMTP],[Defined if libcurl supports SMTP])
+
+ AC_ARG_WITH(libcurl,
+ AS_HELP_STRING([--with-libcurl=PREFIX],[look for the curl library in PREFIX/lib and headers in PREFIX/include]),
+ [_libcurl_with=$withval],[_libcurl_with=ifelse([$1],,[yes],[$1])])
+
+ if test "$_libcurl_with" != "no" ; then
+
+ AC_PROG_AWK
+
+ _libcurl_version_parse="eval $AWK '{split(\$NF,A,\".\"); X=256*256*A[[1]]+256*A[[2]]+A[[3]]; print X;}'"
+
+ _libcurl_try_link=yes
+
+ if test -d "$_libcurl_with" ; then
+ LIBCURL_CPPFLAGS="-I$withval/include"
+ _libcurl_ldflags="-L$withval/lib"
+ AC_PATH_PROG([_libcurl_config],[curl-config],[],
+ ["$withval/bin"])
+ else
+ AC_PATH_PROG([_libcurl_config],[curl-config],[],[$PATH])
+ fi
+
+ if test x$_libcurl_config != "x" ; then
+ AC_CACHE_CHECK([for the version of libcurl],
+ [libcurl_cv_lib_curl_version],
+ [libcurl_cv_lib_curl_version=`$_libcurl_config --version | $AWK '{print $[]2}'`])
+
+ _libcurl_version=`echo $libcurl_cv_lib_curl_version | $_libcurl_version_parse`
+ _libcurl_wanted=`echo ifelse([$2],,[0],[$2]) | $_libcurl_version_parse`
+
+ if test $_libcurl_wanted -gt 0 ; then
+ AC_CACHE_CHECK([for libcurl >= version $2],
+ [libcurl_cv_lib_version_ok],
+ [
+ if test $_libcurl_version -ge $_libcurl_wanted ; then
+ libcurl_cv_lib_version_ok=yes
+ else
+ libcurl_cv_lib_version_ok=no
+ fi
+ ])
+ fi
+
+ if test $_libcurl_wanted -eq 0 || test x$libcurl_cv_lib_version_ok = xyes ; then
+ if test x"$LIBCURL_CPPFLAGS" = "x" ; then
+ LIBCURL_CPPFLAGS=`$_libcurl_config --cflags`
+ fi
+ if test x"$LIBCURL" = "x" ; then
+ LIBCURL=`$_libcurl_config --libs`
+
+ # This is so silly, but Apple actually has a bug in their
+ # curl-config script. Fixed in Tiger, but there are still
+ # lots of Panther installs around.
+ case "${host}" in
+ powerpc-apple-darwin7*)
+ LIBCURL=`echo $LIBCURL | sed -e 's|-arch i386||g'`
+ ;;
+ esac
+ fi
+
+ # All curl-config scripts support --feature
+ _libcurl_features=`$_libcurl_config --feature`
+
+ # Is it modern enough to have --protocols? (7.12.4)
+ if test $_libcurl_version -ge 461828 ; then
+ _libcurl_protocols=`$_libcurl_config --protocols`
+ fi
+ else
+ _libcurl_try_link=no
+ fi
+
+ unset _libcurl_wanted
+ fi
+
+ if test $_libcurl_try_link = yes ; then
+
+ # we didn't find curl-config, so let's see if the user-supplied
+ # link line (or failing that, "-lcurl") is enough.
+ LIBCURL=${LIBCURL-"$_libcurl_ldflags -lcurl"}
+
+ AC_CACHE_CHECK([whether libcurl is usable],
+ [libcurl_cv_lib_curl_usable],
+ [
+ _libcurl_save_cppflags=$CPPFLAGS
+ CPPFLAGS="$LIBCURL_CPPFLAGS $CPPFLAGS"
+ _libcurl_save_libs=$LIBS
+ LIBS="$LIBCURL $LIBS"
+
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <curl/curl.h>]],[[
+/* Try and use a few common options to force a failure if we are
+ missing symbols or can't link. */
+int x;
+curl_easy_setopt(NULL,CURLOPT_URL,NULL);
+x=CURL_ERROR_SIZE;
+x=CURLOPT_WRITEFUNCTION;
+x=CURLOPT_WRITEDATA;
+x=CURLOPT_ERRORBUFFER;
+x=CURLOPT_STDERR;
+x=CURLOPT_VERBOSE;
+if (x) {;}
+]])],libcurl_cv_lib_curl_usable=yes,libcurl_cv_lib_curl_usable=no)
+
+ CPPFLAGS=$_libcurl_save_cppflags
+ LIBS=$_libcurl_save_libs
+ unset _libcurl_save_cppflags
+ unset _libcurl_save_libs
+ ])
+
+ if test $libcurl_cv_lib_curl_usable = yes ; then
+
+ # Does curl_free() exist in this version of libcurl?
+ # If not, fake it with free()
+
+ _libcurl_save_cppflags=$CPPFLAGS
+ CPPFLAGS="$CPPFLAGS $LIBCURL_CPPFLAGS"
+ _libcurl_save_libs=$LIBS
+ LIBS="$LIBS $LIBCURL"
+
+ AC_CHECK_FUNC(curl_free,,
+ AC_DEFINE(curl_free,free,
+ [Define curl_free() as free() if our version of curl lacks curl_free.]))
+
+ CPPFLAGS=$_libcurl_save_cppflags
+ LIBS=$_libcurl_save_libs
+ unset _libcurl_save_cppflags
+ unset _libcurl_save_libs
+
+ AC_DEFINE(HAVE_LIBCURL,1,
+ [Define to 1 if you have a functional curl library.])
+ AC_SUBST(LIBCURL_CPPFLAGS)
+ AC_SUBST(LIBCURL)
+
+ for _libcurl_feature in $_libcurl_features ; do
+ AC_DEFINE_UNQUOTED(AS_TR_CPP(libcurl_feature_$_libcurl_feature),[1])
+ eval AS_TR_SH(libcurl_feature_$_libcurl_feature)=yes
+ done
+
+ if test "x$_libcurl_protocols" = "x" ; then
+
+ # We don't have --protocols, so just assume that all
+ # protocols are available
+ _libcurl_protocols="HTTP FTP FILE TELNET LDAP DICT TFTP"
+
+ if test x$libcurl_feature_SSL = xyes ; then
+ _libcurl_protocols="$_libcurl_protocols HTTPS"
+
+ # FTPS wasn't standards-compliant until version
+ # 7.11.0 (0x070b00 == 461568)
+ if test $_libcurl_version -ge 461568; then
+ _libcurl_protocols="$_libcurl_protocols FTPS"
+ fi
+ fi
+
+ # RTSP, IMAP, POP3 and SMTP were added in
+ # 7.20.0 (0x071400 == 463872)
+ if test $_libcurl_version -ge 463872; then
+ _libcurl_protocols="$_libcurl_protocols RTSP IMAP POP3 SMTP"
+ fi
+ fi
+
+ for _libcurl_protocol in $_libcurl_protocols ; do
+ AC_DEFINE_UNQUOTED(AS_TR_CPP(libcurl_protocol_$_libcurl_protocol),[1])
+ eval AS_TR_SH(libcurl_protocol_$_libcurl_protocol)=yes
+ done
+ else
+ unset LIBCURL
+ unset LIBCURL_CPPFLAGS
+ fi
+ fi
+
+ unset _libcurl_try_link
+ unset _libcurl_version_parse
+ unset _libcurl_config
+ unset _libcurl_feature
+ unset _libcurl_features
+ unset _libcurl_protocol
+ unset _libcurl_protocols
+ unset _libcurl_version
+ unset _libcurl_ldflags
+ fi
+
+ if test x$_libcurl_with = xno || test x$libcurl_cv_lib_curl_usable != xyes ; then
+ # This is the IF-NO path
+ ifelse([$4],,:,[$4])
+ else
+ # This is the IF-YES path
+ ifelse([$3],,:,[$3])
+ fi
+
+ unset _libcurl_with
+])dnl
diff --git a/m4/np_mysqlclient.m4 b/m4/np_mysqlclient.m4
index c2a4d2a7..5099a02b 100644
--- a/m4/np_mysqlclient.m4
+++ b/m4/np_mysqlclient.m4
@@ -53,18 +53,34 @@ AC_DEFUN([np_mysqlclient],
_savedcppflags="$CPPFLAGS"
CPPFLAGS="$CPPFLAGS $np_mysql_include"
- dnl Putting $np_mysql_libs as other libraries ensures that all mysql dependencies are linked in
- dnl Although -lmysqlclient is duplicated, it is not a problem
- AC_CHECK_LIB([mysqlclient], [mysql_init], [
- with_mysql=$np_mysql_config
- AC_DEFINE(HAVE_MYSQLCLIENT, 1, [Defined if mysqlclient is found and can compile])
- ], [with_mysql=no], [$np_mysql_libs])
+ np_check_lib_mysqlclient
+
CPPFLAGS=$_savedcppflags
fi
fi
])
+dnl Test mysql_init using mysqlclient
+AC_DEFUN([np_check_lib_mysqlclient],
+[
+ dnl Putting $np_mysql_libs as other libraries ensures that all mysql dependencies are linked in
+ dnl Although -lmysqlclient is duplicated, it is not a problem
+ AC_CHECK_LIB([mysqlclient], [mysql_init], [
+ with_mysql=$np_mysql_config
+ AC_DEFINE(HAVE_MYSQLCLIENT, 1, [Defined if mysqlclient is found and can compile])
+ ], [np_check_lib_mariadbclient], [$np_mysql_libs])
+])
+
+dnl Test mysql_init using mariadbclient
+AC_DEFUN([np_check_lib_mariadbclient],
+[
+ AC_CHECK_LIB([mariadbclient], [mysql_init], [
+ with_mysql=$np_mysql_config
+ AC_DEFINE(HAVE_MYSQLCLIENT, 1, [Defined if mariadbclient is found and can compile])
+ ], [with_mysql=no], [$np_mysql_libs])
+])
+
dnl Will take $1, find last occurrance of -LDIR and add DIR to LD_RUN_PATH
AC_DEFUN([np_add_to_runpath],
[
diff --git a/m4/uriparser.m4 b/m4/uriparser.m4
new file mode 100644
index 00000000..dbb8a551
--- /dev/null
+++ b/m4/uriparser.m4
@@ -0,0 +1,140 @@
+# (this check is roughly based on and inspired by libcurl.m4)
+# URIPARSER_CHECK ([DEFAULT-ACTION], [MINIMUM-VERSION],
+# [ACTION-IF-YES], [ACTION-IF-NO])
+# Checks for uriparser library. DEFAULT-ACTION is the string yes or no to
+# specify whether to default to --with-uriparser or --without-uriparser.
+# If not supplied, DEFAULT-ACTION is yes. MINIMUM-VERSION is the
+# minimum version of uriparser to accept. Pass the version as a regular
+# version number like 0.8.5. If not supplied, any version is
+# accepted. ACTION-IF-YES is a list of shell commands to run if
+# uriparser was successfully found and passed the various tests.
+# ACTION-IF-NO is a list of shell commands that are run otherwise.
+# Note that using --without-uriparser does run ACTION-IF-NO.
+#
+# This macro #defines HAVE_URIPARSER if a working uriparser setup is
+# found, and sets @URIPARSER@ and @URIPARSER_CPPFLAGS@ to the necessary
+# values.
+#
+# Users may override the detected values by doing something like:
+# URIPARSER="-luriparser" URIPARSER_CPPFLAGS="-I/usr/myinclude" ./configure
+#
+
+AC_DEFUN([URIPARSER_CHECK],
+[
+ AC_ARG_WITH(uriparser,
+ AS_HELP_STRING([--with-uriparser=PREFIX],[look for the uriparser library in PREFIX/lib and headers in PREFIX/include]),
+ [_uriparser_with=$withval],[_uriparser_with=ifelse([$1],,[yes],[$1])])
+
+ if test "$_uriparser_with" != "no" ; then
+
+ _uriparser_try_link=yes
+
+ AC_CHECK_PROG(PKGCONFIG,pkg-config,pkg-config,no)
+
+ if test "x$URIPARSER" != "x" || test "x$URIPARSER_CPPFLAGS" != "x"; then
+ :
+ elif test -d "$_uriparser_with" ; then
+ URIPARSER_CPPFLAGS="-I$withval/include"
+ _uriparser_ldflags="-L$withval/lib"
+
+ elif test x$PKGCONFIG != xno; then
+
+ AC_CACHE_CHECK([for the version of uriparser],
+ [uriparser_cv_uriparser_version],
+ [uriparser_cv_uriparser_version=`$PKGCONFIG liburiparser --modversion`])
+
+ AC_PROG_AWK
+
+ _uriparser_version_parse="eval $AWK '{split(\$NF,A,\".\"); X=256*256*A[[1]]+256*A[[2]]+A[[3]]; print X;}'"
+
+ _uriparser_version=`echo $uriparser_cv_uriparser_version | $_uriparser_version_parse`
+ _uriparser_wanted=`echo ifelse([$2],,[0],[$2]) | $_uriparser_version_parse`
+
+ if test $_uriparser_wanted -gt 0 ; then
+ AC_CACHE_CHECK([for uriparser >= version $2],
+ [uriparser_cv_lib_version_ok],
+ [
+ if test $_uriparser_version -ge $_uriparser_wanted ; then
+ uriparser_cv_lib_version_ok=yes
+ else
+ uriparser_cv_lib_version_ok=no
+ fi
+ ])
+ fi
+
+ if test $_uriparser_wanted -eq 0 || test x$uriparser_cv_lib_version_ok = xyes ; then
+ if test x"$URIPARSER_CPPFLAGS" = "x" ; then
+ URIPARSER_CPPFLAGS=`$PKGCONFIG liburiparser --cflags`
+ fi
+ if test x"$URIPARSER" = "x" ; then
+ URIPARSER=`$PKGCONFIG liburiparser --libs`
+ fi
+ else
+ _uriparser_try_link=no
+ fi
+
+ unset _uriparser_wanted
+ else
+ dnl no pkg-config, ok, do our best and set some defaults
+ URIPARSER_CPPFLAGS="-I/usr/include"
+ URIPARSER="-luriparser -L/usr/lib -L/usr/lib64 -L/usr/lib/x86_64-linux-gnu -L/usr/lib/i686-linux-gnu"
+ fi
+
+ if test $_uriparser_try_link = yes ; then
+
+ # let's see if the user-supplied
+ # link line (or failing that, "-luriparser") is enough.
+ URIPARSER=${URIPARSER-"$_uriparser_ldflags -luriparser"}
+
+ AC_CACHE_CHECK([whether uriparser is usable],
+ [uriparser_cv_lib_uriparser_usable],
+ [
+ _liburiparser_save_cppflags=$CPPFLAGS
+ CPPFLAGS="$URIPARSER_CPPFLAGS $CPPFLAGS"
+ _liburiparser_save_libs=$LIBS
+ LIBS="$URIPARSER $LIBS"
+
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <uriparser/Uri.h>]],[[
+/* Try and use a few common options to force a failure if we are
+ missing symbols or cannot link. */
+UriParserStateA state;
+UriUriA uri;
+state.uri = &uri;
+char *location = "http://test.dom/dir/file.ext";
+int x = uriParseUriA (&state, location);
+if (x == URI_SUCCESS) {;}
+]])],uriparser_cv_lib_uriparser_usable=yes,uriparser_cv_lib_uriparser_usable=no)
+
+ CPPFLAGS=$_liburiparser_save_cppflags
+ LIBS=$_liburiparser_save_libs
+ unset _liburiparser_save_cppflags
+ unset _liburiparser_save_libs
+ ])
+
+ if test $uriparser_cv_lib_uriparser_usable = yes ; then
+ AC_DEFINE(HAVE_URIPARSER,1,
+ [Define to 1 if you have a functional uriparser library.])
+ AC_SUBST(URIPARSER_CPPFLAGS)
+ AC_SUBST(URIPARSER)
+ else
+ unset URIPARSER
+ unset URIPARSER_CPPFLAGS
+ fi
+ fi
+
+ unset _uriparser_try_link
+ unset _uriparser_version_parse
+ unset _uriparser_version
+ unset _uriparser_ldflags
+ fi
+
+ if test x$_uriparser_with = xno || test x$uriparser_cv_lib_uriparser_usable != xyes ; then
+ # This is the IF-NO path
+ ifelse([$4],,:,[$4])
+ else
+ # This is the IF-YES path
+ ifelse([$3],,:,[$3])
+ fi
+
+ unset _uriparser_with
+])dnl
diff --git a/plugins-root/Makefile.am b/plugins-root/Makefile.am
index a1ebb6d2..7cd2675a 100644
--- a/plugins-root/Makefile.am
+++ b/plugins-root/Makefile.am
@@ -37,6 +37,9 @@ TESTS = @PLUGIN_TEST@
test:
perl -I $(top_builddir) -I $(top_srcdir) ../test.pl
+test-debug:
+ NPTEST_DEBUG=1 HARNESS_VERBOSE=1 perl -I $(top_builddir) -I $(top_srcdir) ../test.pl
+
setuid_root_mode = ug=rx,u+s
# /* Author Coreutils team - see ACKNOWLEDGEMENTS */
diff --git a/plugins-root/check_dhcp.c b/plugins-root/check_dhcp.c
index f4c2dafd..ad673237 100644
--- a/plugins-root/check_dhcp.c
+++ b/plugins-root/check_dhcp.c
@@ -323,7 +323,8 @@ int get_hardware_address(int sock,char *interface_name){
#elif defined(__bsd__)
/* King 2004 see ACKNOWLEDGEMENTS */
- int mib[6], len;
+ size_t len;
+ int mib[6];
char *buf;
unsigned char *ptr;
struct if_msghdr *ifm;
diff --git a/plugins-root/check_icmp.c b/plugins-root/check_icmp.c
index 9ed12ba1..01ae174a 100644
--- a/plugins-root/check_icmp.c
+++ b/plugins-root/check_icmp.c
@@ -67,7 +67,9 @@ const char *email = "devel@monitoring-plugins.org";
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
+#include <netinet/ip6.h>
#include <netinet/ip_icmp.h>
+#include <netinet/icmp6.h>
#include <arpa/inet.h>
#include <signal.h>
#include <float.h>
@@ -113,8 +115,8 @@ typedef struct rta_host {
unsigned short id; /* id in **table, and icmp pkts */
char *name; /* arg used for adding this host */
char *msg; /* icmp error message, if any */
- struct sockaddr_in saddr_in; /* the address of this host */
- struct in_addr error_addr; /* stores address of error replies */
+ struct sockaddr_storage saddr_in; /* the address of this host */
+ struct sockaddr_storage error_addr; /* stores address of error replies */
unsigned long long time_waited; /* total time waited, in usecs */
unsigned int icmp_sent, icmp_recv, icmp_lost; /* counters */
unsigned char icmp_type, icmp_code; /* type and code from errors */
@@ -140,6 +142,18 @@ typedef struct icmp_ping_data {
unsigned short ping_id;
} icmp_ping_data;
+typedef union ip_hdr {
+ struct ip ip;
+ struct ip6_hdr ip6;
+} ip_hdr;
+
+typedef union icmp_packet {
+ void *buf;
+ struct icmp *icp;
+ struct icmp6_hdr *icp6;
+ u_short *cksum_in;
+} icmp_packet;
+
/* the different modes of this program are as follows:
* MODE_RTA: send all packets no matter what (mimic check_icmp and check_ping)
* MODE_HOSTCHECK: Return immediately upon any sign of life
@@ -190,8 +204,9 @@ static int get_threshold(char *str, threshold *th);
static void run_checks(void);
static void set_source_ip(char *);
static int add_target(char *);
-static int add_target_ip(char *, struct in_addr *);
-static int handle_random_icmp(unsigned char *, struct sockaddr_in *);
+static int add_target_ip(char *, struct sockaddr_storage *);
+static int handle_random_icmp(unsigned char *, struct sockaddr_storage *);
+static void parse_address(struct sockaddr_storage *, char *, int);
static unsigned short icmp_checksum(unsigned short *, int);
static void finish(int);
static void crash(const char *, ...);
@@ -300,7 +315,7 @@ get_icmp_error_msg(unsigned char icmp_type, unsigned char icmp_code)
}
static int
-handle_random_icmp(unsigned char *packet, struct sockaddr_in *addr)
+handle_random_icmp(unsigned char *packet, struct sockaddr_storage *addr)
{
struct icmp p, sent_icmp;
struct rta_host *host = NULL;
@@ -342,9 +357,11 @@ handle_random_icmp(unsigned char *packet, struct sockaddr_in *addr)
/* it is indeed a response for us */
host = table[ntohs(sent_icmp.icmp_seq)/packets];
if(debug) {
+ char address[INET6_ADDRSTRLEN];
+ parse_address(addr, address, sizeof(address));
printf("Received \"%s\" from %s for ICMP ECHO sent to %s.\n",
- get_icmp_error_msg(p.icmp_type, p.icmp_code),
- inet_ntoa(addr->sin_addr), host->name);
+ get_icmp_error_msg(p.icmp_type, p.icmp_code),
+ address, host->name);
}
icmp_lost++;
@@ -364,11 +381,23 @@ handle_random_icmp(unsigned char *packet, struct sockaddr_in *addr)
}
host->icmp_type = p.icmp_type;
host->icmp_code = p.icmp_code;
- host->error_addr.s_addr = addr->sin_addr.s_addr;
+ host->error_addr = *addr;
return 0;
}
+void parse_address(struct sockaddr_storage *addr, char *address, int size)
+{
+ switch (address_family) {
+ case AF_INET:
+ inet_ntop(address_family, &((struct sockaddr_in *)addr)->sin_addr, address, size);
+ break;
+ case AF_INET6:
+ inet_ntop(address_family, &((struct sockaddr_in6 *)addr)->sin6_addr, address, size);
+ break;
+ }
+}
+
int
main(int argc, char **argv)
{
@@ -381,6 +410,7 @@ main(int argc, char **argv)
#ifdef SO_TIMESTAMP
int on = 1;
#endif
+ char * opts_str = "vhVw:c:n:p:t:H:s:i:b:I:l:m:64";
setlocale (LC_ALL, "");
bindtextdomain (PACKAGE, LOCALEDIR);
@@ -390,33 +420,8 @@ main(int argc, char **argv)
* that before pointer magic (esp. on network data) */
icmp_sockerrno = udp_sockerrno = tcp_sockerrno = sockets = 0;
- if((icmp_sock = socket(PF_INET, SOCK_RAW, IPPROTO_ICMP)) != -1)
- sockets |= HAVE_ICMP;
- else icmp_sockerrno = errno;
-
- /* if((udp_sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP)) != -1) */
- /* sockets |= HAVE_UDP; */
- /* else udp_sockerrno = errno; */
-
- /* if((tcp_sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) != -1) */
- /* sockets |= HAVE_TCP; */
- /* else tcp_sockerrno = errno; */
-
- /* now drop privileges (no effect if not setsuid or geteuid() == 0) */
- setuid(getuid());
-
-#ifdef SO_TIMESTAMP
- if(setsockopt(icmp_sock, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on)))
- if(debug) printf("Warning: no SO_TIMESTAMP support\n");
-#endif // SO_TIMESTAMP
-
- /* POSIXLY_CORRECT might break things, so unset it (the portable way) */
- environ = NULL;
-
- /* use the pid to mark packets as ours */
- /* Some systems have 32-bit pid_t so mask off only 16 bits */
- pid = getpid() & 0xffff;
- /* printf("pid = %u\n", pid); */
+ address_family = -1;
+ int icmp_proto = IPPROTO_ICMP;
/* get calling name the old-fashioned way for portability instead
* of relying on the glibc-ism __progname */
@@ -456,20 +461,35 @@ main(int argc, char **argv)
packets = 5;
}
- /* Parse extra opts if any */
- argv=np_extra_opts(&argc, argv, progname);
-
- /* support "--help" and "--version" */
- if(argc == 2) {
- if(!strcmp(argv[1], "--help"))
- strcpy(argv[1], "-h");
- if(!strcmp(argv[1], "--version"))
- strcpy(argv[1], "-V");
+ /* Parse protocol arguments first */
+ for(i = 1; i < argc; i++) {
+ while((arg = getopt(argc, argv, opts_str)) != EOF) {
+ unsigned short size;
+ switch(arg) {
+ case '4':
+ if (address_family != -1)
+ crash("Multiple protocol versions not supported");
+ address_family = AF_INET;
+ break;
+ case '6':
+#ifdef USE_IPV6
+ if (address_family != -1)
+ crash("Multiple protocol versions not supported");
+ address_family = AF_INET6;
+#else
+ usage (_("IPv6 support not available\n"));
+#endif
+ break;
+ }
+ }
}
+ /* Reset argument scanning */
+ optind = 1;
+
/* parse the arguments */
for(i = 1; i < argc; i++) {
- while((arg = getopt(argc, argv, "vhVw:c:n:p:t:H:s:i:b:I:l:m:")) != EOF) {
+ while((arg = getopt(argc, argv, opts_str)) != EOF) {
unsigned short size;
switch(arg) {
case 'v':
@@ -482,7 +502,7 @@ main(int argc, char **argv)
icmp_data_size = size;
icmp_pkt_size = size + ICMP_MINLEN;
} else
- usage_va("ICMP data length must be between: %d and %d",
+ usage_va("ICMP data length must be between: %lu and %lu",
sizeof(struct icmp) + sizeof(struct icmp_ping_data),
MAX_PING_DATA - 1);
break;
@@ -530,10 +550,30 @@ main(int argc, char **argv)
case 'h': /* help */
print_help ();
exit (STATE_UNKNOWN);
+ break;
}
}
}
+ /* POSIXLY_CORRECT might break things, so unset it (the portable way) */
+ environ = NULL;
+
+ /* use the pid to mark packets as ours */
+ /* Some systems have 32-bit pid_t so mask off only 16 bits */
+ pid = getpid() & 0xffff;
+ /* printf("pid = %u\n", pid); */
+
+ /* Parse extra opts if any */
+ argv=np_extra_opts(&argc, argv, progname);
+
+ /* support "--help" and "--version" */
+ if(argc == 2) {
+ if(!strcmp(argv[1], "--help"))
+ strcpy(argv[1], "-h");
+ if(!strcmp(argv[1], "--version"))
+ strcpy(argv[1], "-V");
+ }
+
argv = &argv[optind];
while(*argv) {
add_target(*argv);
@@ -545,6 +585,30 @@ main(int argc, char **argv)
exit(3);
}
+ // add_target might change address_family
+ switch ( address_family ){
+ case AF_INET: icmp_proto = IPPROTO_ICMP;
+ break;
+ case AF_INET6: icmp_proto = IPPROTO_ICMPV6;
+ break;
+ default: crash("Address family not supported");
+ }
+ if((icmp_sock = socket(address_family, SOCK_RAW, icmp_proto)) != -1)
+ sockets |= HAVE_ICMP;
+ else icmp_sockerrno = errno;
+
+
+#ifdef SO_TIMESTAMP
+ if(setsockopt(icmp_sock, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on)))
+ if(debug) printf("Warning: no SO_TIMESTAMP support\n");
+#endif // SO_TIMESTAMP
+
+ /* now drop privileges (no effect if not setsuid or geteuid() == 0) */
+ if (setuid(getuid()) == -1) {
+ printf("ERROR: Failed to drop privileges\n");
+ return 1;
+ }
+
if(!sockets) {
if(icmp_sock == -1) {
errno = icmp_sockerrno;
@@ -608,7 +672,7 @@ main(int argc, char **argv)
if(max_completion_time > (u_int)timeout * 1000000) {
printf("max_completion_time: %llu timeout: %u\n",
max_completion_time, timeout);
- printf("Timout must be at lest %llu\n",
+ printf("Timeout must be at least %llu\n",
max_completion_time / 1000000 + 1);
}
}
@@ -633,7 +697,7 @@ main(int argc, char **argv)
}
host = list;
- table = malloc(sizeof(struct rta_host **) * targets);
+ table = (struct rta_host**)malloc(sizeof(struct rta_host **) * targets);
i = 0;
while(host) {
host->id = i*packets;
@@ -697,9 +761,15 @@ run_checks()
}
}
+
/* response structure:
+ * IPv4:
* ip header : 20 bytes
* icmp header : 28 bytes
+ * IPv6:
+ * ip header : 40 bytes
+ * icmp header : 28 bytes
+ * both:
* icmp echo reply : the rest
*/
static int
@@ -707,16 +777,27 @@ wait_for_reply(int sock, u_int t)
{
int n, hlen;
static unsigned char buf[4096];
- struct sockaddr_in resp_addr;
- struct ip *ip;
- struct icmp icp;
+ struct sockaddr_storage resp_addr;
+ union ip_hdr *ip;
+ union icmp_packet packet;
struct rta_host *host;
struct icmp_ping_data data;
struct timeval wait_start, now;
u_int tdiff, i, per_pkt_wait;
+ if (!(packet.buf = malloc(icmp_pkt_size))) {
+ crash("send_icmp_ping(): failed to malloc %d bytes for send buffer",
+ icmp_pkt_size);
+ return -1; /* might be reached if we're in debug mode */
+ }
+
+ memset(packet.buf, 0, icmp_pkt_size);
+
/* if we can't listen or don't have anything to listen to, just return */
- if(!t || !icmp_pkts_en_route) return 0;
+ if(!t || !icmp_pkts_en_route) {
+ free(packet.buf);
+ return 0;
+ }
gettimeofday(&wait_start, &tz);
@@ -735,7 +816,7 @@ wait_for_reply(int sock, u_int t)
/* reap responses until we hit a timeout */
n = recvfrom_wto(sock, buf, sizeof(buf),
- (struct sockaddr *)&resp_addr, &t, &now);
+ (struct sockaddr *)&resp_addr, &t, &now);
if(!n) {
if(debug > 1) {
printf("recvfrom_wto() timed out during a %u usecs wait\n",
@@ -745,12 +826,23 @@ wait_for_reply(int sock, u_int t)
}
if(n < 0) {
if(debug) printf("recvfrom_wto() returned errors\n");
+ free(packet.buf);
return n;
}
- ip = (struct ip *)buf;
- if(debug > 1) printf("received %u bytes from %s\n",
- ntohs(ip->ip_len), inet_ntoa(resp_addr.sin_addr));
+ // FIXME: with ipv6 we don't have an ip header here
+ if (address_family != AF_INET6) {
+ ip = (union ip_hdr *)buf;
+
+ if(debug > 1) {
+ char address[INET6_ADDRSTRLEN];
+ parse_address(&resp_addr, address, sizeof(address));
+ printf("received %u bytes from %s\n",
+ address_family == AF_INET6 ? ntohs(ip->ip6.ip6_plen)
+ : ntohs(ip->ip.ip_len),
+ address);
+ }
+ }
/* obsolete. alpha on tru64 provides the necessary defines, but isn't broken */
/* #if defined( __alpha__ ) && __STDC__ && !defined( __GLIBC__ ) */
@@ -759,12 +851,14 @@ wait_for_reply(int sock, u_int t)
* off the bottom 4 bits */
/* hlen = (ip->ip_vhl & 0x0f) << 2; */
/* #else */
- hlen = ip->ip_hl << 2;
+ hlen = (address_family == AF_INET6) ? 0 : ip->ip.ip_hl << 2;
/* #endif */
if(n < (hlen + ICMP_MINLEN)) {
+ char address[INET6_ADDRSTRLEN];
+ parse_address(&resp_addr, address, sizeof(address));
crash("received packet too short for ICMP (%d bytes, expected %d) from %s\n",
- n, hlen + icmp_pkt_size, inet_ntoa(resp_addr.sin_addr));
+ n, hlen + icmp_pkt_size, address);
}
/* else if(debug) { */
/* printf("ip header size: %u, packet size: %u (expected %u, %u)\n", */
@@ -773,23 +867,39 @@ wait_for_reply(int sock, u_int t)
/* } */
/* check the response */
- memcpy(&icp, buf + hlen, sizeof(icp));
- if(ntohs(icp.icmp_id) != pid || icp.icmp_type != ICMP_ECHOREPLY ||
- ntohs(icp.icmp_seq) >= targets*packets) {
+ memcpy(packet.buf, buf + hlen, icmp_pkt_size);
+/* address_family == AF_INET6 ? sizeof(struct icmp6_hdr)
+ : sizeof(struct icmp));*/
+
+ if( (address_family == PF_INET &&
+ (ntohs(packet.icp->icmp_id) != pid || packet.icp->icmp_type != ICMP_ECHOREPLY
+ || ntohs(packet.icp->icmp_seq) >= targets * packets))
+ || (address_family == PF_INET6 &&
+ (ntohs(packet.icp6->icmp6_id) != pid || packet.icp6->icmp6_type != ICMP6_ECHO_REPLY
+ || ntohs(packet.icp6->icmp6_seq) >= targets * packets))) {
if(debug > 2) printf("not a proper ICMP_ECHOREPLY\n");
handle_random_icmp(buf + hlen, &resp_addr);
continue;
}
/* this is indeed a valid response */
- memcpy(&data, icp.icmp_data, sizeof(data));
- if (debug > 2)
- printf("ICMP echo-reply of len %lu, id %u, seq %u, cksum 0x%X\n",
- (unsigned long)sizeof(data), ntohs(icp.icmp_id),
- ntohs(icp.icmp_seq), icp.icmp_cksum);
+ if (address_family == PF_INET) {
+ memcpy(&data, packet.icp->icmp_data, sizeof(data));
+ if (debug > 2)
+ printf("ICMP echo-reply of len %lu, id %u, seq %u, cksum 0x%X\n",
+ (unsigned long)sizeof(data), ntohs(packet.icp->icmp_id),
+ ntohs(packet.icp->icmp_seq), packet.icp->icmp_cksum);
+ host = table[ntohs(packet.icp->icmp_seq)/packets];
+ } else {
+ memcpy(&data, &packet.icp6->icmp6_dataun.icmp6_un_data8[4], sizeof(data));
+ if (debug > 2)
+ printf("ICMP echo-reply of len %lu, id %u, seq %u, cksum 0x%X\n",
+ (unsigned long)sizeof(data), ntohs(packet.icp6->icmp6_id),
+ ntohs(packet.icp6->icmp6_seq), packet.icp6->icmp6_cksum);
+ host = table[ntohs(packet.icp6->icmp6_seq)/packets];
+ }
- host = table[ntohs(icp.icmp_seq)/packets];
tdiff = get_timevaldiff(&data.stime, &now);
host->time_waited += tdiff;
@@ -801,22 +911,25 @@ wait_for_reply(int sock, u_int t)
host->rtmin = tdiff;
if(debug) {
+ char address[INET6_ADDRSTRLEN];
+ parse_address(&resp_addr, address, sizeof(address));
printf("%0.3f ms rtt from %s, outgoing ttl: %u, incoming ttl: %u, max: %0.3f, min: %0.3f\n",
- (float)tdiff / 1000, inet_ntoa(resp_addr.sin_addr),
- ttl, ip->ip_ttl, (float)host->rtmax / 1000, (float)host->rtmin / 1000);
+ (float)tdiff / 1000, address,
+ ttl, ip->ip.ip_ttl, (float)host->rtmax / 1000, (float)host->rtmin / 1000);
}
/* if we're in hostcheck mode, exit with limited printouts */
if(mode == MODE_HOSTCHECK) {
printf("OK - %s responds to ICMP. Packet %u, rta %0.3fms|"
- "pkt=%u;;0;%u rta=%0.3f;%0.3f;%0.3f;;\n",
- host->name, icmp_recv, (float)tdiff / 1000,
- icmp_recv, packets, (float)tdiff / 1000,
- (float)warn.rta / 1000, (float)crit.rta / 1000);
+ "pkt=%u;;;0;%u rta=%0.3f;%0.3f;%0.3f;;\n",
+ host->name, icmp_recv, (float)tdiff / 1000,
+ icmp_recv, packets, (float)tdiff / 1000,
+ (float)warn.rta / 1000, (float)crit.rta / 1000);
exit(STATE_OK);
}
}
+ free(packet.buf);
return 0;
}
@@ -824,62 +937,81 @@ wait_for_reply(int sock, u_int t)
static int
send_icmp_ping(int sock, struct rta_host *host)
{
- static union {
- void *buf; /* re-use so we prevent leaks */
- struct icmp *icp;
- u_short *cksum_in;
- } packet = { NULL };
long int len;
struct icmp_ping_data data;
struct msghdr hdr;
struct iovec iov;
struct timeval tv;
- struct sockaddr *addr;
+ void *buf = NULL;
if(sock == -1) {
errno = 0;
crash("Attempt to send on bogus socket");
return -1;
}
- addr = (struct sockaddr *)&host->saddr_in;
- if(!packet.buf) {
- if (!(packet.buf = malloc(icmp_pkt_size))) {
+ if(!buf) {
+ if (!(buf = malloc(icmp_pkt_size))) {
crash("send_icmp_ping(): failed to malloc %d bytes for send buffer",
icmp_pkt_size);
return -1; /* might be reached if we're in debug mode */
}
}
- memset(packet.buf, 0, icmp_pkt_size);
+ memset(buf, 0, icmp_pkt_size);
- if((gettimeofday(&tv, &tz)) == -1) return -1;
+ if((gettimeofday(&tv, &tz)) == -1) {
+ free(buf);
+ return -1;
+ }
data.ping_id = 10; /* host->icmp.icmp_sent; */
memcpy(&data.stime, &tv, sizeof(tv));
- memcpy(&packet.icp->icmp_data, &data, sizeof(data));
- packet.icp->icmp_type = ICMP_ECHO;
- packet.icp->icmp_code = 0;
- packet.icp->icmp_cksum = 0;
- packet.icp->icmp_id = htons(pid);
- packet.icp->icmp_seq = htons(host->id++);
- packet.icp->icmp_cksum = icmp_checksum(packet.cksum_in, icmp_pkt_size);
-
- if (debug > 2)
- printf("Sending ICMP echo-request of len %lu, id %u, seq %u, cksum 0x%X to host %s\n",
- (unsigned long)sizeof(data), ntohs(packet.icp->icmp_id),
- ntohs(packet.icp->icmp_seq), packet.icp->icmp_cksum,
- host->name);
+
+ if (address_family == AF_INET) {
+ struct icmp *icp = (struct icmp*)buf;
+
+ memcpy(&icp->icmp_data, &data, sizeof(data));
+
+ icp->icmp_type = ICMP_ECHO;
+ icp->icmp_code = 0;
+ icp->icmp_cksum = 0;
+ icp->icmp_id = htons(pid);
+ icp->icmp_seq = htons(host->id++);
+ icp->icmp_cksum = icmp_checksum((unsigned short*)buf, icmp_pkt_size);
+
+ if (debug > 2)
+ printf("Sending ICMP echo-request of len %lu, id %u, seq %u, cksum 0x%X to host %s\n",
+ (unsigned long)sizeof(data), ntohs(icp->icmp_id), ntohs(icp->icmp_seq), icp->icmp_cksum, host->name);
+ }
+ else {
+ struct icmp6_hdr *icp6 = (struct icmp6_hdr*)buf;
+ memcpy(&icp6->icmp6_dataun.icmp6_un_data8[4], &data, sizeof(data));
+ icp6->icmp6_type = ICMP6_ECHO_REQUEST;
+ icp6->icmp6_code = 0;
+ icp6->icmp6_cksum = 0;
+ icp6->icmp6_id = htons(pid);
+ icp6->icmp6_seq = htons(host->id++);
+ // let checksum be calculated automatically
+
+ if (debug > 2) {
+ printf("Sending ICMP echo-request of len %lu, id %u, seq %u, cksum 0x%X to host %s\n",
+ (unsigned long)sizeof(data), ntohs(icp6->icmp6_id),
+ ntohs(icp6->icmp6_seq), icp6->icmp6_cksum, host->name);
+ }
+ }
memset(&iov, 0, sizeof(iov));
- iov.iov_base = packet.buf;
+ iov.iov_base = buf;
iov.iov_len = icmp_pkt_size;
memset(&hdr, 0, sizeof(hdr));
- hdr.msg_name = addr;
- hdr.msg_namelen = sizeof(struct sockaddr);
+ hdr.msg_name = (struct sockaddr *)&host->saddr_in;
+ hdr.msg_namelen = sizeof(struct sockaddr_storage);
hdr.msg_iov = &iov;
hdr.msg_iovlen = 1;
+ errno = 0;
+
/* MSG_CONFIRM is a linux thing and only available on linux kernels >= 2.3.15, see send(2) */
#ifdef MSG_CONFIRM
len = sendmsg(sock, &hdr, MSG_CONFIRM);
@@ -887,9 +1019,15 @@ send_icmp_ping(int sock, struct rta_host *host)
len = sendmsg(sock, &hdr, 0);
#endif
+ free(buf);
+
if(len < 0 || (unsigned int)len != icmp_pkt_size) {
- if(debug) printf("Failed to send ping to %s\n",
- inet_ntoa(host->saddr_in.sin_addr));
+ if(debug) {
+ char address[INET6_ADDRSTRLEN];
+ parse_address((struct sockaddr_storage *)&host->saddr_in, address, sizeof(address));
+ printf("Failed to send ping to %s: %s\n", address, strerror(errno));
+ }
+ errno = 0;
return -1;
}
@@ -934,7 +1072,7 @@ recvfrom_wto(int sock, void *buf, unsigned int len, struct sockaddr *saddr,
if(!n) return 0; /* timeout */
- slen = sizeof(struct sockaddr);
+ slen = sizeof(struct sockaddr_storage);
memset(&iov, 0, sizeof(iov));
iov.iov_base = buf;
@@ -958,6 +1096,7 @@ recvfrom_wto(int sock, void *buf, unsigned int len, struct sockaddr *saddr,
break ;
}
}
+
if (!chdr)
#endif // SO_TIMESTAMP
gettimeofday(tv, &tz);
@@ -991,10 +1130,11 @@ finish(int sig)
/* iterate thrice to calculate values, give output, and print perfparse */
host = list;
+
while(host) {
if(!host->icmp_recv) {
/* rta 0 is ofcourse not entirely correct, but will still show up
- * conspicuosly as missing entries in perfparse and cacti */
+ * conspicuously as missing entries in perfparse and cacti */
pl = 100;
rta = 0;
status = STATE_CRITICAL;
@@ -1039,10 +1179,12 @@ finish(int sig)
if(!host->icmp_recv) {
status = STATE_CRITICAL;
if(host->flags & FLAG_LOST_CAUSE) {
+ char address[INET6_ADDRSTRLEN];
+ parse_address(&host->error_addr, address, sizeof(address));
printf("%s: %s @ %s. rta nan, lost %d%%",
host->name,
get_icmp_error_msg(host->icmp_type, host->icmp_code),
- inet_ntoa(host->error_addr),
+ address,
100);
}
else { /* not marked as lost cause, so we have no flags for it */
@@ -1104,7 +1246,6 @@ get_timevaldiff(struct timeval *early, struct timeval *later)
{
return 0;
}
-
ret = (later->tv_sec - early->tv_sec) * 1000000;
ret += later->tv_usec - early->tv_usec;
@@ -1112,18 +1253,35 @@ get_timevaldiff(struct timeval *early, struct timeval *later)
}
static int
-add_target_ip(char *arg, struct in_addr *in)
+add_target_ip(char *arg, struct sockaddr_storage *in)
{
struct rta_host *host;
+ struct sockaddr_in *sin, *host_sin;
+ struct sockaddr_in6 *sin6, *host_sin6;
+
+ if (address_family == AF_INET)
+ sin = (struct sockaddr_in *)in;
+ else
+ sin6 = (struct sockaddr_in6 *)in;
- /* disregard obviously stupid addresses */
- if(in->s_addr == INADDR_NONE || in->s_addr == INADDR_ANY)
+
+
+ /* disregard obviously stupid addresses
+ * (I didn't find an ipv6 equivalent to INADDR_NONE) */
+ if (((address_family == AF_INET && (sin->sin_addr.s_addr == INADDR_NONE
+ || sin->sin_addr.s_addr == INADDR_ANY)))
+ || (address_family == AF_INET6 && (sin6->sin6_addr.s6_addr == in6addr_any.s6_addr))) {
return -1;
+ }
/* no point in adding two identical IP's, so don't. ;) */
host = list;
while(host) {
- if(host->saddr_in.sin_addr.s_addr == in->s_addr) {
+ host_sin = (struct sockaddr_in *)&host->saddr_in;
+ host_sin6 = (struct sockaddr_in6 *)&host->saddr_in;
+
+ if( (address_family == AF_INET && host_sin->sin_addr.s_addr == sin->sin_addr.s_addr)
+ || (address_family == AF_INET6 && host_sin6->sin6_addr.s6_addr == sin6->sin6_addr.s6_addr)) {
if(debug) printf("Identical IP already exists. Not adding %s\n", arg);
return -1;
}
@@ -1131,19 +1289,29 @@ add_target_ip(char *arg, struct in_addr *in)
}
/* add the fresh ip */
- host = malloc(sizeof(struct rta_host));
+ host = (struct rta_host*)malloc(sizeof(struct rta_host));
if(!host) {
- crash("add_target_ip(%s, %s): malloc(%d) failed",
- arg, inet_ntoa(*in), sizeof(struct rta_host));
+ char straddr[INET6_ADDRSTRLEN];
+ parse_address((struct sockaddr_storage*)&in, straddr, sizeof(straddr));
+ crash("add_target_ip(%s, %s): malloc(%lu) failed",
+ arg, straddr, sizeof(struct rta_host));
}
memset(host, 0, sizeof(struct rta_host));
/* set the values. use calling name for output */
host->name = strdup(arg);
- /* fill out the sockaddr_in struct */
- host->saddr_in.sin_family = AF_INET;
- host->saddr_in.sin_addr.s_addr = in->s_addr;
+ /* fill out the sockaddr_storage struct */
+ if(address_family == AF_INET) {
+ host_sin = (struct sockaddr_in *)&host->saddr_in;
+ host_sin->sin_family = AF_INET;
+ host_sin->sin_addr.s_addr = sin->sin_addr.s_addr;
+ }
+ else {
+ host_sin6 = (struct sockaddr_in6 *)&host->saddr_in;
+ host_sin6->sin6_family = AF_INET6;
+ memcpy(host_sin6->sin6_addr.s6_addr, sin6->sin6_addr.s6_addr, sizeof host_sin6->sin6_addr.s6_addr);
+ }
host->rtmin = DBL_MAX;
@@ -1160,31 +1328,67 @@ add_target_ip(char *arg, struct in_addr *in)
static int
add_target(char *arg)
{
- int i;
- struct hostent *he;
- struct in_addr *in, ip;
+ int error, result;
+ struct sockaddr_storage ip;
+ struct addrinfo hints, *res, *p;
+ struct sockaddr_in *sin;
+ struct sockaddr_in6 *sin6;
+
+ switch (address_family) {
+ case -1:
+ /* -4 and -6 are not specified on cmdline */
+ address_family = AF_INET;
+ sin = (struct sockaddr_in *)&ip;
+ result = inet_pton(address_family, arg, &sin->sin_addr);
+#ifdef USE_IPV6
+ if( result != 1 ){
+ address_family = AF_INET6;
+ sin6 = (struct sockaddr_in6 *)&ip;
+ result = inet_pton(address_family, arg, &sin6->sin6_addr);
+ }
+#endif
+ /* If we don't find any valid addresses, we still don't know the address_family */
+ if ( result != 1) {
+ address_family = -1;
+ }
+ break;
+ case AF_INET:
+ sin = (struct sockaddr_in *)&ip;
+ result = inet_pton(address_family, arg, &sin->sin_addr);
+ break;
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)&ip;
+ result = inet_pton(address_family, arg, &sin6->sin6_addr);
+ break;
+ default: crash("Address family not supported");
+ }
/* don't resolve if we don't have to */
- if((ip.s_addr = inet_addr(arg)) != INADDR_NONE) {
+ if(result == 1) {
/* don't add all ip's if we were given a specific one */
return add_target_ip(arg, &ip);
- /* he = gethostbyaddr((char *)in, sizeof(struct in_addr), AF_INET); */
- /* if(!he) return add_target_ip(arg, in); */
}
else {
errno = 0;
- he = gethostbyname(arg);
- if(!he) {
+ memset(&hints, 0, sizeof(hints));
+ if (address_family == -1) {
+ hints.ai_family = AF_UNSPEC;
+ } else {
+ hints.ai_family = address_family == AF_INET ? PF_INET : PF_INET6;
+ }
+ hints.ai_socktype = SOCK_RAW;
+ if((error = getaddrinfo(arg, NULL, &hints, &res)) != 0) {
errno = 0;
- crash("Failed to resolve %s", arg);
+ crash("Failed to resolve %s: %s", arg, gai_strerror(error));
return -1;
}
+ address_family = res->ai_family;
}
/* possibly add all the IP's as targets */
- for(i = 0; he->h_addr_list[i]; i++) {
- in = (struct in_addr *)he->h_addr_list[i];
- add_target_ip(arg, in);
+ for(p = res; p != NULL; p = p->ai_next) {
+ memcpy(&ip, p->ai_addr, p->ai_addrlen);
+ add_target_ip(arg, &ip);
/* this is silly, but it works */
if(mode == MODE_HOSTCHECK || mode == MODE_ALL) {
@@ -1193,6 +1397,7 @@ add_target(char *arg)
}
break;
}
+ freeaddrinfo(res);
return 0;
}
@@ -1203,7 +1408,7 @@ set_source_ip(char *arg)
struct sockaddr_in src;
memset(&src, 0, sizeof(src));
- src.sin_family = AF_INET;
+ src.sin_family = address_family;
if((src.sin_addr.s_addr = inet_addr(arg)) == INADDR_NONE)
src.sin_addr.s_addr = get_ip_address(arg);
if(bind(icmp_sock, (struct sockaddr *)&src, sizeof(src)) == -1)
@@ -1311,12 +1516,12 @@ get_threshold(char *str, threshold *th)
unsigned short
icmp_checksum(unsigned short *p, int n)
{
- register unsigned short cksum;
- register long sum = 0;
+ unsigned short cksum;
+ long sum = 0;
- while(n > 1) {
+ while(n > 2) {
sum += *p++;
- n -= 2;
+ n -= sizeof(unsigned short);
}
/* mop up the occasional odd byte */
@@ -1347,6 +1552,8 @@ print_help(void)
printf (" %s\n", "-H");
printf (" %s\n", _("specify a target"));
+ printf (" %s\n", "[-4|-6]");
+ printf (" %s\n", _("Use IPv4 (default) or IPv6 to communicate with the targets"));
printf (" %s\n", "-w");
printf (" %s", _("warning threshold (currently "));
printf ("%0.3fms,%u%%)\n", (float)warn.rta / 1000, warn.pl);
diff --git a/plugins-root/t/check_dhcp.t b/plugins-root/t/check_dhcp.t
index 222f4544..ce627736 100644
--- a/plugins-root/t/check_dhcp.t
+++ b/plugins-root/t/check_dhcp.t
@@ -19,7 +19,7 @@ if ($allow_sudo eq "yes" or $> == 0) {
my $sudo = $> == 0 ? '' : 'sudo';
my $successOutput = '/OK: Received \d+ DHCPOFFER\(s\), \d+ of 1 requested servers responded, max lease time = \d+ sec\./';
-my $failureOutput = '/CRITICAL: No DHCPOFFERs were received/';
+my $failureOutput = '/CRITICAL: (No DHCPOFFERs were received|Received \d+ DHCPOFFER\(s\), 0 of 1 requested servers responded, max lease time = \d+ sec\.)/';
my $invalidOutput = '/Invalid hostname/';
my $host_responsive = getTestParameter( "NP_HOST_DHCP_RESPONSIVE",
@@ -36,7 +36,12 @@ my $hostname_invalid = getTestParameter( "NP_HOSTNAME_INVALID",
# try to determince interface
my $interface = '';
-if(`ifconfig -a 2>/dev/null` =~ m/^(e\w*\d+)/mx and $1 ne 'eth0') {
+
+# find interface used for default route
+if (-x '/usr/sbin/ip' and `/usr/sbin/ip route get 1.1.1.1 2>/dev/null` =~ m/\sdev\s(\S+)/) {
+ $interface = "-i $1";
+}
+elsif (`ifconfig -a 2>/dev/null` =~ m/^(e\w*\d+)/mx and $1 ne 'eth0') {
$interface = ' -i '.$1;
}
diff --git a/plugins-scripts/Makefile.am b/plugins-scripts/Makefile.am
index ea65aed1..088a4459 100644
--- a/plugins-scripts/Makefile.am
+++ b/plugins-scripts/Makefile.am
@@ -16,11 +16,13 @@ VPATH=$(top_srcdir) $(top_srcdir)/plugins-scripts $(top_srcdir)/plugins-scripts/
libexec_SCRIPTS = check_breeze check_disk_smb check_flexlm check_ircd \
check_log check_oracle check_rpc check_sensors check_wave \
check_ifstatus check_ifoperstatus check_mailq check_file_age \
+ check_uptime \
utils.sh utils.pm
EXTRA_DIST=check_breeze.pl check_disk_smb.pl check_flexlm.pl check_ircd.pl \
check_log.sh check_oracle.sh check_rpc.pl check_sensors.sh \
check_ifstatus.pl check_ifoperstatus.pl check_wave.pl check_mailq.pl check_file_age.pl \
+ check_uptime.pl \
utils.sh.in utils.pm.in t
EDIT = sed \
diff --git a/plugins-scripts/check_disk_smb.pl b/plugins-scripts/check_disk_smb.pl
index 98992268..28c49e84 100755
--- a/plugins-scripts/check_disk_smb.pl
+++ b/plugins-scripts/check_disk_smb.pl
@@ -19,7 +19,7 @@
#
require 5.004;
-use POSIX;
+use POSIX qw(setsid);
use strict;
use Getopt::Long;
use vars qw($opt_P $opt_V $opt_h $opt_H $opt_s $opt_W $opt_u $opt_p $opt_w $opt_c $opt_a $verbose);
@@ -28,6 +28,9 @@ use FindBin;
use lib "$FindBin::Bin";
use utils qw($TIMEOUT %ERRORS &print_revision &support &usage);
+# make us session leader which makes all children exit if we do
+setsid;
+
sub print_help ();
sub print_usage ();
@@ -175,6 +178,8 @@ my @lines = undef;
# Just in case of problems, let's not hang the monitoring system
$SIG{'ALRM'} = sub {
print "No Answer from Client\n";
+ $SIG{'INT'} = 'IGNORE';
+ kill(-2, $$);
exit $ERRORS{"UNKNOWN"};
};
alarm($TIMEOUT);
diff --git a/plugins-scripts/check_file_age.pl b/plugins-scripts/check_file_age.pl
index 56b8e97c..26281ddd 100755
--- a/plugins-scripts/check_file_age.pl
+++ b/plugins-scripts/check_file_age.pl
@@ -43,8 +43,6 @@ $ENV{'ENV'}='';
$opt_w = 240;
$opt_c = 600;
-$opt_W = 0;
-$opt_C = 0;
$opt_f = "";
Getopt::Long::Configure('bundling');
@@ -53,10 +51,10 @@ GetOptions(
"h" => \$opt_h, "help" => \$opt_h,
"i" => \$opt_i, "ignore-missing" => \$opt_i,
"f=s" => \$opt_f, "file" => \$opt_f,
- "w=f" => \$opt_w, "warning-age=f" => \$opt_w,
- "W=f" => \$opt_W, "warning-size=f" => \$opt_W,
- "c=f" => \$opt_c, "critical-age=f" => \$opt_c,
- "C=f" => \$opt_C, "critical-size=f" => \$opt_C);
+ "w=s" => \$opt_w, "warning-age=s" => \$opt_w,
+ "W=s" => \$opt_W, "warning-size=s" => \$opt_W,
+ "c=s" => \$opt_c, "critical-age=s" => \$opt_c,
+ "C=s" => \$opt_C, "critical-size=s" => \$opt_C);
if ($opt_V) {
print_revision($PROGNAME, '@NP_VERSION@');
@@ -91,18 +89,47 @@ unless (-e $opt_f) {
$st = File::stat::stat($opt_f);
$age = time - $st->mtime;
$size = $st->size;
-$perfdata = "age=${age}s;${opt_w};${opt_c} size=${size}B;${opt_W};${opt_C};0";
-
$result = 'OK';
-if (($opt_c and $age > $opt_c) or ($opt_C and $size < $opt_C)) {
- $result = 'CRITICAL';
+if ($opt_c !~ m/^\d+$/ or ($opt_C and $opt_C !~ m/^\d+$/)
+ or $opt_w !~ m/^\d+$/ or ($opt_W and $opt_W !~ m/^\d+$/)) {
+ # range has been specified so use M::P::R to process
+ require Monitoring::Plugin::Range;
+ # use permissive range defaults for size when none specified
+ $opt_W = "0:" unless ($opt_W);
+ $opt_C = "0:" unless ($opt_C);
+
+ if (Monitoring::Plugin::Range->parse_range_string($opt_c)
+ ->check_range($age) == 1) { # 1 means it raises an alert because it's OUTSIDE the range
+ $result = 'CRITICAL';
+ }
+ elsif (Monitoring::Plugin::Range->parse_range_string($opt_C)
+ ->check_range($size) == 1) {
+ $result = 'CRITICAL';
+ }
+ elsif (Monitoring::Plugin::Range->parse_range_string($opt_w)
+ ->check_range($age) == 1) {
+ $result = 'WARNING';
+ }
+ elsif (Monitoring::Plugin::Range->parse_range_string($opt_W)
+ ->check_range($size) == 1) {
+ $result = 'WARNING';
+ }
}
-elsif (($opt_w and $age > $opt_w) or ($opt_W and $size < $opt_W)) {
- $result = 'WARNING';
+else {
+ # use permissive defaults for size when none specified
+ $opt_W = 0 unless ($opt_W);
+ $opt_C = 0 unless ($opt_C);
+ if ($age > $opt_c or $size < $opt_C) {
+ $result = 'CRITICAL';
+ }
+ elsif ($age > $opt_w or $size < $opt_W) {
+ $result = 'WARNING';
+ }
}
+$perfdata = "age=${age}s;${opt_w};${opt_c} size=${size}B;${opt_W};${opt_C};0";
print "FILE_AGE $result: $opt_f is $age seconds old and $size bytes | $perfdata\n";
exit $ERRORS{$result};
@@ -120,7 +147,15 @@ sub print_help () {
print "\n";
print " -i | --ignore-missing : return OK if the file does not exist\n";
print " <secs> File must be no more than this many seconds old (default: warn 240 secs, crit 600)\n";
- print " <size> File must be at least this many bytes long (default: crit 0 bytes)\n";
+ print " <size> File must be at least this many bytes long (default: file size is ignored (0 bytes))\n\n";
+ print " Both <secs> and <size> can specify a range using the standard plugin syntax\n";
+ print " If any of the warning and critical arguments are in range syntax (not just bare numbers)\n";
+ print " then all warning and critical arguments will be interpreted as ranges.\n";
+ print " To use range processing the perl module Monitoring::Plugin must be installed\n";
+ print " For range syntax see https://www.monitoring-plugins.org/doc/guidelines.html#THRESHOLDFORMAT\n";
+ print " It is strongly recommended when using range syntax that all four of -w, -W, -c and -C are specified\n";
+ print " otherwise it is unlikely that the size test will be doing what is desired\n";
print "\n";
support();
}
+
diff --git a/plugins-scripts/check_ifoperstatus.pl b/plugins-scripts/check_ifoperstatus.pl
index 9ede1633..c190ce95 100755
--- a/plugins-scripts/check_ifoperstatus.pl
+++ b/plugins-scripts/check_ifoperstatus.pl
@@ -124,10 +124,10 @@ if (!defined($session)) {
## map ifdescr to ifindex - should look at being able to cache this value
if (defined $ifdescr || defined $iftype) {
- # escape "/" in ifdescr - very common in the Cisco world
if (defined $iftype) {
$status=fetch_ifindex($snmpIfType, $iftype);
} else {
+ # escape "/" in ifdescr - very common in the Cisco world
$ifdescr =~ s/\//\\\//g;
$status=fetch_ifindex($snmpIfDescr, $ifdescr); # if using on device with large number of interfaces
# recommend use of SNMP v2 (get-bulk)
diff --git a/plugins-scripts/check_ircd.pl b/plugins-scripts/check_ircd.pl
index 22d21c2e..d869ae7b 100755
--- a/plugins-scripts/check_ircd.pl
+++ b/plugins-scripts/check_ircd.pl
@@ -69,7 +69,9 @@ $ENV{'ENV'}='';
# -----------------------------------------------------------------[ Global ]--
$PROGNAME = "check_ircd";
-my $NICK="ircd$$";
+# nickname shouldn't be longer than 9 chars, this might happen with large PIDs
+# To prevent this, we cut off the part over 10000
+my $NICK="ircd" . $$ % 10000;
my $USER_INFO="monitor localhost localhost : ";
# -------------------------------------------------------------[ connection ]--
diff --git a/plugins-scripts/check_log.sh b/plugins-scripts/check_log.sh
index 8f76b1b5..61131236 100755
--- a/plugins-scripts/check_log.sh
+++ b/plugins-scripts/check_log.sh
@@ -1,8 +1,7 @@
#!/bin/sh
#
# Log file pattern detector plugin for monitoring
-# Written by Ethan Galstad (nagios@nagios.org)
-# Last Modified: 07-31-1999
+# Written originally by Ethan Galstad (nagios@nagios.org)
#
# Usage: ./check_log <log_file> <old_log_file> <pattern>
#
@@ -70,6 +69,11 @@ print_usage() {
echo "Usage: $PROGNAME -F logfile -O oldlog -q query"
echo "Usage: $PROGNAME --help"
echo "Usage: $PROGNAME --version"
+ echo ""
+ echo "Other parameters:"
+ echo " -a|--all : Print all matching lines"
+ echo " -p|--perl-regex : Use perl style regular expressions in the query"
+ echo " -e|--extended-regex : Use extended style regular expressions in the query (not necessary for GNU grep)"
}
print_help() {
@@ -116,34 +120,58 @@ while test -n "$1"; do
;;
--filename)
logfile=$2
- shift
+ shift 2
;;
-F)
logfile=$2
- shift
+ shift 2
;;
--oldlog)
oldlog=$2
- shift
+ shift 2
;;
-O)
oldlog=$2
- shift
+ shift 2
;;
--query)
query=$2
- shift
+ shift 2
;;
-q)
query=$2
- shift
+ shift 2
;;
-x)
exitstatus=$2
- shift
+ shift 2
;;
--exitstatus)
exitstatus=$2
+ shift 2
+ ;;
+ --extended-regex)
+ ERE=1
+ shift
+ ;;
+ -e)
+ ERE=1
+ shift
+ ;;
+ --perl-regex)
+ PRE=1
+ shift
+ ;;
+ -p)
+ PRE=1
+ shift
+ ;;
+ --all)
+ ALL=1
+ shift
+ ;;
+ -a)
+ ALL=1
shift
;;
*)
@@ -152,9 +180,24 @@ while test -n "$1"; do
exit "$STATE_UNKNOWN"
;;
esac
- shift
done
+# Parameter sanity check
+if [ $ERE ] && [ $PRE ] ; then
+ echo "Can not use extended and perl regex at the same time"
+ exit "$STATE_UNKNOWN"
+fi
+
+GREP="grep"
+
+if [ $ERE ]; then
+ GREP="grep -E"
+fi
+
+if [ $PRE ]; then
+ GREP="grep -P"
+fi
+
# If the source log file doesn't exist, exit
if [ ! -e "$logfile" ]; then
@@ -180,9 +223,10 @@ fi
# The temporary file that the script should use while
# processing the log file.
if [ -x /bin/mktemp ]; then
- tempdiff=$(/bin/mktemp /tmp/check_log.XXXXXXXXXX)
+
+ tempdiff=$(/bin/mktemp /tmp/check_log.XXXXXXXXXX)
else
- tempdiff=$(/bin/date '+%H%M%S')
+ tempdiff=$(/bin/date '+%H%M%S')
tempdiff="/tmp/check_log.${tempdiff}"
touch "$tempdiff"
chmod 600 "$tempdiff"
@@ -190,11 +234,21 @@ fi
diff "$logfile" "$oldlog" | grep -v "^>" > "$tempdiff"
-# Count the number of matching log entries we have
-count=$(grep -c "$query" "$tempdiff")
-# Get the last matching entry in the diff file
-lastentry=$(grep "$query" "$tempdiff" | tail -1)
+if [ $ALL ]; then
+ # Get all matching entries in the diff file
+ entry=$($GREP "$query" "$tempdiff")
+
+ # Count the number of matching log entries we have
+ count=$(echo "$entry" | wc -l)
+
+else
+ # Count the number of matching log entries we have
+ count=$($GREP -c "$query" "$tempdiff")
+
+ # Get the last matching entry in the diff file
+ entry=$($GREP "$query" "$tempdiff" | tail -1)
+fi
rm -f "$tempdiff"
cat "$logfile" > "$oldlog"
@@ -203,7 +257,7 @@ if [ "$count" = "0" ]; then # no matches, exit with no error
echo "Log check ok - 0 pattern matches found"
exitstatus=$STATE_OK
else # Print total matche count and the last entry we found
- echo "($count) $lastentry"
+ echo "($count) $entry"
exitstatus=$STATE_CRITICAL
fi
diff --git a/plugins-scripts/check_mailq.pl b/plugins-scripts/check_mailq.pl
index 32f498d3..3914f4a7 100755
--- a/plugins-scripts/check_mailq.pl
+++ b/plugins-scripts/check_mailq.pl
@@ -28,9 +28,9 @@
use POSIX;
use strict;
use Getopt::Long;
-use vars qw($opt_V $opt_h $opt_v $verbose $PROGNAME $opt_w $opt_c $opt_t $opt_s
- $opt_M $mailq $status $state $msg $msg_q $msg_p $opt_W $opt_C $mailq @lines
- %srcdomains %dstdomains);
+use vars qw($opt_V $opt_h $opt_v $verbose $PROGNAME $opt_w $opt_c $opt_t $opt_s $opt_d
+ $opt_M $mailq $status $state $msg $msg_q $msg_p $opt_W $opt_C $mailq $mailq_args
+ @lines %srcdomains %dstdomains);
use FindBin;
use lib "$FindBin::Bin";
use utils qw(%ERRORS &print_revision &support &usage );
@@ -48,6 +48,8 @@ $PROGNAME = "check_mailq";
$mailq = 'sendmail'; # default
$msg_q = 0 ;
$msg_p = 0 ;
+# If appended, must start with a space
+$mailq_args = '' ;
$state = $ERRORS{'UNKNOWN'};
Getopt::Long::Configure('bundling');
@@ -68,6 +70,10 @@ if ($opt_s) {
$sudo = "";
}
+if ($opt_d) {
+ $mailq_args = $mailq_args . ' -C ' . $opt_d;
+}
+
$SIG{'ALRM'} = sub {
print ("ERROR: timed out waiting for $utils::PATH_TO_MAILQ \n");
exit $ERRORS{"WARNING"};
@@ -309,8 +315,8 @@ elsif ( $mailq eq "postfix" ) {
## open mailq
if ( defined $utils::PATH_TO_MAILQ && -x $utils::PATH_TO_MAILQ ) {
- if (! open (MAILQ, "$sudo $utils::PATH_TO_MAILQ | " ) ) {
- print "ERROR: could not open $utils::PATH_TO_MAILQ \n";
+ if (! open (MAILQ, "$sudo $utils::PATH_TO_MAILQ$mailq_args | " ) ) {
+ print "ERROR: could not open $utils::PATH_TO_MAILQ$mailq_args \n";
exit $ERRORS{'UNKNOWN'};
}
}elsif( defined $utils::PATH_TO_MAILQ){
@@ -330,7 +336,7 @@ elsif ( $mailq eq "postfix" ) {
close MAILQ;
if ( $? ) {
- print "CRITICAL: Error code ".($?>>8)." returned from $utils::PATH_TO_MAILQ",$/;
+ print "CRITICAL: Error code ".($?>>8)." returned from $utils::PATH_TO_MAILQ$mailq_args",$/;
exit $ERRORS{CRITICAL};
}
@@ -343,7 +349,7 @@ elsif ( $mailq eq "postfix" ) {
}elsif ($lines[0]=~/Mail queue is empty/) {
$msg_q = 0;
}else{
- print "Couldn't match $utils::PATH_TO_MAILQ output\n";
+ print "Couldn't match $utils::PATH_TO_MAILQ$mailq_args output\n";
exit $ERRORS{'UNKNOWN'};
}
@@ -533,7 +539,7 @@ elsif ( $mailq eq "nullmailer" ) {
while (<MAILQ>) {
#2006-06-22 16:00:00 282 bytes
- if (/^[1-9][0-9]*-[01][0-9]-[0-3][0-9]\s[0-2][0-9]\:[0-2][0-9]\:[0-2][0-9]\s{2}[0-9]+\sbytes$/) {
+ if (/^[1-9][0-9]*-[01][0-9]-[0-3][0-9]\s[0-2][0-9]\:[0-5][0-9]\:[0-5][0-9]\s+[0-9]+\sbytes/) {
$msg_q++ ;
}
}
@@ -568,7 +574,10 @@ sub process_arguments(){
"w=i" => \$opt_w, "warning=i" => \$opt_w, # warning if above this number
"c=i" => \$opt_c, "critical=i" => \$opt_c, # critical if above this number
"t=i" => \$opt_t, "timeout=i" => \$opt_t,
- "s" => \$opt_s, "sudo" => \$opt_s
+ "s" => \$opt_s, "sudo" => \$opt_s,
+ "d:s" => \$opt_d, "configdir:s" => \$opt_d,
+ "W=i" => \$opt_W, # warning if above this number
+ "C=i" => \$opt_C, # critical if above this number
);
if ($opt_V) {
@@ -649,7 +658,7 @@ sub process_arguments(){
}
sub print_usage () {
- print "Usage: $PROGNAME -w <warn> -c <crit> [-W <warn>] [-C <crit>] [-M <MTA>] [-t <timeout>] [-s] [-v]\n";
+ print "Usage: $PROGNAME -w <warn> -c <crit> [-W <warn>] [-C <crit>] [-M <MTA>] [-t <timeout>] [-s] [-d <CONFIGDIR>] [-v]\n";
}
sub print_help () {
@@ -662,11 +671,12 @@ sub print_help () {
print " Feedback/patches to support non-sendmail mailqueue welcome\n\n";
print "-w (--warning) = Min. number of messages in queue to generate warning\n";
print "-c (--critical) = Min. number of messages in queue to generate critical alert ( w < c )\n";
- print "-W (--Warning) = Min. number of messages for same domain in queue to generate warning\n";
- print "-C (--Critical) = Min. number of messages for same domain in queue to generate critical alert ( W < C )\n";
+ print "-W = Min. number of messages for same domain in queue to generate warning\n";
+ print "-C = Min. number of messages for same domain in queue to generate critical alert ( W < C )\n";
print "-t (--timeout) = Plugin timeout in seconds (default = $utils::TIMEOUT)\n";
print "-M (--mailserver) = [ sendmail | qmail | postfix | exim | nullmailer ] (default = autodetect)\n";
print "-s (--sudo) = Use sudo to call the mailq command\n";
+ print "-d (--configdir) = Config file or directory\n";
print "-h (--help)\n";
print "-V (--version)\n";
print "-v (--verbose) = debugging output\n";
diff --git a/plugins-scripts/check_uptime.pl b/plugins-scripts/check_uptime.pl
new file mode 100755
index 00000000..4c9f22da
--- /dev/null
+++ b/plugins-scripts/check_uptime.pl
@@ -0,0 +1,315 @@
+#!@PERL@ -w
+
+# check_uptime - check uptime to see how long the system is running.
+#
+
+# License Information:
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
+# USA
+#
+############################################################################
+
+use POSIX;
+use strict;
+use Getopt::Long;
+use vars qw($opt_V $opt_h $opt_v $verbose $PROGNAME $opt_w $opt_c
+ $opt_f $opt_s
+ $lower_warn_threshold $upper_warn_threshold
+ $lower_crit_threshold $upper_crit_threshold
+ $status $state $msg);
+use FindBin;
+use lib "$FindBin::Bin";
+use utils qw(%ERRORS &print_revision &support &usage );
+
+sub print_help ();
+sub print_usage ();
+sub process_arguments ();
+
+$ENV{'PATH'}='@TRUSTED_PATH@';
+$ENV{'BASH_ENV'}='';
+$ENV{'ENV'}='';
+$PROGNAME = "check_uptime";
+$state = $ERRORS{'UNKNOWN'};
+
+my $uptime_file = "/proc/uptime";
+
+
+# Process arguments
+
+Getopt::Long::Configure('bundling');
+$status = process_arguments();
+if ($status){
+ print "ERROR: processing arguments\n";
+ exit $ERRORS{"UNKNOWN"};
+}
+
+
+# Get uptime info from file
+
+if ( ! -r $uptime_file ) {
+ print "ERROR: file '$uptime_file' is not readable\n";
+ exit $ERRORS{"UNKNOWN"};
+}
+
+if ( ! open FILE, "<", $uptime_file ) {
+ print "ERROR: cannot read from file '$uptime_file'\n";
+ exit $ERRORS{"UNKNOWN"};
+}
+
+chomp( my $file_content = <FILE> );
+close FILE;
+
+print "$uptime_file: $file_content\n" if $verbose;
+
+# Get leading integer value (without fraction)
+my ( $uptime_seconds ) = $file_content =~ /^([\d]+)/;
+
+# Bail out if value is not numeric
+if ( $uptime_seconds !~ /^\d+$/ ) {
+ print "ERROR: no numeric value: $uptime_seconds\n";
+ exit $ERRORS{"UNKNOWN"};
+}
+
+
+# Do calculations for a "pretty" format (2 weeks, 5 days, ...)
+
+my ( $secs, $mins, $hours, $days, $weeks );
+$secs = $uptime_seconds;
+$mins = $hours = $days = $weeks = 0;
+if ( $secs > 100 ) {
+ $mins = int( $secs / 60 );
+ $secs -= $mins * 60;
+}
+if ( $mins > 100 ) {
+ $hours = int( $mins / 60 );
+ $mins -= $hours * 60;
+}
+if ( $hours > 48 ) {
+ $days = int( $hours / 24 );
+ $hours -= $days * 24;
+}
+if ( $days > 14 ) {
+ $weeks = int( $days / 7 );
+ $days -= $weeks * 7;
+}
+
+my $pretty_uptime = "";
+$pretty_uptime .= sprintf( "%d week%s, ", $weeks, $weeks == 1 ? "" : "s" ) if $weeks;
+$pretty_uptime .= sprintf( "%d day%s, ", $days, $days == 1 ? "" : "s" ) if $days;
+$pretty_uptime .= sprintf( "%d hour%s, ", $hours, $hours == 1 ? "" : "s" ) if $hours;
+$pretty_uptime .= sprintf( "%d minute%s, ", $mins, $mins == 1 ? "" : "s" ) if $mins;
+# Replace last occurrence of comma with "and"
+$pretty_uptime =~ s/, $/ and /;
+# Always print the seconds (though it may be 0 seconds)
+$pretty_uptime .= sprintf( "%d second%s", $secs, $secs == 1 ? "" : "s" );
+
+
+# Default to catch errors in program
+my $state_str = "UNKNOWN";
+
+# Check values
+my $out_of_bounds_text = "";
+if ( $uptime_seconds > $upper_crit_threshold ) {
+ $state_str = "CRITICAL";
+ $out_of_bounds_text = "upper crit";
+} elsif ( $uptime_seconds < $lower_crit_threshold ) {
+ $state_str = "CRITICAL";
+ $out_of_bounds_text = "lower crit";
+} elsif ( $uptime_seconds > $upper_warn_threshold ) {
+ $state_str = "WARNING";
+ $out_of_bounds_text = "upper warn";
+} elsif ( $uptime_seconds < $lower_warn_threshold ) {
+ $state_str = "WARNING";
+ $out_of_bounds_text = "lower warn";
+} else {
+ $state_str = "OK";
+}
+
+$msg = "$state_str: ";
+
+$msg .= "uptime is $uptime_seconds seconds. ";
+$msg .= "Exceeds $out_of_bounds_text threshold. " if $out_of_bounds_text;
+$msg .= "Running for $pretty_uptime. " if $opt_f;
+if ( $opt_s ) {
+ my $up_since = strftime( "%Y-%m-%d %H:%M:%S", localtime( time - $uptime_seconds ) );
+ $msg .= "Running since $up_since. ";
+}
+
+$state = $ERRORS{$state_str};
+
+# Perfdata support
+print "$msg|uptime=${uptime_seconds}s;$upper_warn_threshold;$upper_crit_threshold;0\n";
+exit $state;
+
+
+#####################################
+#### subs
+
+
+sub process_arguments(){
+ GetOptions
+ ("V" => \$opt_V, "version" => \$opt_V,
+ "v" => \$opt_v, "verbose" => \$opt_v,
+ "h" => \$opt_h, "help" => \$opt_h,
+ "w=s" => \$opt_w, "warning=s" => \$opt_w, # warning if above this number
+ "c=s" => \$opt_c, "critical=s" => \$opt_c, # critical if above this number
+ "f" => \$opt_f, "for" => \$opt_f, # show "running for ..."
+ "s" => \$opt_s, "since" => \$opt_s, # show "running since ..."
+ );
+
+ if ($opt_V) {
+ print_revision($PROGNAME,'@NP_VERSION@');
+ exit $ERRORS{'UNKNOWN'};
+ }
+
+ if ($opt_h) {
+ print_help();
+ exit $ERRORS{'UNKNOWN'};
+ }
+
+ if (defined $opt_v) {
+ $verbose = $opt_v;
+ }
+
+ unless ( defined $opt_w && defined $opt_c ) {
+ print_usage();
+ exit $ERRORS{'UNKNOWN'};
+ }
+
+ # Check if a range was supplied ("lowvalue:highvalue") for warning and critical
+ # Otherwise, set 0 as the lower threshold and the parameter value as upper threshold
+ # (the uptime should always be positive, so there should be no issue)
+ if ( $opt_w =~ /^(.+):(.+)$/ ) {
+ $lower_warn_threshold = $1;
+ $upper_warn_threshold = $2;
+ } else {
+ $lower_warn_threshold = 0;
+ $upper_warn_threshold = $opt_w;
+ }
+ if ( $opt_c =~ /^(.+):(.+)$/ ) {
+ $lower_crit_threshold = $1;
+ $upper_crit_threshold = $2;
+ } else {
+ $lower_crit_threshold = 0;
+ $upper_crit_threshold = $opt_c;
+ }
+
+ # Set as seconds (calculate if suffix present)
+ $lower_warn_threshold = calc_as_seconds( $lower_warn_threshold );
+ $lower_crit_threshold = calc_as_seconds( $lower_crit_threshold );
+ $upper_warn_threshold = calc_as_seconds( $upper_warn_threshold );
+ $upper_crit_threshold = calc_as_seconds( $upper_crit_threshold );
+
+ # Check for numeric value of warning parameter
+ if ( $lower_warn_threshold !~ /^\d+$/ ) {
+ print "Lower warning (-w) is not numeric\n";
+ exit $ERRORS{'UNKNOWN'};
+ }
+ if ( $upper_warn_threshold !~ /^\d+$/ ) {
+ print "Upper warning (-w) is not numeric\n";
+ exit $ERRORS{'UNKNOWN'};
+ }
+ # Check for numeric value of critical parameter
+ if ( $lower_crit_threshold !~ /^\d+$/ ) {
+ print "Lower critical (-c) is not numeric\n";
+ exit $ERRORS{'UNKNOWN'};
+ }
+ if ( $upper_crit_threshold !~ /^\d+$/ ) {
+ print "Upper critical (-c) is not numeric\n";
+ exit $ERRORS{'UNKNOWN'};
+ }
+
+ # Check boundaries
+ if ( $upper_warn_threshold >= $upper_crit_threshold ) {
+ print "Upper Warning (-w) cannot be greater than Critical (-c)!\n";
+ exit $ERRORS{'UNKNOWN'};
+ }
+ # No "<=" since both values are zero if no range (only upper threshold values) is given
+ if ( $lower_warn_threshold < $lower_crit_threshold ) {
+ print "Lower Warning (-w) cannot be less than Critical (-c)!\n";
+ exit $ERRORS{'UNKNOWN'};
+ }
+
+ return $ERRORS{'OK'};
+}
+
+sub print_usage () {
+ print "Usage: $PROGNAME -w <warn> -c <crit> [-v]\n";
+}
+
+sub print_help () {
+ print_revision($PROGNAME,'@NP_VERSION@');
+ print "Copyright (c) 2002 Subhendu Ghosh/Carlos Canau/Benjamin Schmid\n";
+ print "Copyright (c) 2018 Bernd Arnold\n";
+ print "\n";
+ print_usage();
+ print "\n";
+ print " Checks the uptime of the system using $uptime_file\n";
+ print "\n";
+ print "-w (--warning) = Min. number of uptime to generate warning\n";
+ print "-c (--critical) = Min. number of uptime to generate critical alert ( w < c )\n";
+ print "-f (--for) = Show uptime in a pretty format (Running for x weeks, x days, ...)\n";
+ print "-s (--since) = Show last boot in yyyy-mm-dd HH:MM:SS format (output from 'uptime -s')\n";
+ print "-h (--help)\n";
+ print "-V (--version)\n";
+ print "-v (--verbose) = debugging output\n";
+ print "\n\n";
+ print "Note: -w and -c are required arguments.\n";
+ print " You can suffix both values with s for seconds (default), m (minutes), h (hours), d (days) or w (weeks).\n";
+ print "\n";
+ print "Range support: You may specify a range for both warning and critical thresholds.\n";
+ print " This works without additional Perl modules.\n";
+ print "Example: ./check_uptime -w 10m:4w -c 1m:8w\n";
+ print " Results in a critical state when uptime is below 60 seconds or higher than 8 weeks,\n";
+ print " and in a warning state when uptime is below 10 minutes or above 4 weeks.\n";
+ print "\n\n";
+ support();
+}
+
+sub calc_as_seconds () {
+
+ my $parameter = shift;
+
+ # Check if suffix is present
+ # Calculate parameter to seconds (to get an integer value finally)
+ # If no suffix is present, just return the value
+
+ # Possible suffixes:
+ # s = seconds
+ # m = minutes
+ # h = hours
+ # d = days
+ # w = weeks
+ my %factor = ( "s" => 1,
+ "m" => 60,
+ "h" => 60 * 60,
+ "d" => 60 * 60 * 24,
+ "w" => 60 * 60 * 24 * 7,
+ );
+
+ if ( $parameter =~ /^(\d+)([a-z])$/ ) {
+ my $value = $1;
+ my $suffix = $2;
+ print "detected: value=$value, suffix=$suffix\n" if $verbose;
+ if ( ! defined $factor{$suffix} ) {
+ print "Error: wrong suffix ($suffix) for value '$parameter'";
+ exit $ERRORS{'UNKNOWN'};
+ }
+ $parameter = $value * $factor{$suffix};
+ }
+
+ return $parameter;
+
+}
diff --git a/plugins-scripts/t/check_file_age.t b/plugins-scripts/t/check_file_age.t
index 50a2e699..8b876708 100644
--- a/plugins-scripts/t/check_file_age.t
+++ b/plugins-scripts/t/check_file_age.t
@@ -5,14 +5,14 @@
#
use strict;
-use Test::More tests => 17;
+use Test::More tests => 27;
use NPTest;
my $successOutput = '/^FILE_AGE OK: /';
my $warningOutput = '/^FILE_AGE WARNING: /';
my $criticalOutput = '/^FILE_AGE CRITICAL: /';
my $unknownOutput = '/^FILE_AGE UNKNOWN: /';
-my $performanceOutput = '/ \| age=[0-9]+s;[0-9]+;[0-9]+ size=[0-9]+B;[0-9]+;[0-9]+;0$/';
+my $performanceOutput = '/ \| age=[0-9]+s;[0-9:]+;[0-9:]+ size=[0-9]+B;[0-9:]+;[0-9:]+;0$/';
my $result;
my $temp_file = "/tmp/check_file_age.tmp";
@@ -20,64 +20,75 @@ my $temp_link = "/tmp/check_file_age.link.tmp";
unlink $temp_file, $temp_link;
-$result = NPTest->testCmd(
- "./check_file_age"
- );
+$result = NPTest->testCmd("./check_file_age");
cmp_ok( $result->return_code, '==', 3, "Missing parameters" );
like ( $result->output, $unknownOutput, "Output for unknown correct" );
-$result = NPTest->testCmd(
- "./check_file_age -f $temp_file"
- );
+$result = NPTest->testCmd("./check_file_age -f $temp_file");
cmp_ok( $result->return_code, '==', 2, "File not exists" );
like ( $result->output, $criticalOutput, "Output for file missing correct" );
write_chars(100);
-$result = NPTest->testCmd(
- "./check_file_age -f $temp_file"
- );
+$result = NPTest->testCmd("./check_file_age -f $temp_file");
cmp_ok( $result->return_code, '==', 0, "File is new enough" );
like ( $result->output, $successOutput, "Output for success correct" );
sleep 2;
-$result = NPTest->testCmd(
- "./check_file_age -f $temp_file -w 1"
- );
+$result = NPTest->testCmd("./check_file_age -f $temp_file -w 1");
cmp_ok( $result->return_code, '==', 1, "Warning for file over 1 second old" );
like ( $result->output, $warningOutput, "Output for warning correct" );
-$result = NPTest->testCmd(
- "./check_file_age -f $temp_file -c 1"
- );
+$result = NPTest->testCmd("./check_file_age -f $temp_file -c 1");
cmp_ok( $result->return_code, '==', 2, "Critical for file over 1 second old" );
like ( $result->output, $criticalOutput, "Output for critical correct" );
-$result = NPTest->testCmd(
- "./check_file_age -f $temp_file -c 1000 -W 100"
- );
+$result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -W 100");
cmp_ok( $result->return_code, '==', 0, "Checking file size" );
-$result = NPTest->testCmd(
- "./check_file_age -f $temp_file -c 1000 -W 100"
- );
+$result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -W 100");
like( $result->output, $performanceOutput, "Checking for performance Output" );
-$result = NPTest->testCmd(
- "./check_file_age -f /non/existent --ignore-missing"
- );
+$result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -W 100");
+like( $result->output, $performanceOutput, "Checking for performance Output from range" );
+
+$result = NPTest->testCmd("./check_file_age -f /non/existent --ignore-missing");
cmp_ok( $result->return_code, '==', 0, "Honours --ignore-missing" );
-$result = NPTest->testCmd(
- "./check_file_age -f $temp_file -c 1000 -W 101"
- );
+$result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -W 101");
cmp_ok( $result->return_code, '==', 1, "One byte too short" );
-$result = NPTest->testCmd(
- "./check_file_age -f $temp_file -c 1000 -C 101"
- );
+$result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -C 101");
cmp_ok( $result->return_code, '==', 2, "One byte too short - critical" );
+SKIP: {
+ eval 'use Monitoring::Plugin::Range';
+ skip "Monitoring::Plugin::Range module require", 9 if $@;
+
+ $result = NPTest->testCmd("./check_file_age -f $temp_file -w 0:1");
+ cmp_ok( $result->return_code, '==', 1, "Warning for file over 1 second old by range" );
+ like ( $result->output, $warningOutput, "Output for warning by range correct" );
+
+ $result = NPTest->testCmd("./check_file_age -f $temp_file -c 0:1");
+ cmp_ok( $result->return_code, '==', 2, "Critical for file over 1 second old by range" );
+ like ( $result->output, $criticalOutput, "Output for critical by range correct" );
+
+ $result = NPTest->testCmd("./check_file_age -f $temp_file -c 0:1000 -W 0:100");
+ cmp_ok( $result->return_code, '==', 0, "Checking file size by range" );
+
+ $result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -W 101:");
+ cmp_ok( $result->return_code, '==', 1, "One byte too short by range" );
+
+ $result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -W 0:99");
+ cmp_ok( $result->return_code, '==', 1, "One byte too long by range" );
+
+ $result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -C 101:");
+ cmp_ok( $result->return_code, '==', 2, "One byte too short by range - critical" );
+
+ $result = NPTest->testCmd("./check_file_age -f $temp_file -c 1000 -C 0:99");
+ cmp_ok( $result->return_code, '==', 2, "One byte too long by range - critical" );
+};
+
symlink $temp_file, $temp_link or die "Cannot create symlink";
$result = NPTest->testCmd("./check_file_age -f $temp_link -c 10");
cmp_ok( $result->return_code, '==', 0, "Works for symlinks" );
diff --git a/plugins-scripts/t/check_uptime.t b/plugins-scripts/t/check_uptime.t
new file mode 100644
index 00000000..c395307c
--- /dev/null
+++ b/plugins-scripts/t/check_uptime.t
@@ -0,0 +1,129 @@
+#!/usr/bin/perl -w -I ..
+#
+# check_uptime tests
+#
+#
+
+use strict;
+use Test::More tests => 40;
+use NPTest;
+
+my $result;
+
+$result = NPTest->testCmd(
+ "./check_uptime"
+ );
+cmp_ok( $result->return_code, '==', 3, "Missing parameters" );
+like ( $result->output, '/^Usage: check_uptime -w/', "Output for missing parameters correct" );
+
+$result = NPTest->testCmd(
+ "./check_uptime --help"
+ );
+cmp_ok( $result->return_code, '==', 3, "Help output requested" );
+like ( $result->output, '/ABSOLUTELY NO WARRANTY/', "Output for help correct" );
+
+$result = NPTest->testCmd(
+ "./check_uptime -w 5 -c 2"
+ );
+cmp_ok( $result->return_code, '==', 3, "Warning greater than critical" );
+like ( $result->output, '/^Upper Warning .*cannot be greater than Critical/', "Output for warning greater than critical correct" );
+
+$result = NPTest->testCmd(
+ "./check_uptime -c 1000 -W 100 2>&1"
+ );
+like ( $result->output, '/^Unknown option: W/', "Output with wrong parameter is correct" );
+
+$result = NPTest->testCmd(
+ "./check_uptime -f -w 1 -c 2"
+ );
+cmp_ok( $result->return_code, '==', 2, "Uptime higher than 2 seconds" );
+like ( $result->output, '/Running for \d+/', "Output for the f parameter correct" );
+
+$result = NPTest->testCmd(
+ "./check_uptime -s -w 1 -c 2"
+ );
+cmp_ok( $result->return_code, '==', 2, "Uptime higher than 2 seconds" );
+like ( $result->output, '/Running since \d+/', "Output for the s parameter correct" );
+
+$result = NPTest->testCmd(
+ "./check_uptime -w 1 -c 2"
+ );
+cmp_ok( $result->return_code, '==', 2, "Uptime higher than 2 seconds" );
+like ( $result->output, '/^CRITICAL: uptime is \d+ seconds/', "Output for uptime higher than 2 seconds correct" );
+
+$result = NPTest->testCmd(
+ "./check_uptime -w 1 -c 9999w"
+ );
+cmp_ok( $result->return_code, '==', 1, "Uptime lower than 9999 weeks" );
+like ( $result->output, '/^WARNING: uptime is \d+ seconds/', "Output for uptime lower than 9999 weeks correct" );
+
+$result = NPTest->testCmd(
+ "./check_uptime -w 9998w -c 9999w"
+ );
+cmp_ok( $result->return_code, '==', 0, "Uptime lower than 9998 weeks" );
+like ( $result->output, '/^OK: uptime is \d+ seconds/', "Output for uptime lower than 9998 weeks correct" );
+like ( $result->output, '/\|uptime=[0-9]+s;6046790400;6047395200;/', "Checking for performance output" );
+
+$result = NPTest->testCmd(
+ "./check_uptime -w 111222d -c 222333d"
+ );
+cmp_ok( $result->return_code, '==', 0, "Uptime lower than 111222 days" );
+like ( $result->output, '/^OK: uptime is \d+ seconds/', "Output for uptime lower than 111222 days correct" );
+like ( $result->output, '/\|uptime=[0-9]+s;9609580800;19209571200;/', "Checking for performance output" );
+
+# Same as before, hopefully uptime is higher than 2 seconds so no warning
+$result = NPTest->testCmd(
+ "./check_uptime -w 2:111222d -c 1:222333d"
+ );
+cmp_ok( $result->return_code, '==', 0, "Uptime lower than 111222 days, and higher 2 seconds" );
+like ( $result->output, '/^OK: uptime is \d+ seconds/', "Output for uptime lower than 111222 days, and higher 2 seconds correct" );
+like ( $result->output, '/\|uptime=[0-9]+s;9609580800;19209571200;/', "Checking for performance output" );
+
+# Same as before, now the low warning should trigger
+$result = NPTest->testCmd(
+ "./check_uptime -w 111221d:111222d -c 1:222333d"
+ );
+cmp_ok( $result->return_code, '==', 1, "Uptime lower than 111221 days raises warning" );
+like ( $result->output, '/^WARNING: uptime is \d+ seconds/', "Output for uptime lower than 111221 days correct" );
+like ( $result->output, '/Exceeds lower warn threshold/', "Exceeds text correct" );
+like ( $result->output, '/\|uptime=[0-9]+s;9609580800;19209571200;/', "Checking for performance output" );
+
+# Same as before, now the low critical should trigger
+$result = NPTest->testCmd(
+ "./check_uptime -w 111221d:111222d -c 111220d:222333d"
+ );
+cmp_ok( $result->return_code, '==', 2, "Uptime lower than 111220 days raises critical" );
+like ( $result->output, '/^CRITICAL: uptime is \d+ seconds/', "Output for uptime lower than 111220 days correct" );
+like ( $result->output, '/Exceeds lower crit threshold/', "Exceeds text correct" );
+like ( $result->output, '/\|uptime=[0-9]+s;9609580800;19209571200;/', "Checking for performance output" );
+
+
+#
+# Range values using ":" without two parts ("a:b") is invalid
+# Strings without two parts are always considered as upper threshold
+#
+
+$result = NPTest->testCmd(
+ "./check_uptime -w 2: -c 1:4"
+ );
+cmp_ok( $result->return_code, '==', 3, "Wrong parameter format raises unknown" );
+like ( $result->output, '/^Upper warning .* is not numeric/', "Output for wrong parameter format correct" );
+
+$result = NPTest->testCmd(
+ "./check_uptime -w 2:3 -c 1:"
+ );
+cmp_ok( $result->return_code, '==', 3, "Wrong parameter format raises unknown" );
+like ( $result->output, '/^Upper critical .* is not numeric/', "Output for wrong parameter format correct" );
+
+$result = NPTest->testCmd(
+ "./check_uptime -w :3 -c 1:4"
+ );
+cmp_ok( $result->return_code, '==', 3, "Wrong parameter format raises unknown" );
+like ( $result->output, '/^Upper warning .* is not numeric/', "Output for wrong parameter format correct" );
+
+$result = NPTest->testCmd(
+ "./check_uptime -w 2:3 -c :4"
+ );
+cmp_ok( $result->return_code, '==', 3, "Wrong parameter format raises unknown" );
+like ( $result->output, '/^Upper critical .* is not numeric/', "Output for wrong parameter format correct" );
+
diff --git a/plugins-scripts/t/utils.t b/plugins-scripts/t/utils.t
index 9c2c5695..5c231791 100644
--- a/plugins-scripts/t/utils.t
+++ b/plugins-scripts/t/utils.t
@@ -10,6 +10,7 @@ use strict;
use Test::More;
use NPTest;
+use lib ".";
use lib "..";
use utils;
diff --git a/plugins/Makefile.am b/plugins/Makefile.am
index 0ddf9bd1..3fde54d6 100644
--- a/plugins/Makefile.am
+++ b/plugins/Makefile.am
@@ -38,7 +38,9 @@ check_tcp_programs = check_ftp check_imap check_nntp check_pop \
EXTRA_PROGRAMS = check_mysql check_radius check_pgsql check_snmp check_hpjd \
check_swap check_fping check_ldap check_game check_dig \
check_nagios check_by_ssh check_dns check_nt check_ide_smart \
- check_procs check_mysql_query check_apt check_dbi
+ check_procs check_mysql_query check_apt check_dbi check_curl
+
+SUBDIRS = picohttpparser
EXTRA_DIST = t tests
@@ -69,6 +71,9 @@ test-debug:
check_apt_LDADD = $(BASEOBJS)
check_cluster_LDADD = $(BASEOBJS)
+check_curl_CFLAGS = $(AM_CFLAGS) $(LIBCURLCFLAGS) $(URIPARSERCFLAGS) $(LIBCURLINCLUDE) $(URIPARSERINCLUDE) -Ipicohttpparser
+check_curl_CPPFLAGS = $(AM_CPPFLAGS) $(LIBCURLCFLAGS) $(URIPARSERCFLAGS) $(LIBCURLINCLUDE) $(URIPARSERINCLUDE) -Ipicohttpparser
+check_curl_LDADD = $(NETLIBS) $(LIBCURLLIBS) $(SSLOBJS) $(URIPARSERLIBS) picohttpparser/libpicohttpparser.a
check_dbi_LDADD = $(NETLIBS) $(DBILIBS)
check_dig_LDADD = $(NETLIBS)
check_disk_LDADD = $(BASEOBJS)
@@ -89,7 +94,7 @@ check_mysql_query_CFLAGS = $(AM_CFLAGS) $(MYSQLCFLAGS)
check_mysql_query_CPPFLAGS = $(AM_CPPFLAGS) $(MYSQLINCLUDE)
check_mysql_query_LDADD = $(NETLIBS) $(MYSQLLIBS)
check_nagios_LDADD = $(BASEOBJS)
-check_nt_LDADD = $(NETLIBS)
+check_nt_LDADD = $(NETLIBS)
check_ntp_LDADD = $(NETLIBS) $(MATHLIBS)
check_ntp_peer_LDADD = $(NETLIBS) $(MATHLIBS)
check_nwstat_LDADD = $(NETLIBS)
diff --git a/plugins/check_apt.c b/plugins/check_apt.c
index c90b3df7..d7be5750 100644
--- a/plugins/check_apt.c
+++ b/plugins/check_apt.c
@@ -66,12 +66,17 @@ char* construct_cmdline(upgrade_type u, const char *opts);
/* run an apt-get update */
int run_update(void);
/* run an apt-get upgrade */
-int run_upgrade(int *pkgcount, int *secpkgcount);
+int run_upgrade(int *pkgcount, int *secpkgcount, char ***pkglist, char ***secpkglist);
/* add another clause to a regexp */
char* add_to_regexp(char *expr, const char *next);
+/* extract package name from Inst line */
+char* pkg_name(char *line);
+/* string comparison function for qsort */
+int cmpstringp(const void *p1, const void *p2);
/* configuration variables */
static int verbose = 0; /* -v */
+static int list = 0; /* list packages available for upgrade */
static int do_update = 0; /* whether to call apt-get update */
static int only_critical = 0; /* whether to warn about non-critical updates */
static upgrade_type upgrade = UPGRADE; /* which type of upgrade to do */
@@ -81,13 +86,16 @@ static char *do_include = NULL; /* regexp to only include certain packages */
static char *do_exclude = NULL; /* regexp to only exclude certain packages */
static char *do_critical = NULL; /* regexp specifying critical packages */
static char *input_filename = NULL; /* input filename for testing */
+/* number of packages available for upgrade to return WARNING status */
+static int packages_warning = 1;
/* other global variables */
static int stderr_warning = 0; /* if a cmd issued output on stderr */
static int exec_warning = 0; /* if a cmd exited non-zero */
int main (int argc, char **argv) {
- int result=STATE_UNKNOWN, packages_available=0, sec_count=0;
+ int result=STATE_UNKNOWN, packages_available=0, sec_count=0, i=0;
+ char **packages_list=NULL, **secpackages_list=NULL;
/* Parse extra opts if any */
argv=np_extra_opts(&argc, argv, progname);
@@ -107,11 +115,11 @@ int main (int argc, char **argv) {
if(do_update) result = run_update();
/* apt-get upgrade */
- result = max_state(result, run_upgrade(&packages_available, &sec_count));
+ result = max_state(result, run_upgrade(&packages_available, &sec_count, &packages_list, &secpackages_list));
if(sec_count > 0){
result = max_state(result, STATE_CRITICAL);
- } else if(packages_available > 0 && only_critical == 0){
+ } else if(packages_available >= packages_warning && only_critical == 0){
result = max_state(result, STATE_WARNING);
} else if(result > STATE_UNKNOWN){
result = STATE_UNKNOWN;
@@ -130,6 +138,18 @@ int main (int argc, char **argv) {
sec_count
);
+ if(list) {
+ qsort(secpackages_list, sec_count, sizeof(char*), cmpstringp);
+ qsort(packages_list, packages_available-sec_count, sizeof(char*), cmpstringp);
+
+ for(i = 0; i < sec_count; i++)
+ printf("%s (security)\n", secpackages_list[i]);
+ if (only_critical == 0) {
+ for(i = 0; i < packages_available - sec_count; i++)
+ printf("%s\n", packages_list[i]);
+ }
+ }
+
return result;
}
@@ -146,16 +166,18 @@ int process_arguments (int argc, char **argv) {
{"upgrade", optional_argument, 0, 'U'},
{"no-upgrade", no_argument, 0, 'n'},
{"dist-upgrade", optional_argument, 0, 'd'},
+ {"list", no_argument, 0, 'l'},
{"include", required_argument, 0, 'i'},
{"exclude", required_argument, 0, 'e'},
{"critical", required_argument, 0, 'c'},
{"only-critical", no_argument, 0, 'o'},
{"input-file", required_argument, 0, INPUT_FILE_OPT},
+ {"packages-warning", required_argument, 0, 'w'},
{0, 0, 0, 0}
};
while(1) {
- c = getopt_long(argc, argv, "hVvt:u::U::d::ni:e:c:o", longopts, NULL);
+ c = getopt_long(argc, argv, "hVvt:u::U::d::nli:e:c:ow:", longopts, NULL);
if(c == -1 || c == EOF || c == 1) break;
@@ -196,6 +218,9 @@ int process_arguments (int argc, char **argv) {
if(update_opts==NULL) die(STATE_UNKNOWN, "strdup failed");
}
break;
+ case 'l':
+ list=1;
+ break;
case 'i':
do_include=add_to_regexp(do_include, optarg);
break;
@@ -211,6 +236,9 @@ int process_arguments (int argc, char **argv) {
case INPUT_FILE_OPT:
input_filename = optarg;
break;
+ case 'w':
+ packages_warning = atoi(optarg);
+ break;
default:
/* print short usage statement if args not parsable */
usage5();
@@ -222,7 +250,7 @@ int process_arguments (int argc, char **argv) {
/* run an apt-get upgrade */
-int run_upgrade(int *pkgcount, int *secpkgcount){
+int run_upgrade(int *pkgcount, int *secpkgcount, char ***pkglist, char ***secpkglist){
int i=0, result=STATE_UNKNOWN, regres=0, pc=0, spc=0;
struct output chld_out, chld_err;
regex_t ireg, ereg, sreg;
@@ -278,6 +306,11 @@ int run_upgrade(int *pkgcount, int *secpkgcount){
cmdline);
}
+ *pkglist=malloc(sizeof(char *) * chld_out.lines);
+	if(!*pkglist) die(STATE_UNKNOWN, "malloc failed!\n");
+ *secpkglist=malloc(sizeof(char *) * chld_out.lines);
+	if(!*secpkglist) die(STATE_UNKNOWN, "malloc failed!\n");
+
/* parse the output, which should only consist of lines like
*
* Inst package ....
@@ -302,6 +335,9 @@ int run_upgrade(int *pkgcount, int *secpkgcount){
if(regexec(&sreg, chld_out.line[i], 0, NULL, 0)==0){
spc++;
if(verbose) printf("*");
+ (*secpkglist)[spc-1] = pkg_name(chld_out.line[i]);
+ } else {
+ (*pkglist)[pc-spc-1] = pkg_name(chld_out.line[i]);
}
if(verbose){
printf("*%s\n", chld_out.line[i]);
@@ -368,6 +404,31 @@ int run_update(void){
return result;
}
+char* pkg_name(char *line){
+ char *start=NULL, *space=NULL, *pkg=NULL;
+ int len=0;
+
+ start = line + strlen(PKGINST_PREFIX);
+ len = strlen(start);
+
+ space = index(start, ' ');
+ if(space!=NULL){
+ len = space - start;
+ }
+
+ pkg=malloc(sizeof(char)*(len+1));
+ if(!pkg) die(STATE_UNKNOWN, "malloc failed!\n");
+
+ strncpy(pkg, start, len);
+ pkg[len]='\0';
+
+ return pkg;
+}
+
+int cmpstringp(const void *p1, const void *p2){
+ return strcmp(* (char * const *) p1, * (char * const *) p2);
+}
+
char* add_to_regexp(char *expr, const char *next){
char *re=NULL;
@@ -450,8 +511,11 @@ print_help (void)
printf (" %s\n", "-d, --dist-upgrade=OPTS");
printf (" %s\n", _("Perform a dist-upgrade instead of normal upgrade. Like with -U OPTS"));
printf (" %s\n", _("can be provided to override the default options."));
- printf (" %s\n", " -n, --no-upgrade");
+ printf (" %s\n", "-n, --no-upgrade");
printf (" %s\n", _("Do not run the upgrade. Probably not useful (without -u at least)."));
+ printf (" %s\n", "-l, --list");
+ printf (" %s\n", _("List packages available for upgrade. Packages are printed sorted by"));
+ printf (" %s\n", _("name with security packages listed first."));
printf (" %s\n", "-i, --include=REGEXP");
printf (" %s\n", _("Include only packages matching REGEXP. Can be specified multiple times"));
printf (" %s\n", _("the values will be combined together. Any packages matching this list"));
@@ -472,7 +536,10 @@ print_help (void)
printf (" %s\n", "-o, --only-critical");
printf (" %s\n", _("Only warn about upgrades matching the critical list. The total number"));
printf (" %s\n", _("of upgrades will be printed, but any non-critical upgrades will not cause"));
- printf (" %s\n\n", _("the plugin to return WARNING status."));
+ printf (" %s\n", _("the plugin to return WARNING status."));
+ printf (" %s\n", "-w, --packages-warning");
+  printf (" %s\n", _("Minimum number of packages available for upgrade to return WARNING status."));
+ printf (" %s\n\n", _("Default is 1 package."));
printf ("%s\n\n", _("The following options require root privileges and should be used with care:"));
printf (" %s\n", "-u, --update=OPTS");
@@ -490,5 +557,5 @@ void
print_usage(void)
{
printf ("%s\n", _("Usage:"));
- printf ("%s [[-d|-u|-U]opts] [-n] [-t timeout]\n", progname);
+ printf ("%s [[-d|-u|-U]opts] [-n] [-l] [-t timeout] [-w packages-warning]\n", progname);
}
diff --git a/plugins/check_by_ssh.c b/plugins/check_by_ssh.c
index 13d8bc3b..485bf3be 100644
--- a/plugins/check_by_ssh.c
+++ b/plugins/check_by_ssh.c
@@ -230,7 +230,6 @@ process_arguments (int argc, char **argv)
timeout_interval = atoi (optarg);
break;
case 'H': /* host */
- host_or_die(optarg);
hostname = optarg;
break;
case 'p': /* port number */
@@ -329,7 +328,6 @@ process_arguments (int argc, char **argv)
if (c <= argc) {
die (STATE_UNKNOWN, _("%s: You must provide a host name\n"), progname);
}
- host_or_die(argv[c]);
hostname = argv[c++];
}
diff --git a/plugins/check_cluster.c b/plugins/check_cluster.c
index b86e501d..e1ede9f7 100644
--- a/plugins/check_cluster.c
+++ b/plugins/check_cluster.c
@@ -143,6 +143,7 @@ int main(int argc, char **argv){
int process_arguments(int argc, char **argv){
int c;
+ char *ptr;
int option=0;
static struct option longopts[]={
{"data", required_argument,0,'d'},
@@ -188,6 +189,15 @@ int process_arguments(int argc, char **argv){
case 'd': /* data values */
data_vals=(char *)strdup(optarg);
+ /* validate data */
+ for (ptr=data_vals;ptr!=NULL;ptr+=2){
+ if (ptr[0]<'0' || ptr[0]>'3')
+ return ERROR;
+ if (ptr[1]=='\0')
+ break;
+ if (ptr[1]!=',')
+ return ERROR;
+ }
break;
case 'l': /* text label */
diff --git a/plugins/check_curl.c b/plugins/check_curl.c
new file mode 100644
index 00000000..14cc8463
--- /dev/null
+++ b/plugins/check_curl.c
@@ -0,0 +1,2467 @@
+/*****************************************************************************
+*
+* Monitoring check_curl plugin
+*
+* License: GPL
+* Copyright (c) 1999-2019 Monitoring Plugins Development Team
+*
+* Description:
+*
+* This file contains the check_curl plugin
+*
+* This plugin tests the HTTP service on the specified host. It can test
+* normal (http) and secure (https) servers, follow redirects, search for
+* strings and regular expressions, check connection times, and report on
+* certificate expiration times.
+*
+* This plugin uses functions from the curl library, see
+* http://curl.haxx.se
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation, either version 3 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*
+*
+*****************************************************************************/
+const char *progname = "check_curl";
+
+const char *copyright = "2006-2019";
+const char *email = "devel@monitoring-plugins.org";
+
+#include <ctype.h>
+
+#include "common.h"
+#include "utils.h"
+
+#ifndef LIBCURL_PROTOCOL_HTTP
+#error libcurl compiled without HTTP support, compiling check_curl plugin does not make a lot of sense
+#endif
+
+#include "curl/curl.h"
+#include "curl/easy.h"
+
+#include "picohttpparser.h"
+
+#include "uriparser/Uri.h"
+
+#include <arpa/inet.h>
+
+#if defined(HAVE_SSL) && defined(USE_OPENSSL)
+#include <openssl/opensslv.h>
+#endif
+
+#include <netdb.h>
+
+#define MAKE_LIBCURL_VERSION(major, minor, patch) ((major)*0x10000 + (minor)*0x100 + (patch))
+
+#define DEFAULT_BUFFER_SIZE 2048
+#define DEFAULT_SERVER_URL "/"
+#define HTTP_EXPECT "HTTP/"
+#define DEFAULT_MAX_REDIRS 15
+#define INET_ADDR_MAX_SIZE INET6_ADDRSTRLEN
+enum {
+ MAX_IPV4_HOSTLENGTH = 255,
+ HTTP_PORT = 80,
+ HTTPS_PORT = 443,
+ MAX_PORT = 65535
+};
+
+enum {
+ STICKY_NONE = 0,
+ STICKY_HOST = 1,
+ STICKY_PORT = 2
+};
+
+enum {
+ FOLLOW_HTTP_CURL = 0,
+ FOLLOW_LIBCURL = 1
+};
+
+/* for buffers for header and body */
+typedef struct {
+ char *buf;
+ size_t buflen;
+ size_t bufsize;
+} curlhelp_write_curlbuf;
+
+/* for buffering the data sent in PUT */
+typedef struct {
+ char *buf;
+ size_t buflen;
+ off_t pos;
+} curlhelp_read_curlbuf;
+
+/* for parsing the HTTP status line */
+typedef struct {
+ int http_major; /* major version of the protocol, always 1 (HTTP/0.9
+ * never reached the big internet most likely) */
+ int http_minor; /* minor version of the protocol, usually 0 or 1 */
+ int http_code; /* HTTP return code as in RFC 2145 */
+ int http_subcode; /* Microsoft IIS extension, HTTP subcodes, see
+ * http://support.microsoft.com/kb/318380/en-us */
+ const char *msg; /* the human readable message */
+ char *first_line; /* a copy of the first line */
+} curlhelp_statusline;
+
+/* to know the underlying SSL library used by libcurl */
+typedef enum curlhelp_ssl_library {
+ CURLHELP_SSL_LIBRARY_UNKNOWN,
+ CURLHELP_SSL_LIBRARY_OPENSSL,
+ CURLHELP_SSL_LIBRARY_LIBRESSL,
+ CURLHELP_SSL_LIBRARY_GNUTLS,
+ CURLHELP_SSL_LIBRARY_NSS
+} curlhelp_ssl_library;
+
+enum {
+ REGS = 2,
+ MAX_RE_SIZE = 1024
+};
+#include "regex.h"
+regex_t preg;
+regmatch_t pmatch[REGS];
+char regexp[MAX_RE_SIZE];
+int cflags = REG_NOSUB | REG_EXTENDED | REG_NEWLINE;
+int errcode;
+int invert_regex = 0;
+
+char *server_address = NULL;
+char *host_name = NULL;
+char *server_url = 0;
+char server_ip[DEFAULT_BUFFER_SIZE];
+struct curl_slist *server_ips = NULL;
+int specify_port = FALSE;
+unsigned short server_port = HTTP_PORT;
+unsigned short virtual_port = 0;
+int host_name_length;
+char output_header_search[30] = "";
+char output_string_search[30] = "";
+char *warning_thresholds = NULL;
+char *critical_thresholds = NULL;
+int days_till_exp_warn, days_till_exp_crit;
+thresholds *thlds;
+char user_agent[DEFAULT_BUFFER_SIZE];
+int verbose = 0;
+int show_extended_perfdata = FALSE;
+int show_body = FALSE;
+int min_page_len = 0;
+int max_page_len = 0;
+int redir_depth = 0;
+int max_depth = DEFAULT_MAX_REDIRS;
+char *http_method = NULL;
+char *http_post_data = NULL;
+char *http_content_type = NULL;
+CURL *curl;
+struct curl_slist *header_list = NULL;
+curlhelp_write_curlbuf body_buf;
+curlhelp_write_curlbuf header_buf;
+curlhelp_statusline status_line;
+curlhelp_read_curlbuf put_buf;
+char http_header[DEFAULT_BUFFER_SIZE];
+long code;
+long socket_timeout = DEFAULT_SOCKET_TIMEOUT;
+double total_time;
+double time_connect;
+double time_appconnect;
+double time_headers;
+double time_firstbyte;
+char errbuf[CURL_ERROR_SIZE+1];
+CURLcode res;
+char url[DEFAULT_BUFFER_SIZE];
+char msg[DEFAULT_BUFFER_SIZE];
+char perfstring[DEFAULT_BUFFER_SIZE];
+char header_expect[MAX_INPUT_BUFFER] = "";
+char string_expect[MAX_INPUT_BUFFER] = "";
+char server_expect[MAX_INPUT_BUFFER] = HTTP_EXPECT;
+int server_expect_yn = 0;
+char user_auth[MAX_INPUT_BUFFER] = "";
+char proxy_auth[MAX_INPUT_BUFFER] = "";
+char **http_opt_headers;
+int http_opt_headers_count = 0;
+int display_html = FALSE;
+int onredirect = STATE_OK;
+int followmethod = FOLLOW_HTTP_CURL;
+int followsticky = STICKY_NONE;
+int use_ssl = FALSE;
+int use_sni = TRUE;
+int check_cert = FALSE;
+typedef union {
+ struct curl_slist* to_info;
+ struct curl_certinfo* to_certinfo;
+} cert_ptr_union;
+cert_ptr_union cert_ptr;
+int ssl_version = CURL_SSLVERSION_DEFAULT;
+char *client_cert = NULL;
+char *client_privkey = NULL;
+char *ca_cert = NULL;
+int verify_peer_and_host = FALSE;
+int is_openssl_callback = FALSE;
+#if defined(HAVE_SSL) && defined(USE_OPENSSL)
+X509 *cert = NULL;
+#endif /* defined(HAVE_SSL) && defined(USE_OPENSSL) */
+int no_body = FALSE;
+int maximum_age = -1;
+int address_family = AF_UNSPEC;
+curlhelp_ssl_library ssl_library = CURLHELP_SSL_LIBRARY_UNKNOWN;
+int curl_http_version = CURL_HTTP_VERSION_NONE;
+int automatic_decompression = FALSE;
+
+int process_arguments (int, char**);
+void handle_curl_option_return_code (CURLcode res, const char* option);
+int check_http (void);
+void redir (curlhelp_write_curlbuf*);
+char *perfd_time (double microsec);
+char *perfd_time_connect (double microsec);
+char *perfd_time_ssl (double microsec);
+char *perfd_time_firstbyte (double microsec);
+char *perfd_time_headers (double microsec);
+char *perfd_time_transfer (double microsec);
+char *perfd_size (int page_len);
+void print_help (void);
+void print_usage (void);
+void print_curl_version (void);
+int curlhelp_initwritebuffer (curlhelp_write_curlbuf*);
+int curlhelp_buffer_write_callback (void*, size_t , size_t , void*);
+void curlhelp_freewritebuffer (curlhelp_write_curlbuf*);
+int curlhelp_initreadbuffer (curlhelp_read_curlbuf *, const char *, size_t);
+int curlhelp_buffer_read_callback (void *, size_t , size_t , void *);
+void curlhelp_freereadbuffer (curlhelp_read_curlbuf *);
+curlhelp_ssl_library curlhelp_get_ssl_library (CURL*);
+const char* curlhelp_get_ssl_library_string (curlhelp_ssl_library);
+int net_noopenssl_check_certificate (cert_ptr_union*, int, int);
+
+int curlhelp_parse_statusline (const char*, curlhelp_statusline *);
+void curlhelp_free_statusline (curlhelp_statusline *);
+char *get_header_value (const struct phr_header* headers, const size_t nof_headers, const char* header);
+int check_document_dates (const curlhelp_write_curlbuf *, char (*msg)[DEFAULT_BUFFER_SIZE]);
+int get_content_length (const curlhelp_write_curlbuf* header_buf, const curlhelp_write_curlbuf* body_buf);
+
+#if defined(HAVE_SSL) && defined(USE_OPENSSL)
+int np_net_ssl_check_certificate(X509 *certificate, int days_till_exp_warn, int days_till_exp_crit);
+#endif /* defined(HAVE_SSL) && defined(USE_OPENSSL) */
+
+void remove_newlines (char *);
+void test_file (char *);
+
+int
+main (int argc, char **argv)
+{
+ int result = STATE_UNKNOWN;
+
+ setlocale (LC_ALL, "");
+ bindtextdomain (PACKAGE, LOCALEDIR);
+ textdomain (PACKAGE);
+
+ /* Parse extra opts if any */
+ argv = np_extra_opts (&argc, argv, progname);
+
+ /* set defaults */
+ snprintf( user_agent, DEFAULT_BUFFER_SIZE, "%s/v%s (monitoring-plugins %s, %s)",
+ progname, NP_VERSION, VERSION, curl_version());
+
+ /* parse arguments */
+ if (process_arguments (argc, argv) == ERROR)
+ usage4 (_("Could not parse arguments"));
+
+ if (display_html == TRUE)
+ printf ("<A HREF=\"%s://%s:%d%s\" target=\"_blank\">",
+ use_ssl ? "https" : "http",
+ host_name ? host_name : server_address,
+ virtual_port ? virtual_port : server_port,
+ server_url);
+
+ result = check_http ();
+ return result;
+}
+
+#ifdef HAVE_SSL
+#ifdef USE_OPENSSL
+
+int verify_callback(int preverify_ok, X509_STORE_CTX *x509_ctx)
+{
+ /* TODO: we get all certificates of the chain, so which ones
+ * should we test?
+ * TODO: is the last certificate always the server certificate?
+ */
+ cert = X509_STORE_CTX_get_current_cert(x509_ctx);
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L
+ X509_up_ref(cert);
+#endif
+ if (verbose>=2) {
+ puts("* SSL verify callback with certificate:");
+ X509_NAME *subject, *issuer;
+ printf("* issuer:\n");
+ issuer = X509_get_issuer_name( cert );
+ X509_NAME_print_ex_fp(stdout, issuer, 5, XN_FLAG_MULTILINE);
+ printf("* curl verify_callback:\n* subject:\n");
+ subject = X509_get_subject_name( cert );
+ X509_NAME_print_ex_fp(stdout, subject, 5, XN_FLAG_MULTILINE);
+ puts("");
+ }
+ return 1;
+}
+
+CURLcode sslctxfun(CURL *curl, SSL_CTX *sslctx, void *parm)
+{
+ SSL_CTX_set_verify(sslctx, SSL_VERIFY_PEER, verify_callback);
+
+ return CURLE_OK;
+}
+
+#endif /* USE_OPENSSL */
+#endif /* HAVE_SSL */
+
+/* returns a string "HTTP/1.x" or "HTTP/2" */
+static char *string_statuscode (int major, int minor)
+{
+ static char buf[10];
+
+ switch (major) {
+ case 1:
+ snprintf (buf, sizeof (buf), "HTTP/%d.%d", major, minor);
+ break;
+ case 2:
+ case 3:
+ snprintf (buf, sizeof (buf), "HTTP/%d", major);
+ break;
+ default:
+ /* assuming here HTTP/N with N>=4 */
+ snprintf (buf, sizeof (buf), "HTTP/%d", major);
+ break;
+ }
+
+ return buf;
+}
+
+/* Checks if the server 'reply' is one of the expected 'statuscodes' */
+static int
+expected_statuscode (const char *reply, const char *statuscodes)
+{
+ char *expected, *code;
+ int result = 0;
+
+ if ((expected = strdup (statuscodes)) == NULL)
+ die (STATE_UNKNOWN, _("HTTP UNKNOWN - Memory allocation error\n"));
+
+ for (code = strtok (expected, ","); code != NULL; code = strtok (NULL, ","))
+ if (strstr (reply, code) != NULL) {
+ result = 1;
+ break;
+ }
+
+ free (expected);
+ return result;
+}
+
+void
+handle_curl_option_return_code (CURLcode res, const char* option)
+{
+ if (res != CURLE_OK) {
+ snprintf (msg, DEFAULT_BUFFER_SIZE, _("Error while setting cURL option '%s': cURL returned %d - %s"),
+ option, res, curl_easy_strerror(res));
+ die (STATE_CRITICAL, "HTTP CRITICAL - %s\n", msg);
+ }
+}
+
+int
+lookup_host (const char *host, char *buf, size_t buflen)
+{
+ struct addrinfo hints, *res, *result;
+ int errcode;
+ void *ptr;
+
+ memset (&hints, 0, sizeof (hints));
+ hints.ai_family = address_family;
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_flags |= AI_CANONNAME;
+
+ errcode = getaddrinfo (host, NULL, &hints, &result);
+ if (errcode != 0)
+ return errcode;
+
+ res = result;
+
+ while (res) {
+ inet_ntop (res->ai_family, res->ai_addr->sa_data, buf, buflen);
+ switch (res->ai_family) {
+ case AF_INET:
+ ptr = &((struct sockaddr_in *) res->ai_addr)->sin_addr;
+ break;
+ case AF_INET6:
+ ptr = &((struct sockaddr_in6 *) res->ai_addr)->sin6_addr;
+ break;
+ }
+ inet_ntop (res->ai_family, ptr, buf, buflen);
+ if (verbose >= 1)
+ printf ("* getaddrinfo IPv%d address: %s\n",
+ res->ai_family == PF_INET6 ? 6 : 4, buf);
+ res = res->ai_next;
+ }
+
+ freeaddrinfo(result);
+
+ return 0;
+}
+
+int
+check_http (void)
+{
+ int result = STATE_OK;
+ int page_len = 0;
+ int i;
+ char *force_host_header = NULL;
+ struct curl_slist *host = NULL;
+ char addrstr[100];
+ char dnscache[DEFAULT_BUFFER_SIZE];
+
+ /* initialize curl */
+ if (curl_global_init (CURL_GLOBAL_DEFAULT) != CURLE_OK)
+ die (STATE_UNKNOWN, "HTTP UNKNOWN - curl_global_init failed\n");
+
+ if ((curl = curl_easy_init()) == NULL)
+ die (STATE_UNKNOWN, "HTTP UNKNOWN - curl_easy_init failed\n");
+
+ if (verbose >= 1)
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_VERBOSE, TRUE), "CURLOPT_VERBOSE");
+
+ /* print everything on stdout like check_http would do */
+ handle_curl_option_return_code (curl_easy_setopt(curl, CURLOPT_STDERR, stdout), "CURLOPT_STDERR");
+
+ if (automatic_decompression)
+#if LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 21, 6)
+ handle_curl_option_return_code (curl_easy_setopt(curl, CURLOPT_ACCEPT_ENCODING, ""), "CURLOPT_ACCEPT_ENCODING");
+#else
+ handle_curl_option_return_code (curl_easy_setopt(curl, CURLOPT_ENCODING, ""), "CURLOPT_ENCODING");
+#endif /* LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 21, 6) */
+
+ /* initialize buffer for body of the answer */
+ if (curlhelp_initwritebuffer(&body_buf) < 0)
+ die (STATE_UNKNOWN, "HTTP CRITICAL - out of memory allocating buffer for body\n");
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_WRITEFUNCTION, (curl_write_callback)curlhelp_buffer_write_callback), "CURLOPT_WRITEFUNCTION");
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_WRITEDATA, (void *)&body_buf), "CURLOPT_WRITEDATA");
+
+ /* initialize buffer for header of the answer */
+ if (curlhelp_initwritebuffer( &header_buf ) < 0)
+ die (STATE_UNKNOWN, "HTTP CRITICAL - out of memory allocating buffer for header\n" );
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_HEADERFUNCTION, (curl_write_callback)curlhelp_buffer_write_callback), "CURLOPT_HEADERFUNCTION");
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_WRITEHEADER, (void *)&header_buf), "CURLOPT_WRITEHEADER");
+
+ /* set the error buffer */
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_ERRORBUFFER, errbuf), "CURLOPT_ERRORBUFFER");
+
+ /* set timeouts */
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_CONNECTTIMEOUT, socket_timeout), "CURLOPT_CONNECTTIMEOUT");
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_TIMEOUT, socket_timeout), "CURLOPT_TIMEOUT");
+
+ // fill dns resolve cache to make curl connect to the given server_address instead of the host_name, only required for ssl, because we use the host_name later on to make SNI happy
+ if(use_ssl && host_name != NULL) {
+ if ( (res=lookup_host (server_address, addrstr, 100)) != 0) {
+ snprintf (msg, DEFAULT_BUFFER_SIZE, _("Unable to lookup IP address for '%s': getaddrinfo returned %d - %s"),
+ server_address, res, gai_strerror (res));
+ die (STATE_CRITICAL, "HTTP CRITICAL - %s\n", msg);
+ }
+ snprintf (dnscache, DEFAULT_BUFFER_SIZE, "%s:%d:%s", host_name, server_port, addrstr);
+ host = curl_slist_append(NULL, dnscache);
+ curl_easy_setopt(curl, CURLOPT_RESOLVE, host);
+ if (verbose>=1)
+ printf ("* curl CURLOPT_RESOLVE: %s\n", dnscache);
+ }
+
+ /* compose URL: use the address we want to connect to, set Host: header later */
+ snprintf (url, DEFAULT_BUFFER_SIZE, "%s://%s:%d%s",
+ use_ssl ? "https" : "http",
+    use_ssl && host_name != NULL ? host_name : server_address,
+ server_port,
+ server_url
+ );
+
+ if (verbose>=1)
+ printf ("* curl CURLOPT_URL: %s\n", url);
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_URL, url), "CURLOPT_URL");
+
+ /* extract proxy information for legacy proxy https requests */
+ if (!strcmp(http_method, "CONNECT") || strstr(server_url, "http") == server_url) {
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_PROXY, server_address), "CURLOPT_PROXY");
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_PROXYPORT, (long)server_port), "CURLOPT_PROXYPORT");
+ if (verbose>=2)
+ printf ("* curl CURLOPT_PROXY: %s:%d\n", server_address, server_port);
+ http_method = "GET";
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_URL, server_url), "CURLOPT_URL");
+ }
+
+ /* disable body for HEAD request */
+ if (http_method && !strcmp (http_method, "HEAD" )) {
+ no_body = TRUE;
+ }
+
+ /* set HTTP protocol version */
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_HTTP_VERSION, curl_http_version), "CURLOPT_HTTP_VERSION");
+
+ /* set HTTP method */
+ if (http_method) {
+ if (!strcmp(http_method, "POST"))
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_POST, 1), "CURLOPT_POST");
+ else if (!strcmp(http_method, "PUT"))
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_UPLOAD, 1), "CURLOPT_UPLOAD");
+ else
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_CUSTOMREQUEST, http_method), "CURLOPT_CUSTOMREQUEST");
+ }
+
+ /* check if Host header is explicitly set in options */
+ if (http_opt_headers_count) {
+ for (i = 0; i < http_opt_headers_count ; i++) {
+ if (strncmp(http_opt_headers[i], "Host:", 5) == 0) {
+ force_host_header = http_opt_headers[i];
+ }
+ }
+ }
+
+ /* set hostname (virtual hosts), not needed if CURLOPT_CONNECT_TO is used, but left in anyway */
+ if(host_name != NULL && force_host_header == NULL) {
+ if((virtual_port != HTTP_PORT && !use_ssl) || (virtual_port != HTTPS_PORT && use_ssl)) {
+ snprintf(http_header, DEFAULT_BUFFER_SIZE, "Host: %s:%d", host_name, virtual_port);
+ } else {
+ snprintf(http_header, DEFAULT_BUFFER_SIZE, "Host: %s", host_name);
+ }
+ header_list = curl_slist_append (header_list, http_header);
+ }
+
+ /* always close connection, be nice to servers */
+ snprintf (http_header, DEFAULT_BUFFER_SIZE, "Connection: close");
+ header_list = curl_slist_append (header_list, http_header);
+
+ /* attach additional headers supplied by the user */
+ /* optionally send any other header tag */
+ if (http_opt_headers_count) {
+ for (i = 0; i < http_opt_headers_count ; i++) {
+ header_list = curl_slist_append (header_list, http_opt_headers[i]);
+ }
+ /* This cannot be free'd here because a redirection will then try to access this and segfault */
+ /* Covered in a testcase in tests/check_http.t */
+ /* free(http_opt_headers); */
+ }
+
+ /* set HTTP headers */
+ handle_curl_option_return_code (curl_easy_setopt( curl, CURLOPT_HTTPHEADER, header_list ), "CURLOPT_HTTPHEADER");
+
+#ifdef LIBCURL_FEATURE_SSL
+
+ /* set SSL version, warn about unsecure or unsupported versions */
+ if (use_ssl) {
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_SSLVERSION, ssl_version), "CURLOPT_SSLVERSION");
+ }
+
+ /* client certificate and key to present to server (SSL) */
+ if (client_cert)
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_SSLCERT, client_cert), "CURLOPT_SSLCERT");
+ if (client_privkey)
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_SSLKEY, client_privkey), "CURLOPT_SSLKEY");
+ if (ca_cert) {
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_CAINFO, ca_cert), "CURLOPT_CAINFO");
+ }
+ if (ca_cert || verify_peer_and_host) {
+ /* per default if we have a CA verify both the peer and the
+ * hostname in the certificate, can be switched off later */
+ handle_curl_option_return_code (curl_easy_setopt( curl, CURLOPT_SSL_VERIFYPEER, 1), "CURLOPT_SSL_VERIFYPEER");
+ handle_curl_option_return_code (curl_easy_setopt( curl, CURLOPT_SSL_VERIFYHOST, 2), "CURLOPT_SSL_VERIFYHOST");
+ } else {
+ /* backward-compatible behaviour, be tolerant in checks
+ * TODO: depending on more options have aspects we want
+ * to be less tolerant about ssl verfications
+ */
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_SSL_VERIFYPEER, 0), "CURLOPT_SSL_VERIFYPEER");
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_SSL_VERIFYHOST, 0), "CURLOPT_SSL_VERIFYHOST");
+ }
+
+ /* detect SSL library used by libcurl */
+ ssl_library = curlhelp_get_ssl_library (curl);
+
+ /* try hard to get a stack of certificates to verify against */
+ if (check_cert) {
+#if LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 19, 1)
+ /* inform curl to report back certificates */
+ switch (ssl_library) {
+ case CURLHELP_SSL_LIBRARY_OPENSSL:
+ case CURLHELP_SSL_LIBRARY_LIBRESSL:
+ /* set callback to extract certificate with OpenSSL context function (works with
+ * OpenSSL-style libraries only!) */
+#ifdef USE_OPENSSL
+ /* libcurl and monitoring plugins built with OpenSSL, good */
+ handle_curl_option_return_code (curl_easy_setopt(curl, CURLOPT_SSL_CTX_FUNCTION, sslctxfun), "CURLOPT_SSL_CTX_FUNCTION");
+ is_openssl_callback = TRUE;
+#else /* USE_OPENSSL */
+#endif /* USE_OPENSSL */
+ /* libcurl is built with OpenSSL, monitoring plugins, so falling
+ * back to manually extracting certificate information */
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_CERTINFO, 1L), "CURLOPT_CERTINFO");
+ break;
+
+ case CURLHELP_SSL_LIBRARY_NSS:
+#if LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 34, 0)
+ /* NSS: support for CERTINFO is implemented since 7.34.0 */
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_CERTINFO, 1L), "CURLOPT_CERTINFO");
+#else /* LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 34, 0) */
+ die (STATE_CRITICAL, "HTTP CRITICAL - Cannot retrieve certificates (libcurl linked with SSL library '%s' is too old)\n", curlhelp_get_ssl_library_string (ssl_library));
+#endif /* LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 34, 0) */
+ break;
+
+ case CURLHELP_SSL_LIBRARY_GNUTLS:
+#if LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 42, 0)
+ /* GnuTLS: support for CERTINFO is implemented since 7.42.0 */
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_CERTINFO, 1L), "CURLOPT_CERTINFO");
+#else /* LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 42, 0) */
+ die (STATE_CRITICAL, "HTTP CRITICAL - Cannot retrieve certificates (libcurl linked with SSL library '%s' is too old)\n", curlhelp_get_ssl_library_string (ssl_library));
+#endif /* LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 42, 0) */
+ break;
+
+ case CURLHELP_SSL_LIBRARY_UNKNOWN:
+ default:
+ die (STATE_CRITICAL, "HTTP CRITICAL - Cannot retrieve certificates (unknown SSL library '%s', must implement first)\n", curlhelp_get_ssl_library_string (ssl_library));
+ break;
+ }
+#else /* LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 19, 1) */
+ /* old libcurl, our only hope is OpenSSL, otherwise we are out of luck */
+ if (ssl_library == CURLHELP_SSL_LIBRARY_OPENSSL || ssl_library == CURLHELP_SSL_LIBRARY_LIBRESSL)
+ handle_curl_option_return_code (curl_easy_setopt(curl, CURLOPT_SSL_CTX_FUNCTION, sslctxfun), "CURLOPT_SSL_CTX_FUNCTION");
+ else
+ die (STATE_CRITICAL, "HTTP CRITICAL - Cannot retrieve certificates (no CURLOPT_SSL_CTX_FUNCTION, no OpenSSL library or libcurl too old and has no CURLOPT_CERTINFO)\n");
+#endif /* LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 19, 1) */
+ }
+
+#endif /* LIBCURL_FEATURE_SSL */
+
+ /* set default or user-given user agent identification */
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_USERAGENT, user_agent), "CURLOPT_USERAGENT");
+
+ /* proxy-authentication */
+ if (strcmp(proxy_auth, ""))
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_PROXYUSERPWD, proxy_auth), "CURLOPT_PROXYUSERPWD");
+
+ /* authentication */
+ if (strcmp(user_auth, ""))
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_USERPWD, user_auth), "CURLOPT_USERPWD");
+
+ /* TODO: parameter auth method, bitfield of following methods:
+ * CURLAUTH_BASIC (default)
+ * CURLAUTH_DIGEST
+ * CURLAUTH_DIGEST_IE
+ * CURLAUTH_NEGOTIATE
+ * CURLAUTH_NTLM
+ * CURLAUTH_NTLM_WB
+ *
+ * convenience tokens for typical sets of methods:
+ * CURLAUTH_ANYSAFE: most secure, without BASIC
+ * or CURLAUTH_ANY: most secure, even BASIC if necessary
+ *
+ * handle_curl_option_return_code (curl_easy_setopt( curl, CURLOPT_HTTPAUTH, (long)CURLAUTH_DIGEST ), "CURLOPT_HTTPAUTH");
+ */
+
+ /* handle redirections */
+ if (onredirect == STATE_DEPENDENT) {
+ if( followmethod == FOLLOW_LIBCURL ) {
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_FOLLOWLOCATION, 1), "CURLOPT_FOLLOWLOCATION");
+
+ /* default -1 is infinite, not good, could lead to zombie plugins!
+ Setting it to one bigger than maximal limit to handle errors nicely below
+ */
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_MAXREDIRS, max_depth+1), "CURLOPT_MAXREDIRS");
+
+ /* for now allow only http and https (we are a http(s) check plugin in the end) */
+#if LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 19, 4)
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_REDIR_PROTOCOLS, CURLPROTO_HTTP | CURLPROTO_HTTPS), "CURLOPT_REDIRECT_PROTOCOLS");
+#endif /* LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 19, 4) */
+
+ /* TODO: handle the following aspects of redirection, make them
+ * command line options too later:
+ CURLOPT_POSTREDIR: method switch
+ CURLINFO_REDIRECT_URL: custom redirect option
+ CURLOPT_REDIRECT_PROTOCOLS: allow people to step outside safe protocols
+ CURLINFO_REDIRECT_COUNT: get the number of redirects, print it, maybe a range option here is nice like for expected page size?
+ */
+ } else {
+ /* old style redirection is handled below */
+ }
+ }
+
+ /* no-body */
+ if (no_body)
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_NOBODY, 1), "CURLOPT_NOBODY");
+
+ /* IPv4 or IPv6 forced DNS resolution */
+ if (address_family == AF_UNSPEC)
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_WHATEVER), "CURLOPT_IPRESOLVE(CURL_IPRESOLVE_WHATEVER)");
+ else if (address_family == AF_INET)
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V4), "CURLOPT_IPRESOLVE(CURL_IPRESOLVE_V4)");
+#if defined (USE_IPV6) && defined (LIBCURL_FEATURE_IPV6)
+ else if (address_family == AF_INET6)
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V6), "CURLOPT_IPRESOLVE(CURL_IPRESOLVE_V6)");
+#endif
+
+ /* either send http POST data (any data, not only POST)*/
+ if (!strcmp(http_method, "POST") ||!strcmp(http_method, "PUT")) {
+ /* set content of payload for POST and PUT */
+ if (http_content_type) {
+ snprintf (http_header, DEFAULT_BUFFER_SIZE, "Content-Type: %s", http_content_type);
+ header_list = curl_slist_append (header_list, http_header);
+ }
+ /* NULL indicates "HTTP Continue" in libcurl, provide an empty string
+ * in case of no POST/PUT data */
+ if (!http_post_data)
+ http_post_data = "";
+ if (!strcmp(http_method, "POST")) {
+ /* POST method, set payload with CURLOPT_POSTFIELDS */
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_POSTFIELDS, http_post_data), "CURLOPT_POSTFIELDS");
+ } else if (!strcmp(http_method, "PUT")) {
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_READFUNCTION, (curl_read_callback)curlhelp_buffer_read_callback), "CURLOPT_READFUNCTION");
+ curlhelp_initreadbuffer (&put_buf, http_post_data, strlen (http_post_data));
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_READDATA, (void *)&put_buf), "CURLOPT_READDATA");
+ handle_curl_option_return_code (curl_easy_setopt (curl, CURLOPT_INFILESIZE, (curl_off_t)strlen (http_post_data)), "CURLOPT_INFILESIZE");
+ }
+ }
+
+ /* do the request */
+ res = curl_easy_perform(curl);
+
+ if (verbose>=2 && http_post_data)
+ printf ("**** REQUEST CONTENT ****\n%s\n", http_post_data);
+
+ /* free header and server IP resolve lists, we don't need it anymore */
+ curl_slist_free_all (header_list); header_list = NULL;
+ curl_slist_free_all (server_ips); server_ips = NULL;
+
+ /* Curl errors, result in critical Nagios state */
+ if (res != CURLE_OK) {
+ snprintf (msg, DEFAULT_BUFFER_SIZE, _("Invalid HTTP response received from host on port %d: cURL returned %d - %s"),
+ server_port, res, errbuf[0] ? errbuf : curl_easy_strerror(res));
+ die (STATE_CRITICAL, "HTTP CRITICAL - %s\n", msg);
+ }
+
+ /* certificate checks */
+#ifdef LIBCURL_FEATURE_SSL
+ if (use_ssl == TRUE) {
+ if (check_cert == TRUE) {
+ if (is_openssl_callback) {
+#ifdef USE_OPENSSL
+ /* check certificate with OpenSSL functions, curl has been built against OpenSSL
+ * and we actually have OpenSSL in the monitoring tools
+ */
+ result = np_net_ssl_check_certificate(cert, days_till_exp_warn, days_till_exp_crit);
+ return result;
+#else /* USE_OPENSSL */
+ die (STATE_CRITICAL, "HTTP CRITICAL - Cannot retrieve certificates - OpenSSL callback used and not linked against OpenSSL\n");
+#endif /* USE_OPENSSL */
+ } else {
+ int i;
+ struct curl_slist *slist;
+
+ cert_ptr.to_info = NULL;
+ res = curl_easy_getinfo (curl, CURLINFO_CERTINFO, &cert_ptr.to_info);
+ if (!res && cert_ptr.to_info) {
+#ifdef USE_OPENSSL
+ /* We have no OpenSSL in libcurl, but we can use OpenSSL for X509 cert parsing
+ * We only check the first certificate and assume it's the one of the server
+ */
+ const char* raw_cert = NULL;
+ for (i = 0; i < cert_ptr.to_certinfo->num_of_certs; i++) {
+ for (slist = cert_ptr.to_certinfo->certinfo[i]; slist; slist = slist->next) {
+ if (verbose >= 2)
+ printf ("%d ** %s\n", i, slist->data);
+ if (strncmp (slist->data, "Cert:", 5) == 0) {
+ raw_cert = &slist->data[5];
+ goto GOT_FIRST_CERT;
+ }
+ }
+ }
+GOT_FIRST_CERT:
+ if (!raw_cert) {
+ snprintf (msg, DEFAULT_BUFFER_SIZE, _("Cannot retrieve certificates from CERTINFO information - certificate data was empty"));
+ die (STATE_CRITICAL, "HTTP CRITICAL - %s\n", msg);
+ }
+ BIO* cert_BIO = BIO_new (BIO_s_mem());
+ BIO_write (cert_BIO, raw_cert, strlen(raw_cert));
+ cert = PEM_read_bio_X509 (cert_BIO, NULL, NULL, NULL);
+ if (!cert) {
+ snprintf (msg, DEFAULT_BUFFER_SIZE, _("Cannot read certificate from CERTINFO information - BIO error"));
+ die (STATE_CRITICAL, "HTTP CRITICAL - %s\n", msg);
+ }
+ BIO_free (cert_BIO);
+ result = np_net_ssl_check_certificate(cert, days_till_exp_warn, days_till_exp_crit);
+ return result;
+#else /* USE_OPENSSL */
+ /* We assume we don't have OpenSSL and np_net_ssl_check_certificate at our disposal,
+ * so we use the libcurl CURLINFO data
+ */
+ result = net_noopenssl_check_certificate(&cert_ptr, days_till_exp_warn, days_till_exp_crit);
+ return result;
+#endif /* USE_OPENSSL */
+ } else {
+ snprintf (msg, DEFAULT_BUFFER_SIZE, _("Cannot retrieve certificates - cURL returned %d - %s"),
+ res, curl_easy_strerror(res));
+ die (STATE_CRITICAL, "HTTP CRITICAL - %s\n", msg);
+ }
+ }
+ }
+ }
+#endif /* LIBCURL_FEATURE_SSL */
+
+ /* we got the data and we executed the request in a given time, so we can append
+ * performance data to the answer always
+ */
+ handle_curl_option_return_code (curl_easy_getinfo (curl, CURLINFO_TOTAL_TIME, &total_time), "CURLINFO_TOTAL_TIME");
+ page_len = get_content_length(&header_buf, &body_buf);
+ if(show_extended_perfdata) {
+ handle_curl_option_return_code (curl_easy_getinfo(curl, CURLINFO_CONNECT_TIME, &time_connect), "CURLINFO_CONNECT_TIME");
+ handle_curl_option_return_code (curl_easy_getinfo(curl, CURLINFO_APPCONNECT_TIME, &time_appconnect), "CURLINFO_APPCONNECT_TIME");
+ handle_curl_option_return_code (curl_easy_getinfo(curl, CURLINFO_PRETRANSFER_TIME, &time_headers), "CURLINFO_PRETRANSFER_TIME");
+ handle_curl_option_return_code (curl_easy_getinfo(curl, CURLINFO_STARTTRANSFER_TIME, &time_firstbyte), "CURLINFO_STARTTRANSFER_TIME");
+ snprintf(perfstring, DEFAULT_BUFFER_SIZE, "%s %s %s %s %s %s %s",
+ perfd_time(total_time),
+ perfd_size(page_len),
+ perfd_time_connect(time_connect),
+ use_ssl == TRUE ? perfd_time_ssl (time_appconnect-time_connect) : "",
+ perfd_time_headers(time_headers - time_appconnect),
+ perfd_time_firstbyte(time_firstbyte - time_headers),
+ perfd_time_transfer(total_time-time_firstbyte)
+ );
+ } else {
+ snprintf(perfstring, DEFAULT_BUFFER_SIZE, "%s %s",
+ perfd_time(total_time),
+ perfd_size(page_len)
+ );
+ }
+
+ /* return a CRITICAL status if we couldn't read any data */
+ if (strlen(header_buf.buf) == 0 && strlen(body_buf.buf) == 0)
+ die (STATE_CRITICAL, _("HTTP CRITICAL - No header received from host\n"));
+
+ /* get status line of answer, check sanity of HTTP code */
+ if (curlhelp_parse_statusline (header_buf.buf, &status_line) < 0) {
+ snprintf (msg, DEFAULT_BUFFER_SIZE, "Unparsable status line in %.3g seconds response time|%s\n",
+ total_time, perfstring);
+ /* we cannot know the major/minor version here for sure as we cannot parse the first line */
+ die (STATE_CRITICAL, "HTTP CRITICAL HTTP/x.x %ld unknown - %s", code, msg);
+ }
+
+ /* get result code from cURL */
+ handle_curl_option_return_code (curl_easy_getinfo (curl, CURLINFO_RESPONSE_CODE, &code), "CURLINFO_RESPONSE_CODE");
+ if (verbose>=2)
+ printf ("* curl CURLINFO_RESPONSE_CODE is %ld\n", code);
+
+ /* print status line, header, body if verbose */
+ if (verbose >= 2) {
+ printf ("**** HEADER ****\n%s\n**** CONTENT ****\n%s\n", header_buf.buf,
+ (no_body ? " [[ skipped ]]" : body_buf.buf));
+ }
+
+ /* make sure the status line matches the response we are looking for */
+ if (!expected_statuscode(status_line.first_line, server_expect)) {
+ if (server_port == HTTP_PORT)
+ snprintf(msg, DEFAULT_BUFFER_SIZE, _("Invalid HTTP response received from host: %s\n"), status_line.first_line);
+ else
+ snprintf(msg, DEFAULT_BUFFER_SIZE, _("Invalid HTTP response received from host on port %d: %s\n"), server_port, status_line.first_line);
+ die (STATE_CRITICAL, "HTTP CRITICAL - %s%s%s", msg,
+ show_body ? "\n" : "",
+ show_body ? body_buf.buf : "");
+ }
+
+ if( server_expect_yn ) {
+ snprintf(msg, DEFAULT_BUFFER_SIZE, _("Status line output matched \"%s\" - "), server_expect);
+ if (verbose)
+ printf ("%s\n",msg);
+ result = STATE_OK;
+ }
+ else {
+ /* illegal return codes result in a critical state */
+ if (code >= 600 || code < 100) {
+ die (STATE_CRITICAL, _("HTTP CRITICAL: Invalid Status (%d, %.40s)\n"), status_line.http_code, status_line.msg);
+ /* server errors result in a critical state */
+ } else if (code >= 500) {
+ result = STATE_CRITICAL;
+ /* client errors result in a warning state */
+ } else if (code >= 400) {
+ result = STATE_WARNING;
+ /* check redirected page if specified */
+ } else if (code >= 300) {
+ if (onredirect == STATE_DEPENDENT) {
+ if( followmethod == FOLLOW_LIBCURL ) {
+ code = status_line.http_code;
+ } else {
+ /* old check_http style redirection, if we come
+ * back here, we are in the same status as with
+ * the libcurl method
+ */
+ redir (&header_buf);
+ }
+ } else {
+ /* this is a specific code in the command line to
+ * be returned when a redirection is encoutered
+ */
+ }
+ result = max_state_alt (onredirect, result);
+ /* all other codes are considered ok */
+ } else {
+ result = STATE_OK;
+ }
+ }
+
+ /* libcurl redirection internally, handle error states here */
+ if( followmethod == FOLLOW_LIBCURL ) {
+ handle_curl_option_return_code (curl_easy_getinfo (curl, CURLINFO_REDIRECT_COUNT, &redir_depth), "CURLINFO_REDIRECT_COUNT");
+ if (verbose >= 2)
+ printf(_("* curl LIBINFO_REDIRECT_COUNT is %d\n"), redir_depth);
+ if (redir_depth > max_depth) {
+ snprintf (msg, DEFAULT_BUFFER_SIZE, "maximum redirection depth %d exceeded in libcurl",
+ max_depth);
+ die (STATE_WARNING, "HTTP WARNING - %s", msg);
+ }
+ }
+
+ /* check status codes, set exit status accordingly */
+ if( status_line.http_code != code ) {
+ die (STATE_CRITICAL, _("HTTP CRITICAL %s %d %s - different HTTP codes (cUrl has %ld)\n"),
+ string_statuscode (status_line.http_major, status_line.http_minor),
+ status_line.http_code, status_line.msg, code);
+ }
+
+ if (maximum_age >= 0) {
+ result = max_state_alt(check_document_dates(&header_buf, &msg), result);
+ }
+
+ /* Page and Header content checks go here */
+
+ if (strlen (header_expect)) {
+ if (!strstr (header_buf.buf, header_expect)) {
+ strncpy(&output_header_search[0],header_expect,sizeof(output_header_search));
+ if(output_header_search[sizeof(output_header_search)-1]!='\0') {
+ bcopy("...",&output_header_search[sizeof(output_header_search)-4],4);
+ }
+ snprintf (msg, DEFAULT_BUFFER_SIZE, _("%sheader '%s' not found on '%s://%s:%d%s', "), msg, output_header_search, use_ssl ? "https" : "http", host_name ? host_name : server_address, server_port, server_url);
+ result = STATE_CRITICAL;
+ }
+ }
+
+ if (strlen (string_expect)) {
+ if (!strstr (body_buf.buf, string_expect)) {
+ strncpy(&output_string_search[0],string_expect,sizeof(output_string_search));
+ if(output_string_search[sizeof(output_string_search)-1]!='\0') {
+ bcopy("...",&output_string_search[sizeof(output_string_search)-4],4);
+ }
+ snprintf (msg, DEFAULT_BUFFER_SIZE, _("%sstring '%s' not found on '%s://%s:%d%s', "), msg, output_string_search, use_ssl ? "https" : "http", host_name ? host_name : server_address, server_port, server_url);
+ result = STATE_CRITICAL;
+ }
+ }
+
+ if (strlen (regexp)) {
+ errcode = regexec (&preg, body_buf.buf, REGS, pmatch, 0);
+ if ((errcode == 0 && invert_regex == 0) || (errcode == REG_NOMATCH && invert_regex == 1)) {
+ /* OK - No-op to avoid changing the logic around it */
+ result = max_state_alt(STATE_OK, result);
+ }
+ else if ((errcode == REG_NOMATCH && invert_regex == 0) || (errcode == 0 && invert_regex == 1)) {
+ if (invert_regex == 0)
+ snprintf (msg, DEFAULT_BUFFER_SIZE, _("%spattern not found, "), msg);
+ else
+ snprintf (msg, DEFAULT_BUFFER_SIZE, _("%spattern found, "), msg);
+ result = STATE_CRITICAL;
+ }
+ else {
+ regerror (errcode, &preg, errbuf, MAX_INPUT_BUFFER);
+ snprintf (msg, DEFAULT_BUFFER_SIZE, _("%sExecute Error: %s, "), msg, errbuf);
+ result = STATE_UNKNOWN;
+ }
+ }
+
+ /* make sure the page is of an appropriate size */
+ if ((max_page_len > 0) && (page_len > max_page_len)) {
+ snprintf (msg, DEFAULT_BUFFER_SIZE, _("%spage size %d too large, "), msg, page_len);
+ result = max_state_alt(STATE_WARNING, result);
+ } else if ((min_page_len > 0) && (page_len < min_page_len)) {
+ snprintf (msg, DEFAULT_BUFFER_SIZE, _("%spage size %d too small, "), msg, page_len);
+ result = max_state_alt(STATE_WARNING, result);
+ }
+
+ /* -w, -c: check warning and critical level */
+ result = max_state_alt(get_status(total_time, thlds), result);
+
+ /* Cut-off trailing characters */
+ if(msg[strlen(msg)-2] == ',')
+ msg[strlen(msg)-2] = '\0';
+ else
+ msg[strlen(msg)-3] = '\0';
+
+ /* TODO: separate _() msg and status code: die (result, "HTTP %s: %s\n", state_text(result), msg); */
+ die (result, "HTTP %s: %s %d %s%s%s - %d bytes in %.3f second response time %s|%s\n%s%s",
+ state_text(result), string_statuscode (status_line.http_major, status_line.http_minor),
+ status_line.http_code, status_line.msg,
+ strlen(msg) > 0 ? " - " : "",
+ msg, page_len, total_time,
+ (display_html ? "</A>" : ""),
+ perfstring,
+ (show_body ? body_buf.buf : ""),
+ (show_body ? "\n" : "") );
+
+ /* proper cleanup after die? */
+ curlhelp_free_statusline(&status_line);
+ curl_easy_cleanup (curl);
+ curl_global_cleanup ();
+ curlhelp_freewritebuffer (&body_buf);
+ curlhelp_freewritebuffer (&header_buf);
+ if (!strcmp (http_method, "PUT")) {
+ curlhelp_freereadbuffer (&put_buf);
+ }
+
+ return result;
+}
+
+int
+uri_strcmp (const UriTextRangeA range, const char* s)
+{
+ if (!range.first) return -1;
+ if (range.afterLast - range.first < strlen (s)) return -1;
+ return strncmp (s, range.first, min( range.afterLast - range.first, strlen (s)));
+}
+
+char*
+uri_string (const UriTextRangeA range, char* buf, size_t buflen)
+{
+ if (!range.first) return "(null)";
+ strncpy (buf, range.first, max (buflen-1, range.afterLast - range.first));
+ buf[max (buflen-1, range.afterLast - range.first)] = '\0';
+ buf[range.afterLast - range.first] = '\0';
+ return buf;
+}
+
+void
+redir (curlhelp_write_curlbuf* header_buf)
+{
+ char *location = NULL;
+ curlhelp_statusline status_line;
+ struct phr_header headers[255];
+ size_t nof_headers = 255;
+ size_t msglen;
+ char buf[DEFAULT_BUFFER_SIZE];
+ char ipstr[INET_ADDR_MAX_SIZE];
+ int new_port;
+ char *new_host;
+ char *new_url;
+
+ int res = phr_parse_response (header_buf->buf, header_buf->buflen,
+ &status_line.http_minor, &status_line.http_code, &status_line.msg, &msglen,
+ headers, &nof_headers, 0);
+
+ location = get_header_value (headers, nof_headers, "location");
+
+ if (verbose >= 2)
+ printf(_("* Seen redirect location %s\n"), location);
+
+ if (++redir_depth > max_depth)
+ die (STATE_WARNING,
+ _("HTTP WARNING - maximum redirection depth %d exceeded - %s%s\n"),
+ max_depth, location, (display_html ? "</A>" : ""));
+
+ UriParserStateA state;
+ UriUriA uri;
+ state.uri = &uri;
+ if (uriParseUriA (&state, location) != URI_SUCCESS) {
+ if (state.errorCode == URI_ERROR_SYNTAX) {
+ die (STATE_UNKNOWN,
+ _("HTTP UNKNOWN - Could not parse redirect location '%s'%s\n"),
+ location, (display_html ? "</A>" : ""));
+ } else if (state.errorCode == URI_ERROR_MALLOC) {
+ die (STATE_UNKNOWN, _("HTTP UNKNOWN - Could not allocate URL\n"));
+ }
+ }
+
+ if (verbose >= 2) {
+ printf (_("** scheme: %s\n"),
+ uri_string (uri.scheme, buf, DEFAULT_BUFFER_SIZE));
+ printf (_("** host: %s\n"),
+ uri_string (uri.hostText, buf, DEFAULT_BUFFER_SIZE));
+ printf (_("** port: %s\n"),
+ uri_string (uri.portText, buf, DEFAULT_BUFFER_SIZE));
+ if (uri.hostData.ip4) {
+ inet_ntop (AF_INET, uri.hostData.ip4->data, ipstr, sizeof (ipstr));
+ printf (_("** IPv4: %s\n"), ipstr);
+ }
+ if (uri.hostData.ip6) {
+ inet_ntop (AF_INET, uri.hostData.ip6->data, ipstr, sizeof (ipstr));
+ printf (_("** IPv6: %s\n"), ipstr);
+ }
+ if (uri.pathHead) {
+ printf (_("** path: "));
+ const UriPathSegmentA* p = uri.pathHead;
+ for (; p; p = p->next) {
+ printf ("/%s", uri_string (p->text, buf, DEFAULT_BUFFER_SIZE));
+ }
+ puts ("");
+ }
+ if (uri.query.first) {
+ printf (_("** query: %s\n"),
+ uri_string (uri.query, buf, DEFAULT_BUFFER_SIZE));
+ }
+ if (uri.fragment.first) {
+ printf (_("** fragment: %s\n"),
+ uri_string (uri.fragment, buf, DEFAULT_BUFFER_SIZE));
+ }
+ }
+
+ use_ssl = !uri_strcmp (uri.scheme, "https");
+
+ /* we do a sloppy test here only, because uriparser would have failed
+ * above, if the port would be invalid, we just check for MAX_PORT
+ */
+ if (uri.portText.first) {
+ new_port = atoi (uri_string (uri.portText, buf, DEFAULT_BUFFER_SIZE));
+ } else {
+ new_port = HTTP_PORT;
+ if (use_ssl)
+ new_port = HTTPS_PORT;
+ }
+ if (new_port > MAX_PORT)
+ die (STATE_UNKNOWN,
+ _("HTTP UNKNOWN - Redirection to port above %d - %s%s\n"),
+ MAX_PORT, location, display_html ? "</A>" : "");
+
+ /* by RFC 7231 relative URLs in Location should be taken relative to
+ * the original URL, so wy try to form a new absolute URL here
+ */
+ if (!uri.scheme.first && !uri.hostText.first) {
+ new_host = strdup (host_name ? host_name : server_address);
+ } else {
+ new_host = strdup (uri_string (uri.hostText, buf, DEFAULT_BUFFER_SIZE));
+ }
+
+ /* compose new path */
+ /* TODO: handle fragments and query part of URL */
+ new_url = (char *)calloc( 1, DEFAULT_BUFFER_SIZE);
+ if (uri.pathHead) {
+ const UriPathSegmentA* p = uri.pathHead;
+ for (; p; p = p->next) {
+ strncat (new_url, "/", DEFAULT_BUFFER_SIZE);
+ strncat (new_url, uri_string (p->text, buf, DEFAULT_BUFFER_SIZE), DEFAULT_BUFFER_SIZE-1);
+ }
+ }
+
+ if (server_port==new_port &&
+ !strncmp(server_address, new_host, MAX_IPV4_HOSTLENGTH) &&
+ (host_name && !strncmp(host_name, new_host, MAX_IPV4_HOSTLENGTH)) &&
+ !strcmp(server_url, new_url))
+ die (STATE_CRITICAL,
+ _("HTTP CRITICAL - redirection creates an infinite loop - %s://%s:%d%s%s\n"),
+ use_ssl ? "https" : "http", new_host, new_port, new_url, (display_html ? "</A>" : ""));
+
+ /* set new values for redirected request */
+
+ if (!(followsticky & STICKY_HOST)) {
+ free (server_address);
+ server_address = strndup (new_host, MAX_IPV4_HOSTLENGTH);
+ }
+ if (!(followsticky & STICKY_PORT)) {
+ server_port = (unsigned short)new_port;
+ }
+
+ free (host_name);
+ host_name = strndup (new_host, MAX_IPV4_HOSTLENGTH);
+
+ /* reset virtual port */
+ virtual_port = server_port;
+
+ free(new_host);
+ free (server_url);
+ server_url = new_url;
+
+ uriFreeUriMembersA (&uri);
+
+ if (verbose)
+ printf (_("Redirection to %s://%s:%d%s\n"), use_ssl ? "https" : "http",
+ host_name ? host_name : server_address, server_port, server_url);
+
+ /* TODO: the hash component MUST be taken from the original URL and
+ * attached to the URL in Location
+ */
+
+ check_http ();
+}
+
/* Verify that the file at path exists and is readable; on failure, exit
 * via usage2() with an explanatory message. */
void
test_file (char *path)
{
  /* guard clause: only the failure case needs handling */
  if (access (path, R_OK) != 0)
    usage2 (_("file does not exist or is not readable"), path);
}
+
+int
+process_arguments (int argc, char **argv)
+{
+ char *p;
+ int c = 1;
+ char *temp;
+
+ enum {
+ INVERT_REGEX = CHAR_MAX + 1,
+ SNI_OPTION,
+ CA_CERT_OPTION,
+ HTTP_VERSION_OPTION,
+ AUTOMATIC_DECOMPRESSION
+ };
+
+ int option = 0;
+ int got_plus = 0;
+ static struct option longopts[] = {
+ STD_LONG_OPTS,
+ {"link", no_argument, 0, 'L'},
+ {"nohtml", no_argument, 0, 'n'},
+ {"ssl", optional_argument, 0, 'S'},
+ {"sni", no_argument, 0, SNI_OPTION},
+ {"post", required_argument, 0, 'P'},
+ {"method", required_argument, 0, 'j'},
+ {"IP-address", required_argument, 0, 'I'},
+ {"url", required_argument, 0, 'u'},
+ {"port", required_argument, 0, 'p'},
+ {"authorization", required_argument, 0, 'a'},
+ {"proxy-authorization", required_argument, 0, 'b'},
+ {"header-string", required_argument, 0, 'd'},
+ {"string", required_argument, 0, 's'},
+ {"expect", required_argument, 0, 'e'},
+ {"regex", required_argument, 0, 'r'},
+ {"ereg", required_argument, 0, 'r'},
+ {"eregi", required_argument, 0, 'R'},
+ {"linespan", no_argument, 0, 'l'},
+ {"onredirect", required_argument, 0, 'f'},
+ {"certificate", required_argument, 0, 'C'},
+ {"client-cert", required_argument, 0, 'J'},
+ {"private-key", required_argument, 0, 'K'},
+ {"ca-cert", required_argument, 0, CA_CERT_OPTION},
+ {"verify-cert", no_argument, 0, 'D'},
+ {"useragent", required_argument, 0, 'A'},
+ {"header", required_argument, 0, 'k'},
+ {"no-body", no_argument, 0, 'N'},
+ {"max-age", required_argument, 0, 'M'},
+ {"content-type", required_argument, 0, 'T'},
+ {"pagesize", required_argument, 0, 'm'},
+ {"invert-regex", no_argument, NULL, INVERT_REGEX},
+ {"use-ipv4", no_argument, 0, '4'},
+ {"use-ipv6", no_argument, 0, '6'},
+ {"extended-perfdata", no_argument, 0, 'E'},
+ {"show-body", no_argument, 0, 'B'},
+ {"http-version", required_argument, 0, HTTP_VERSION_OPTION},
+ {"enable-automatic-decompression", no_argument, 0, AUTOMATIC_DECOMPRESSION},
+ {0, 0, 0, 0}
+ };
+
+ if (argc < 2)
+ return ERROR;
+
+ /* support check_http compatible arguments */
+ for (c = 1; c < argc; c++) {
+ if (strcmp ("-to", argv[c]) == 0)
+ strcpy (argv[c], "-t");
+ if (strcmp ("-hn", argv[c]) == 0)
+ strcpy (argv[c], "-H");
+ if (strcmp ("-wt", argv[c]) == 0)
+ strcpy (argv[c], "-w");
+ if (strcmp ("-ct", argv[c]) == 0)
+ strcpy (argv[c], "-c");
+ if (strcmp ("-nohtml", argv[c]) == 0)
+ strcpy (argv[c], "-n");
+ }
+
+ server_url = strdup(DEFAULT_SERVER_URL);
+
+ while (1) {
+ c = getopt_long (argc, argv, "Vvh46t:c:w:A:k:H:P:j:T:I:a:b:d:e:p:s:R:r:u:f:C:J:K:DnlLS::m:M:NEB", longopts, &option);
+ if (c == -1 || c == EOF || c == 1)
+ break;
+
+ switch (c) {
+ case 'h':
+ print_help();
+ exit(STATE_UNKNOWN);
+ break;
+ case 'V':
+ print_revision(progname, NP_VERSION);
+ print_curl_version();
+ exit(STATE_UNKNOWN);
+ break;
+ case 'v':
+ verbose++;
+ break;
+ case 't': /* timeout period */
+ if (!is_intnonneg (optarg))
+ usage2 (_("Timeout interval must be a positive integer"), optarg);
+ else
+ socket_timeout = (int)strtol (optarg, NULL, 10);
+ break;
+ case 'c': /* critical time threshold */
+ critical_thresholds = optarg;
+ break;
+ case 'w': /* warning time threshold */
+ warning_thresholds = optarg;
+ break;
+ case 'H': /* virtual host */
+ host_name = strdup (optarg);
+ if (host_name[0] == '[') {
+ if ((p = strstr (host_name, "]:")) != NULL) { /* [IPv6]:port */
+ virtual_port = atoi (p + 2);
+ /* cut off the port */
+ host_name_length = strlen (host_name) - strlen (p) - 1;
+ free (host_name);
+ host_name = strndup (optarg, host_name_length);
+ }
+ } else if ((p = strchr (host_name, ':')) != NULL
+ && strchr (++p, ':') == NULL) { /* IPv4:port or host:port */
+ virtual_port = atoi (p);
+ /* cut off the port */
+ host_name_length = strlen (host_name) - strlen (p) - 1;
+ free (host_name);
+ host_name = strndup (optarg, host_name_length);
+ }
+ break;
+ case 'I': /* internet address */
+ server_address = strdup (optarg);
+ break;
+ case 'u': /* URL path */
+ server_url = strdup (optarg);
+ break;
+ case 'p': /* Server port */
+ if (!is_intnonneg (optarg))
+ usage2 (_("Invalid port number, expecting a non-negative number"), optarg);
+ else {
+ if( strtol(optarg, NULL, 10) > MAX_PORT)
+ usage2 (_("Invalid port number, supplied port number is too big"), optarg);
+ server_port = (unsigned short)strtol(optarg, NULL, 10);
+ specify_port = TRUE;
+ }
+ break;
+ case 'a': /* authorization info */
+ strncpy (user_auth, optarg, MAX_INPUT_BUFFER - 1);
+ user_auth[MAX_INPUT_BUFFER - 1] = 0;
+ break;
+ case 'b': /* proxy-authorization info */
+ strncpy (proxy_auth, optarg, MAX_INPUT_BUFFER - 1);
+ proxy_auth[MAX_INPUT_BUFFER - 1] = 0;
+ break;
+ case 'P': /* HTTP POST data in URL encoded format; ignored if settings already */
+ if (! http_post_data)
+ http_post_data = strdup (optarg);
+ if (! http_method)
+ http_method = strdup("POST");
+ break;
+ case 'j': /* Set HTTP method */
+ if (http_method)
+ free(http_method);
+ http_method = strdup (optarg);
+ break;
+ case 'A': /* useragent */
+ strncpy (user_agent, optarg, DEFAULT_BUFFER_SIZE);
+ user_agent[DEFAULT_BUFFER_SIZE-1] = '\0';
+ break;
+ case 'k': /* Additional headers */
+ if (http_opt_headers_count == 0)
+ http_opt_headers = malloc (sizeof (char *) * (++http_opt_headers_count));
+ else
+ http_opt_headers = realloc (http_opt_headers, sizeof (char *) * (++http_opt_headers_count));
+ http_opt_headers[http_opt_headers_count - 1] = optarg;
+ break;
+ case 'L': /* show html link */
+ display_html = TRUE;
+ break;
+ case 'n': /* do not show html link */
+ display_html = FALSE;
+ break;
+ case 'C': /* Check SSL cert validity */
+#ifdef LIBCURL_FEATURE_SSL
+ if ((temp=strchr(optarg,','))!=NULL) {
+ *temp='\0';
+ if (!is_intnonneg (optarg))
+ usage2 (_("Invalid certificate expiration period"), optarg);
+ days_till_exp_warn = atoi(optarg);
+ *temp=',';
+ temp++;
+ if (!is_intnonneg (temp))
+ usage2 (_("Invalid certificate expiration period"), temp);
+ days_till_exp_crit = atoi (temp);
+ }
+ else {
+ days_till_exp_crit=0;
+ if (!is_intnonneg (optarg))
+ usage2 (_("Invalid certificate expiration period"), optarg);
+ days_till_exp_warn = atoi (optarg);
+ }
+ check_cert = TRUE;
+ goto enable_ssl;
+#endif
+ case 'J': /* use client certificate */
+#ifdef LIBCURL_FEATURE_SSL
+ test_file(optarg);
+ client_cert = optarg;
+ goto enable_ssl;
+#endif
+ case 'K': /* use client private key */
+#ifdef LIBCURL_FEATURE_SSL
+ test_file(optarg);
+ client_privkey = optarg;
+ goto enable_ssl;
+#endif
+#ifdef LIBCURL_FEATURE_SSL
+ case CA_CERT_OPTION: /* use CA chain file */
+ test_file(optarg);
+ ca_cert = optarg;
+ goto enable_ssl;
+#endif
+#ifdef LIBCURL_FEATURE_SSL
+ case 'D': /* verify peer certificate & host */
+ verify_peer_and_host = TRUE;
+ break;
+#endif
+ case 'S': /* use SSL */
+#ifdef LIBCURL_FEATURE_SSL
+ enable_ssl:
+ use_ssl = TRUE;
+ /* ssl_version initialized to CURL_SSLVERSION_DEFAULT as a default.
+ * Only set if it's non-zero. This helps when we include multiple
+ * parameters, like -S and -C combinations */
+ ssl_version = CURL_SSLVERSION_DEFAULT;
+ if (c=='S' && optarg != NULL) {
+ char *plus_ptr = strchr(optarg, '+');
+ if (plus_ptr) {
+ got_plus = 1;
+ *plus_ptr = '\0';
+ }
+
+ if (optarg[0] == '2')
+ ssl_version = CURL_SSLVERSION_SSLv2;
+ else if (optarg[0] == '3')
+ ssl_version = CURL_SSLVERSION_SSLv3;
+ else if (!strcmp (optarg, "1") || !strcmp (optarg, "1.0"))
+#if LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 34, 0)
+ ssl_version = CURL_SSLVERSION_TLSv1_0;
+#else
+ ssl_version = CURL_SSLVERSION_DEFAULT;
+#endif /* LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 34, 0) */
+ else if (!strcmp (optarg, "1.1"))
+#if LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 34, 0)
+ ssl_version = CURL_SSLVERSION_TLSv1_1;
+#else
+ ssl_version = CURL_SSLVERSION_DEFAULT;
+#endif /* LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 34, 0) */
+ else if (!strcmp (optarg, "1.2"))
+#if LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 34, 0)
+ ssl_version = CURL_SSLVERSION_TLSv1_2;
+#else
+ ssl_version = CURL_SSLVERSION_DEFAULT;
+#endif /* LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 34, 0) */
+ else if (!strcmp (optarg, "1.3"))
+#if LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 52, 0)
+ ssl_version = CURL_SSLVERSION_TLSv1_3;
+#else
+ ssl_version = CURL_SSLVERSION_DEFAULT;
+#endif /* LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 52, 0) */
+ else
+ usage4 (_("Invalid option - Valid SSL/TLS versions: 2, 3, 1, 1.1, 1.2, 1.3 (with optional '+' suffix)"));
+ }
+#if LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 54, 0)
+ if (got_plus) {
+ switch (ssl_version) {
+ case CURL_SSLVERSION_TLSv1_3:
+ ssl_version |= CURL_SSLVERSION_MAX_TLSv1_3;
+ break;
+ case CURL_SSLVERSION_TLSv1_2:
+ case CURL_SSLVERSION_TLSv1_1:
+ case CURL_SSLVERSION_TLSv1_0:
+ ssl_version |= CURL_SSLVERSION_MAX_DEFAULT;
+ break;
+ }
+ } else {
+ switch (ssl_version) {
+ case CURL_SSLVERSION_TLSv1_3:
+ ssl_version |= CURL_SSLVERSION_MAX_TLSv1_3;
+ break;
+ case CURL_SSLVERSION_TLSv1_2:
+ ssl_version |= CURL_SSLVERSION_MAX_TLSv1_2;
+ break;
+ case CURL_SSLVERSION_TLSv1_1:
+ ssl_version |= CURL_SSLVERSION_MAX_TLSv1_1;
+ break;
+ case CURL_SSLVERSION_TLSv1_0:
+ ssl_version |= CURL_SSLVERSION_MAX_TLSv1_0;
+ break;
+ }
+ }
+#endif /* LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 54, 0) */
+ if (verbose >= 2)
+ printf(_("* Set SSL/TLS version to %d\n"), ssl_version);
+ if (specify_port == FALSE)
+ server_port = HTTPS_PORT;
+ break;
+#else /* LIBCURL_FEATURE_SSL */
+ /* -C -J and -K fall through to here without SSL */
+ usage4 (_("Invalid option - SSL is not available"));
+ break;
+ case SNI_OPTION: /* --sni is parsed, but ignored, the default is TRUE with libcurl */
+ use_sni = TRUE;
+ break;
+#endif /* LIBCURL_FEATURE_SSL */
+ case 'f': /* onredirect */
+ if (!strcmp (optarg, "ok"))
+ onredirect = STATE_OK;
+ else if (!strcmp (optarg, "warning"))
+ onredirect = STATE_WARNING;
+ else if (!strcmp (optarg, "critical"))
+ onredirect = STATE_CRITICAL;
+ else if (!strcmp (optarg, "unknown"))
+ onredirect = STATE_UNKNOWN;
+ else if (!strcmp (optarg, "follow"))
+ onredirect = STATE_DEPENDENT;
+ else if (!strcmp (optarg, "stickyport"))
+ onredirect = STATE_DEPENDENT, followmethod = FOLLOW_HTTP_CURL, followsticky = STICKY_HOST|STICKY_PORT;
+ else if (!strcmp (optarg, "sticky"))
+ onredirect = STATE_DEPENDENT, followmethod = FOLLOW_HTTP_CURL, followsticky = STICKY_HOST;
+ else if (!strcmp (optarg, "follow"))
+ onredirect = STATE_DEPENDENT, followmethod = FOLLOW_HTTP_CURL, followsticky = STICKY_NONE;
+ else if (!strcmp (optarg, "curl"))
+ onredirect = STATE_DEPENDENT, followmethod = FOLLOW_LIBCURL;
+ else usage2 (_("Invalid onredirect option"), optarg);
+ if (verbose >= 2)
+ printf(_("* Following redirects set to %s\n"), state_text(onredirect));
+ break;
+ case 'd': /* string or substring */
+ strncpy (header_expect, optarg, MAX_INPUT_BUFFER - 1);
+ header_expect[MAX_INPUT_BUFFER - 1] = 0;
+ break;
+ case 's': /* string or substring */
+ strncpy (string_expect, optarg, MAX_INPUT_BUFFER - 1);
+ string_expect[MAX_INPUT_BUFFER - 1] = 0;
+ break;
+ case 'e': /* string or substring */
+ strncpy (server_expect, optarg, MAX_INPUT_BUFFER - 1);
+ server_expect[MAX_INPUT_BUFFER - 1] = 0;
+ server_expect_yn = 1;
+ break;
+ case 'T': /* Content-type */
+ http_content_type = strdup (optarg);
+ break;
+ case 'l': /* linespan */
+ cflags &= ~REG_NEWLINE;
+ break;
+ case 'R': /* regex */
+ cflags |= REG_ICASE;
+ case 'r': /* regex */
+ strncpy (regexp, optarg, MAX_RE_SIZE - 1);
+ regexp[MAX_RE_SIZE - 1] = 0;
+ errcode = regcomp (&preg, regexp, cflags);
+ if (errcode != 0) {
+ (void) regerror (errcode, &preg, errbuf, MAX_INPUT_BUFFER);
+ printf (_("Could Not Compile Regular Expression: %s"), errbuf);
+ return ERROR;
+ }
+ break;
+ case INVERT_REGEX:
+ invert_regex = 1;
+ break;
+ case '4':
+ address_family = AF_INET;
+ break;
+ case '6':
+#if defined (USE_IPV6) && defined (LIBCURL_FEATURE_IPV6)
+ address_family = AF_INET6;
+#else
+ usage4 (_("IPv6 support not available"));
+#endif
+ break;
+ case 'm': /* min_page_length */
+ {
+ char *tmp;
+ if (strchr(optarg, ':') != (char *)NULL) {
+ /* range, so get two values, min:max */
+ tmp = strtok(optarg, ":");
+ if (tmp == NULL) {
+ printf("Bad format: try \"-m min:max\"\n");
+ exit (STATE_WARNING);
+ } else
+ min_page_len = atoi(tmp);
+
+ tmp = strtok(NULL, ":");
+ if (tmp == NULL) {
+ printf("Bad format: try \"-m min:max\"\n");
+ exit (STATE_WARNING);
+ } else
+ max_page_len = atoi(tmp);
+ } else
+ min_page_len = atoi (optarg);
+ break;
+ }
+ case 'N': /* no-body */
+ no_body = TRUE;
+ break;
+ case 'M': /* max-age */
+ {
+ int L = strlen(optarg);
+ if (L && optarg[L-1] == 'm')
+ maximum_age = atoi (optarg) * 60;
+ else if (L && optarg[L-1] == 'h')
+ maximum_age = atoi (optarg) * 60 * 60;
+ else if (L && optarg[L-1] == 'd')
+ maximum_age = atoi (optarg) * 60 * 60 * 24;
+ else if (L && (optarg[L-1] == 's' ||
+ isdigit (optarg[L-1])))
+ maximum_age = atoi (optarg);
+ else {
+ fprintf (stderr, "unparsable max-age: %s\n", optarg);
+ exit (STATE_WARNING);
+ }
+ if (verbose >= 2)
+ printf ("* Maximal age of document set to %d seconds\n", maximum_age);
+ }
+ break;
+ case 'E': /* show extended perfdata */
+ show_extended_perfdata = TRUE;
+ break;
+ case 'B': /* print body content after status line */
+ show_body = TRUE;
+ break;
+ case HTTP_VERSION_OPTION:
+ curl_http_version = CURL_HTTP_VERSION_NONE;
+ if (strcmp (optarg, "1.0") == 0) {
+ curl_http_version = CURL_HTTP_VERSION_1_0;
+ } else if (strcmp (optarg, "1.1") == 0) {
+ curl_http_version = CURL_HTTP_VERSION_1_1;
+ } else if ((strcmp (optarg, "2.0") == 0) || (strcmp (optarg, "2") == 0)) {
+#if LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 33, 0)
+ curl_http_version = CURL_HTTP_VERSION_2_0;
+#else
+ curl_http_version = CURL_HTTP_VERSION_NONE;
+#endif /* LIBCURL_VERSION_NUM >= MAKE_LIBCURL_VERSION(7, 33, 0) */
+ } else {
+ fprintf (stderr, "unkown http-version parameter: %s\n", optarg);
+ exit (STATE_WARNING);
+ }
+ break;
+ case AUTOMATIC_DECOMPRESSION:
+ automatic_decompression = TRUE;
+ break;
+ case '?':
+ /* print short usage statement if args not parsable */
+ usage5 ();
+ break;
+ }
+ }
+
+ c = optind;
+
+ if (server_address == NULL && c < argc)
+ server_address = strdup (argv[c++]);
+
+ if (host_name == NULL && c < argc)
+ host_name = strdup (argv[c++]);
+
+ if (server_address == NULL) {
+ if (host_name == NULL)
+ usage4 (_("You must specify a server address or host name"));
+ else
+ server_address = strdup (host_name);
+ }
+
+ set_thresholds(&thlds, warning_thresholds, critical_thresholds);
+
+ if (critical_thresholds && thlds->critical->end>(double)socket_timeout)
+ socket_timeout = (int)thlds->critical->end + 1;
+ if (verbose >= 2)
+ printf ("* Socket timeout set to %ld seconds\n", socket_timeout);
+
+ if (http_method == NULL)
+ http_method = strdup ("GET");
+
+ if (client_cert && !client_privkey)
+ usage4 (_("If you use a client certificate you must also specify a private key file"));
+
+ if (virtual_port == 0)
+ virtual_port = server_port;
+ else {
+ if ((use_ssl && server_port == HTTPS_PORT) || (!use_ssl && server_port == HTTP_PORT))
+ if(specify_port == FALSE)
+ server_port = virtual_port;
+ }
+
+ return TRUE;
+}
+
+char *perfd_time (double elapsed_time)
+{
+ return fperfdata ("time", elapsed_time, "s",
+ thlds->warning?TRUE:FALSE, thlds->warning?thlds->warning->end:0,
+ thlds->critical?TRUE:FALSE, thlds->critical?thlds->critical->end:0,
+ TRUE, 0, TRUE, socket_timeout);
+}
+
+char *perfd_time_connect (double elapsed_time_connect)
+{
+ return fperfdata ("time_connect", elapsed_time_connect, "s", FALSE, 0, FALSE, 0, FALSE, 0, TRUE, socket_timeout);
+}
+
+char *perfd_time_ssl (double elapsed_time_ssl)
+{
+ return fperfdata ("time_ssl", elapsed_time_ssl, "s", FALSE, 0, FALSE, 0, FALSE, 0, TRUE, socket_timeout);
+}
+
+char *perfd_time_headers (double elapsed_time_headers)
+{
+ return fperfdata ("time_headers", elapsed_time_headers, "s", FALSE, 0, FALSE, 0, FALSE, 0, TRUE, socket_timeout);
+}
+
+char *perfd_time_firstbyte (double elapsed_time_firstbyte)
+{
+ return fperfdata ("time_firstbyte", elapsed_time_firstbyte, "s", FALSE, 0, FALSE, 0, FALSE, 0, TRUE, socket_timeout);
+}
+
+char *perfd_time_transfer (double elapsed_time_transfer)
+{
+ return fperfdata ("time_transfer", elapsed_time_transfer, "s", FALSE, 0, FALSE, 0, FALSE, 0, TRUE, socket_timeout);
+}
+
+char *perfd_size (int page_len)
+{
+ return perfdata ("size", page_len, "B",
+ (min_page_len>0?TRUE:FALSE), min_page_len,
+ (min_page_len>0?TRUE:FALSE), 0,
+ TRUE, 0, FALSE, 0);
+}
+
/* Print the full --help text to stdout: description, option reference,
 * notes and worked examples. The caller exits with STATE_UNKNOWN after
 * this returns; nothing here touches global state. */
void
print_help (void)
{
  print_revision (progname, NP_VERSION);

  printf ("Copyright (c) 1999 Ethan Galstad <nagios@nagios.org>\n");
  printf (COPYRIGHT, copyright, email);

  printf ("%s\n", _("This plugin tests the HTTP service on the specified host. It can test"));
  printf ("%s\n", _("normal (http) and secure (https) servers, follow redirects, search for"));
  printf ("%s\n", _("strings and regular expressions, check connection times, and report on"));
  printf ("%s\n", _("certificate expiration times."));
  printf ("\n");
  printf ("%s\n", _("It makes use of libcurl to do so. It tries to be as compatible to check_http"));
  printf ("%s\n", _("as possible."));

  printf ("\n\n");

  print_usage ();

  printf (_("NOTE: One or both of -H and -I must be specified"));

  printf ("\n");

  printf (UT_HELP_VRSN);
  printf (UT_EXTRA_OPTS);

  /* host / connection options */
  printf (" %s\n", "-H, --hostname=ADDRESS");
  printf (" %s\n", _("Host name argument for servers using host headers (virtual host)"));
  printf (" %s\n", _("Append a port to include it in the header (eg: example.com:5000)"));
  printf (" %s\n", "-I, --IP-address=ADDRESS");
  printf (" %s\n", _("IP address or name (use numeric address if possible to bypass DNS lookup)."));
  printf (" %s\n", "-p, --port=INTEGER");
  printf (" %s", _("Port number (default: "));
  printf ("%d)\n", HTTP_PORT);

  printf (UT_IPv46);

  /* TLS/SSL options, only compiled in when libcurl has SSL support */
#ifdef LIBCURL_FEATURE_SSL
  printf (" %s\n", "-S, --ssl=VERSION[+]");
  printf (" %s\n", _("Connect via SSL. Port defaults to 443. VERSION is optional, and prevents"));
  printf (" %s\n", _("auto-negotiation (2 = SSLv2, 3 = SSLv3, 1 = TLSv1, 1.1 = TLSv1.1,"));
  printf (" %s\n", _("1.2 = TLSv1.2, 1.3 = TLSv1.3). With a '+' suffix, newer versions are also accepted."));
  printf (" %s\n", _("Note: SSLv2 and SSLv3 are deprecated and are usually disabled in libcurl"));
  printf (" %s\n", "--sni");
  printf (" %s\n", _("Enable SSL/TLS hostname extension support (SNI)"));
#if LIBCURL_VERSION_NUM >= 0x071801
  printf (" %s\n", _("Note: --sni is the default in libcurl as SSLv2 and SSLV3 are deprecated and"));
  printf (" %s\n", _(" SNI only really works since TLSv1.0"));
#else
  printf (" %s\n", _("Note: SNI is not supported in libcurl before 7.18.1"));
#endif
  printf (" %s\n", "-C, --certificate=INTEGER[,INTEGER]");
  printf (" %s\n", _("Minimum number of days a certificate has to be valid. Port defaults to 443"));
  printf (" %s\n", _("(when this option is used the URL is not checked.)"));
  printf (" %s\n", "-J, --client-cert=FILE");
  printf (" %s\n", _("Name of file that contains the client certificate (PEM format)"));
  printf (" %s\n", _("to be used in establishing the SSL session"));
  printf (" %s\n", "-K, --private-key=FILE");
  printf (" %s\n", _("Name of file containing the private key (PEM format)"));
  printf (" %s\n", _("matching the client certificate"));
  printf (" %s\n", "--ca-cert=FILE");
  printf (" %s\n", _("CA certificate file to verify peer against"));
  printf (" %s\n", "-D, --verify-cert");
  printf (" %s\n", _("Verify the peer's SSL certificate and hostname"));
#endif

  /* content / request options */
  printf (" %s\n", "-e, --expect=STRING");
  printf (" %s\n", _("Comma-delimited list of strings, at least one of them is expected in"));
  printf (" %s", _("the first (status) line of the server response (default: "));
  printf ("%s)\n", HTTP_EXPECT);
  printf (" %s\n", _("If specified skips all other status line logic (ex: 3xx, 4xx, 5xx processing)"));
  printf (" %s\n", "-d, --header-string=STRING");
  printf (" %s\n", _("String to expect in the response headers"));
  printf (" %s\n", "-s, --string=STRING");
  printf (" %s\n", _("String to expect in the content"));
  printf (" %s\n", "-u, --url=PATH");
  printf (" %s\n", _("URL to GET or POST (default: /)"));
  printf (" %s\n", "-P, --post=STRING");
  printf (" %s\n", _("URL encoded http POST data"));
  printf (" %s\n", "-j, --method=STRING (for example: HEAD, OPTIONS, TRACE, PUT, DELETE, CONNECT)");
  printf (" %s\n", _("Set HTTP method."));
  printf (" %s\n", "-N, --no-body");
  printf (" %s\n", _("Don't wait for document body: stop reading after headers."));
  printf (" %s\n", _("(Note that this still does an HTTP GET or POST, not a HEAD.)"));
  printf (" %s\n", "-M, --max-age=SECONDS");
  printf (" %s\n", _("Warn if document is more than SECONDS old. the number can also be of"));
  printf (" %s\n", _("the form \"10m\" for minutes, \"10h\" for hours, or \"10d\" for days."));
  printf (" %s\n", "-T, --content-type=STRING");
  printf (" %s\n", _("specify Content-Type header media type when POSTing\n"));
  printf (" %s\n", "-l, --linespan");
  printf (" %s\n", _("Allow regex to span newlines (must precede -r or -R)"));
  printf (" %s\n", "-r, --regex, --ereg=STRING");
  printf (" %s\n", _("Search page for regex STRING"));
  printf (" %s\n", "-R, --eregi=STRING");
  printf (" %s\n", _("Search page for case-insensitive regex STRING"));
  printf (" %s\n", "--invert-regex");
  printf (" %s\n", _("Return CRITICAL if found, OK if not\n"));
  printf (" %s\n", "-a, --authorization=AUTH_PAIR");
  printf (" %s\n", _("Username:password on sites with basic authentication"));
  printf (" %s\n", "-b, --proxy-authorization=AUTH_PAIR");
  printf (" %s\n", _("Username:password on proxy-servers with basic authentication"));
  printf (" %s\n", "-A, --useragent=STRING");
  printf (" %s\n", _("String to be sent in http header as \"User Agent\""));
  printf (" %s\n", "-k, --header=STRING");
  printf (" %s\n", _("Any other tags to be sent in http header. Use multiple times for additional headers"));
  printf (" %s\n", "-E, --extended-perfdata");
  printf (" %s\n", _("Print additional performance data"));
  printf (" %s\n", "-B, --show-body");
  printf (" %s\n", _("Print body content below status line"));
  printf (" %s\n", "-L, --link");
  printf (" %s\n", _("Wrap output in HTML link (obsoleted by urlize)"));
  printf (" %s\n", "-f, --onredirect=<ok|warning|critical|follow|sticky|stickyport|curl>");
  printf (" %s\n", _("How to handle redirected pages. sticky is like follow but stick to the"));
  printf (" %s\n", _("specified IP address. stickyport also ensures port stays the same."));
  printf (" %s\n", _("follow uses the old redirection algorithm of check_http."));
  printf (" %s\n", _("curl uses CURL_FOLLOWLOCATION built into libcurl."));
  printf (" %s\n", "-m, --pagesize=INTEGER<:INTEGER>");
  printf (" %s\n", _("Minimum page size required (bytes) : Maximum page size required (bytes)"));
  printf ("\n");
  printf (" %s\n", "--http-version=VERSION");
  printf (" %s\n", _("Connect via specific HTTP protocol."));
  printf (" %s\n", _("1.0 = HTTP/1.0, 1.1 = HTTP/1.1, 2.0 = HTTP/2 (HTTP/2 will fail without -S)"));
  printf (" %s\n", "--enable-automatic-decompression");
  printf (" %s\n", _("Enable automatic decompression of body (CURLOPT_ACCEPT_ENCODING)."));
  printf ("\n");

  printf (UT_WARN_CRIT);

  printf (UT_CONN_TIMEOUT, DEFAULT_SOCKET_TIMEOUT);

  printf (UT_VERBOSE);

  /* behaviour notes and examples */
  printf ("\n");
  printf ("%s\n", _("Notes:"));
  printf (" %s\n", _("This plugin will attempt to open an HTTP connection with the host."));
  printf (" %s\n", _("Successful connects return STATE_OK, refusals and timeouts return STATE_CRITICAL"));
  printf (" %s\n", _("other errors return STATE_UNKNOWN. Successful connects, but incorrect response"));
  printf (" %s\n", _("messages from the host result in STATE_WARNING return values. If you are"));
  printf (" %s\n", _("checking a virtual server that uses 'host headers' you must supply the FQDN"));
  printf (" %s\n", _("(fully qualified domain name) as the [host_name] argument."));

#ifdef LIBCURL_FEATURE_SSL
  printf ("\n");
  printf (" %s\n", _("This plugin can also check whether an SSL enabled web server is able to"));
  printf (" %s\n", _("serve content (optionally within a specified time) or whether the X509 "));
  printf (" %s\n", _("certificate is still valid for the specified number of days."));
  printf ("\n");
  printf (" %s\n", _("Please note that this plugin does not check if the presented server"));
  printf (" %s\n", _("certificate matches the hostname of the server, or if the certificate"));
  printf (" %s\n", _("has a valid chain of trust to one of the locally installed CAs."));
  printf ("\n");
  printf ("%s\n", _("Examples:"));
  printf (" %s\n\n", "CHECK CONTENT: check_curl -w 5 -c 10 --ssl -H www.verisign.com");
  printf (" %s\n", _("When the 'www.verisign.com' server returns its content within 5 seconds,"));
  printf (" %s\n", _("a STATE_OK will be returned. When the server returns its content but exceeds"));
  printf (" %s\n", _("the 5-second threshold, a STATE_WARNING will be returned. When an error occurs,"));
  printf (" %s\n", _("a STATE_CRITICAL will be returned."));
  printf ("\n");
  printf (" %s\n\n", "CHECK CERTIFICATE: check_curl -H www.verisign.com -C 14");
  printf (" %s\n", _("When the certificate of 'www.verisign.com' is valid for more than 14 days,"));
  printf (" %s\n", _("a STATE_OK is returned. When the certificate is still valid, but for less than"));
  printf (" %s\n", _("14 days, a STATE_WARNING is returned. A STATE_CRITICAL will be returned when"));
  printf (" %s\n\n", _("the certificate is expired."));
  printf ("\n");
  printf (" %s\n\n", "CHECK CERTIFICATE: check_curl -H www.verisign.com -C 30,14");
  printf (" %s\n", _("When the certificate of 'www.verisign.com' is valid for more than 30 days,"));
  printf (" %s\n", _("a STATE_OK is returned. When the certificate is still valid, but for less than"));
  printf (" %s\n", _("30 days, but more than 14 days, a STATE_WARNING is returned."));
  printf (" %s\n", _("A STATE_CRITICAL will be returned when certificate expires in less than 14 days"));
#endif

  printf ("\n %s\n", "CHECK WEBSERVER CONTENT VIA PROXY:");
  printf (" %s\n", _("It is recommended to use an environment proxy like:"));
  printf (" %s\n", _("http_proxy=http://192.168.100.35:3128 ./check_curl -H www.monitoring-plugins.org"));
  printf (" %s\n", _("legacy proxy requests in check_http style still work:"));
  printf (" %s\n", _("check_curl -I 192.168.100.35 -p 3128 -u http://www.monitoring-plugins.org/ -H www.monitoring-plugins.org"));

#ifdef LIBCURL_FEATURE_SSL
  printf ("\n %s\n", "CHECK SSL WEBSERVER CONTENT VIA PROXY USING HTTP 1.1 CONNECT: ");
  printf (" %s\n", _("It is recommended to use an environment proxy like:"));
  printf (" %s\n", _("https_proxy=http://192.168.100.35:3128 ./check_curl -H www.verisign.com -S"));
  printf (" %s\n", _("legacy proxy requests in check_http style still work:"));
  printf (" %s\n", _("check_curl -I 192.168.100.35 -p 3128 -u https://www.verisign.com/ -S -j CONNECT -H www.verisign.com "));
  printf (" %s\n", _("all these options are needed: -I <proxy> -p <proxy-port> -u <check-url> -S(sl) -j CONNECT -H <webserver>"));
  printf (" %s\n", _("a STATE_OK will be returned. When the server returns its content but exceeds"));
  printf (" %s\n", _("the 5-second threshold, a STATE_WARNING will be returned. When an error occurs,"));
  printf (" %s\n", _("a STATE_CRITICAL will be returned."));

#endif

  printf (UT_SUPPORT);

}
+
+
+
/* Print the short usage synopsis (both invocation forms) to stdout.
 * Also warns that check_curl is experimental compared to check_http. */
void
print_usage (void)
{
  printf ("%s\n", _("Usage:"));
  printf (" %s -H <vhost> | -I <IP-address> [-u <uri>] [-p <port>]\n",progname);
  printf ("       [-J <client certificate file>] [-K <private key>] [--ca-cert <CA certificate file>] [-D]\n");
  printf ("       [-w <warn time>] [-c <critical time>] [-t <timeout>] [-L] [-E] [-a auth]\n");
  printf ("       [-b proxy_auth] [-f <ok|warning|critcal|follow|sticky|stickyport|curl>]\n");
  printf ("       [-e <expect>] [-d string] [-s string] [-l] [-r <regex> | -R <case-insensitive regex>]\n");
  printf ("       [-P string] [-m <min_pg_size>:<max_pg_size>] [-4|-6] [-N] [-M <age>]\n");
  printf ("       [-A string] [-k string] [-S <version>] [--sni]\n");
  printf ("       [-T <content-type>] [-j method]\n");
  printf ("       [--http-version=<version>]\n");
  printf (" %s -H <vhost> | -I <IP-address> -C <warn_age>[,<crit_age>]\n",progname);
  printf ("       [-p <port>] [-t <timeout>] [-4|-6] [--sni]\n");
  printf ("\n");
#ifdef LIBCURL_FEATURE_SSL
  printf ("%s\n", _("In the first form, make an HTTP request."));
  printf ("%s\n\n", _("In the second form, connect to the server and check the TLS certificate."));
#endif
  printf ("%s\n", _("WARNING: check_curl is experimental. Please use"));
  printf ("%s\n\n", _("check_http if you need a stable version."));
}
+
/* Print libcurl's own version banner (e.g. "libcurl/7.x ...") to stdout. */
void
print_curl_version (void)
{
  puts (curl_version ());
}
+
+int
+curlhelp_initwritebuffer (curlhelp_write_curlbuf *buf)
+{
+ buf->bufsize = DEFAULT_BUFFER_SIZE;
+ buf->buflen = 0;
+ buf->buf = (char *)malloc ((size_t)buf->bufsize);
+ if (buf->buf == NULL) return -1;
+ return 0;
+}
+
+int
+curlhelp_buffer_write_callback (void *buffer, size_t size, size_t nmemb, void *stream)
+{
+ curlhelp_write_curlbuf *buf = (curlhelp_write_curlbuf *)stream;
+
+ while (buf->bufsize < buf->buflen + size * nmemb + 1) {
+ buf->bufsize *= buf->bufsize * 2;
+ buf->buf = (char *)realloc (buf->buf, buf->bufsize);
+ if (buf->buf == NULL) return -1;
+ }
+
+ memcpy (buf->buf + buf->buflen, buffer, size * nmemb);
+ buf->buflen += size * nmemb;
+ buf->buf[buf->buflen] = '\0';
+
+ return (int)(size * nmemb);
+}
+
+int
+curlhelp_buffer_read_callback (void *buffer, size_t size, size_t nmemb, void *stream)
+{
+ curlhelp_read_curlbuf *buf = (curlhelp_read_curlbuf *)stream;
+
+ size_t n = min (nmemb * size, buf->buflen - buf->pos);
+
+ memcpy (buffer, buf->buf + buf->pos, n);
+ buf->pos += n;
+
+ return (int)n;
+}
+
+void
+curlhelp_freewritebuffer (curlhelp_write_curlbuf *buf)
+{
+ free (buf->buf);
+ buf->buf = NULL;
+}
+
+int
+curlhelp_initreadbuffer (curlhelp_read_curlbuf *buf, const char *data, size_t datalen)
+{
+ buf->buflen = datalen;
+ buf->buf = (char *)malloc ((size_t)buf->buflen);
+ if (buf->buf == NULL) return -1;
+ memcpy (buf->buf, data, datalen);
+ buf->pos = 0;
+ return 0;
+}
+
+void
+curlhelp_freereadbuffer (curlhelp_read_curlbuf *buf)
+{
+ free (buf->buf);
+ buf->buf = NULL;
+}
+
/* Find the LAST occurrence of `needle` in `haystack` (reverse strstr).
 * Matches are scanned left to right and advance by strlen(needle), so
 * overlapping occurrences are not considered (e.g. in "aaaa" the last
 * "aa" found is at offset 2). Returns NULL when either argument is NULL
 * or empty, or when there is no match.
 * TODO: logically belongs in a shared string-helper module. */
const char*
strrstr2(const char *haystack, const char *needle)
{
  const char *match = NULL;
  const char *scan;
  size_t needle_len;

  if (haystack == NULL || needle == NULL)
    return NULL;
  if (*haystack == '\0' || *needle == '\0')
    return NULL;

  needle_len = strlen (needle);
  scan = strstr (haystack, needle);
  while (scan != NULL) {
    match = scan;
    scan += needle_len;
    if (*scan == '\0')
      break;
    scan = strstr (scan, needle);
  }
  return match;
}
+
+int
+curlhelp_parse_statusline (const char *buf, curlhelp_statusline *status_line)
+{
+ char *first_line_end;
+ char *p;
+ size_t first_line_len;
+ char *pp;
+ const char *start;
+ char *first_line_buf;
+
+ /* find last start of a new header */
+ start = strrstr2 (buf, "\r\nHTTP/");
+ if (start != NULL) {
+ start += 2;
+ buf = start;
+ }
+
+ first_line_end = strstr(buf, "\r\n");
+ if (first_line_end == NULL) return -1;
+
+ first_line_len = (size_t)(first_line_end - buf);
+ status_line->first_line = (char *)malloc (first_line_len + 1);
+ if (status_line->first_line == NULL) return -1;
+ memcpy (status_line->first_line, buf, first_line_len);
+ status_line->first_line[first_line_len] = '\0';
+ first_line_buf = strdup( status_line->first_line );
+
+ /* protocol and version: "HTTP/x.x" SP or "HTTP/2" SP */
+
+ p = strtok(first_line_buf, "/");
+ if( p == NULL ) { free( first_line_buf ); return -1; }
+ if( strcmp( p, "HTTP" ) != 0 ) { free( first_line_buf ); return -1; }
+
+ p = strtok( NULL, " " );
+ if( p == NULL ) { free( first_line_buf ); return -1; }
+ if( strchr( p, '.' ) != NULL ) {
+
+ /* HTTP 1.x case */
+ char *ppp;
+ ppp = strtok( p, "." );
+ status_line->http_major = (int)strtol( p, &pp, 10 );
+ if( *pp != '\0' ) { free( first_line_buf ); return -1; }
+ ppp = strtok( NULL, " " );
+ status_line->http_minor = (int)strtol( p, &pp, 10 );
+ if( *pp != '\0' ) { free( first_line_buf ); return -1; }
+ p += 4; /* 1.x SP */
+ } else {
+ /* HTTP 2 case */
+ status_line->http_major = (int)strtol( p, &pp, 10 );
+ status_line->http_minor = 0;
+ p += 2; /* 2 SP */
+ }
+
+ /* status code: "404" or "404.1", then SP */
+
+ p = strtok( p, " " );
+ if( p == NULL ) { free( first_line_buf ); return -1; }
+ if( strchr( p, '.' ) != NULL ) {
+ char *ppp;
+ ppp = strtok( p, "." );
+ status_line->http_code = (int)strtol( ppp, &pp, 10 );
+ if( *pp != '\0' ) { free( first_line_buf ); return -1; }
+ ppp = strtok( NULL, "" );
+ status_line->http_subcode = (int)strtol( ppp, &pp, 10 );
+ if( *pp != '\0' ) { free( first_line_buf ); return -1; }
+ p += 6; /* 400.1 SP */
+ } else {
+ status_line->http_code = (int)strtol( p, &pp, 10 );
+ status_line->http_subcode = -1;
+ if( *pp != '\0' ) { free( first_line_buf ); return -1; }
+ p += 4; /* 400 SP */
+ }
+
+ /* Human readable message: "Not Found" CRLF */
+
+ p = strtok( p, "" );
+ if( p == NULL ) { status_line->msg = ""; return 0; }
+ status_line->msg = status_line->first_line + ( p - first_line_buf );
+ free( first_line_buf );
+
+ return 0;
+}
+
+void
+curlhelp_free_statusline (curlhelp_statusline *status_line)
+{
+ free (status_line->first_line);
+}
+
/* Replace every CR and LF in `s` with a space, in place. */
void
remove_newlines (char *s)
{
  while (*s != '\0') {
    if (*s == '\r' || *s == '\n')
      *s = ' ';
    s++;
  }
}
+
/* Return a freshly allocated copy of the value of the first header whose
 * name matches `header` case-insensitively, or NULL when no header matches.
 * The caller owns the returned string (strndup) and must free() it.
 * NOTE(review): the comparison length max(name_len, 4) is suspicious --
 * for header names shorter than 4 bytes it compares past the parsed name
 * token (bytes still inside the response buffer, since phr_header names
 * are not NUL-terminated), which can mismatch or false-match; confirm
 * against the picohttpparser field semantics. */
char *
get_header_value (const struct phr_header* headers, const size_t nof_headers, const char* header)
{
  int i;  /* NOTE(review): int vs size_t bound; fine for the <=255 headers used here */
  for( i = 0; i < nof_headers; i++ ) {
    if(headers[i].name != NULL && strncasecmp( header, headers[i].name, max( headers[i].name_len, 4 ) ) == 0 ) {
      return strndup( headers[i].value, headers[i].value_len );
    }
  }
  return NULL;
}
+
/* Check the response's "Date" and "Last-Modified" headers against the
 * configured --max-age (global maximum_age, in seconds).
 * header_buf: raw response headers as captured from libcurl.
 * msg: the plugin's output buffer; findings are appended into it.
 * Returns a plugin state: STATE_OK, STATE_UNKNOWN (no server date) or
 * STATE_CRITICAL (missing/unparsable dates, document from the future,
 * or document older than maximum_age).
 * NOTE(review): each snprintf passes *msg both as destination and as a
 * "%s" source argument; overlapping src/dst is undefined behaviour per
 * the C standard even though it appears to work on common libcs -- confirm. */
int
check_document_dates (const curlhelp_write_curlbuf *header_buf, char (*msg)[DEFAULT_BUFFER_SIZE])
{
  char *server_date = NULL;
  char *document_date = NULL;
  int date_result = STATE_OK;
  curlhelp_statusline status_line;
  struct phr_header headers[255];
  size_t nof_headers = 255;
  size_t msglen;

  /* NOTE(review): res is never checked; a failed parse still falls
   * through to the header lookups below */
  int res = phr_parse_response (header_buf->buf, header_buf->buflen,
    &status_line.http_minor, &status_line.http_code, &status_line.msg, &msglen,
    headers, &nof_headers, 0);

  /* both lookups return malloc'd copies (or NULL); freed at the end */
  server_date = get_header_value (headers, nof_headers, "date");
  document_date = get_header_value (headers, nof_headers, "last-modified");

  if (!server_date || !*server_date) {
    snprintf (*msg, DEFAULT_BUFFER_SIZE, _("%sServer date unknown, "), *msg);
    date_result = max_state_alt(STATE_UNKNOWN, date_result);
  } else if (!document_date || !*document_date) {
    snprintf (*msg, DEFAULT_BUFFER_SIZE, _("%sDocument modification date unknown, "), *msg);
    date_result = max_state_alt(STATE_CRITICAL, date_result);
  } else {
    time_t srv_data = curl_getdate (server_date, NULL);
    time_t doc_data = curl_getdate (document_date, NULL);
    if (verbose >= 2)
      printf ("* server date: '%s' (%d), doc_date: '%s' (%d)\n", server_date, (int)srv_data, document_date, (int)doc_data);
    if (srv_data <= 0) {
      snprintf (*msg, DEFAULT_BUFFER_SIZE, _("%sServer date \"%100s\" unparsable, "), *msg, server_date);
      date_result = max_state_alt(STATE_CRITICAL, date_result);
    } else if (doc_data <= 0) {
      snprintf (*msg, DEFAULT_BUFFER_SIZE, _("%sDocument date \"%100s\" unparsable, "), *msg, document_date);
      date_result = max_state_alt(STATE_CRITICAL, date_result);
    } else if (doc_data > srv_data + 30) {
      /* allow 30 seconds of clock skew before flagging a future date */
      snprintf (*msg, DEFAULT_BUFFER_SIZE, _("%sDocument is %d seconds in the future, "), *msg, (int)doc_data - (int)srv_data);
      date_result = max_state_alt(STATE_CRITICAL, date_result);
    } else if (doc_data < srv_data - maximum_age) {
      int n = (srv_data - doc_data);
      /* report in days past ~2 days, otherwise as H:MM:SS */
      if (n > (60 * 60 * 24 * 2)) {
        snprintf (*msg, DEFAULT_BUFFER_SIZE, _("%sLast modified %.1f days ago, "), *msg, ((float) n) / (60 * 60 * 24));
        date_result = max_state_alt(STATE_CRITICAL, date_result);
      } else {
        snprintf (*msg, DEFAULT_BUFFER_SIZE, _("%sLast modified %d:%02d:%02d ago, "), *msg, n / (60 * 60), (n / 60) % 60, n % 60);
        date_result = max_state_alt(STATE_CRITICAL, date_result);
      }
    }
  }

  if (server_date) free (server_date);
  if (document_date) free (document_date);

  return date_result;
}
+
+
+int
+get_content_length (const curlhelp_write_curlbuf* header_buf, const curlhelp_write_curlbuf* body_buf)
+{
+ const char *s;
+ int content_length = 0;
+ char *copy;
+ struct phr_header headers[255];
+ size_t nof_headers = 255;
+ size_t msglen;
+ char *content_length_s = NULL;
+ curlhelp_statusline status_line;
+
+ int res = phr_parse_response (header_buf->buf, header_buf->buflen,
+ &status_line.http_minor, &status_line.http_code, &status_line.msg, &msglen,
+ headers, &nof_headers, 0);
+
+ content_length_s = get_header_value (headers, nof_headers, "content-length");
+ if (!content_length_s) {
+ return header_buf->buflen + body_buf->buflen;
+ }
+ content_length_s += strspn (content_length_s, " \t");
+ content_length = atoi (content_length_s);
+ if (content_length != body_buf->buflen) {
+ /* TODO: should we warn if the actual and the reported body length don't match? */
+ }
+
+ if (content_length_s) free (content_length_s);
+
+ return header_buf->buflen + body_buf->buflen;
+}
+
+/* TODO: is there a better way in libcurl to check for the SSL library? */
+curlhelp_ssl_library
+curlhelp_get_ssl_library (CURL* curl)
+{
+ curl_version_info_data* version_data;
+ char *ssl_version;
+ char *library;
+ curlhelp_ssl_library ssl_library = CURLHELP_SSL_LIBRARY_UNKNOWN;
+
+ version_data = curl_version_info (CURLVERSION_NOW);
+ if (version_data == NULL) return CURLHELP_SSL_LIBRARY_UNKNOWN;
+
+ ssl_version = strdup (version_data->ssl_version);
+ if (ssl_version == NULL ) return CURLHELP_SSL_LIBRARY_UNKNOWN;
+
+ library = strtok (ssl_version, "/");
+ if (library == NULL) return CURLHELP_SSL_LIBRARY_UNKNOWN;
+
+ if (strcmp (library, "OpenSSL") == 0)
+ ssl_library = CURLHELP_SSL_LIBRARY_OPENSSL;
+ else if (strcmp (library, "LibreSSL") == 0)
+ ssl_library = CURLHELP_SSL_LIBRARY_LIBRESSL;
+ else if (strcmp (library, "GnuTLS") == 0)
+ ssl_library = CURLHELP_SSL_LIBRARY_GNUTLS;
+ else if (strcmp (library, "NSS") == 0)
+ ssl_library = CURLHELP_SSL_LIBRARY_NSS;
+
+ if (verbose >= 2)
+ printf ("* SSL library string is : %s %s (%d)\n", version_data->ssl_version, library, ssl_library);
+
+ free (ssl_version);
+
+ return ssl_library;
+}
+
+const char*
+curlhelp_get_ssl_library_string (curlhelp_ssl_library ssl_library)
+{
+ switch (ssl_library) {
+ case CURLHELP_SSL_LIBRARY_OPENSSL:
+ return "OpenSSL";
+ case CURLHELP_SSL_LIBRARY_LIBRESSL:
+ return "LibreSSL";
+ case CURLHELP_SSL_LIBRARY_GNUTLS:
+ return "GnuTLS";
+ case CURLHELP_SSL_LIBRARY_NSS:
+ return "NSS";
+ case CURLHELP_SSL_LIBRARY_UNKNOWN:
+ default:
+ return "unknown";
+ }
+}
+
+#ifdef LIBCURL_FEATURE_SSL
+#ifndef USE_OPENSSL
/* Parse a certificate validity date string into a time_t (interpreted in
 * the current local time; the trailing "GMT" is matched literally).
 * Returns -1 when s is NULL or matches neither accepted format.
 *
 * Fixes over the previous version: struct tm is zeroed before strptime
 * (which only fills the fields present in the format), and mktime is no
 * longer called on garbage when both parses fail. */
time_t
parse_cert_date (const char *s)
{
  struct tm tm;
  char *res;

  if (!s) return -1;

  memset (&tm, 0, sizeof (tm));
  tm.tm_isdst = -1;  /* let mktime decide whether DST applies */

  /* e.g. "2020-01-17 14:25:12 GMT" */
  res = strptime (s, "%Y-%m-%d %H:%M:%S GMT", &tm);
  /* e.g. "2020 09 11 12:00:00 GMT" */
  if (res == NULL) res = strptime (s, "%Y %m %d %H:%M:%S GMT", &tm);
  if (res == NULL) return -1;

  return mktime (&tm);
}
+
+/* TODO: this needs cleanup in the sslutils.c, maybe we the #else case to
+ * OpenSSL could be this function
+ */
+int
+net_noopenssl_check_certificate (cert_ptr_union* cert_ptr, int days_till_exp_warn, int days_till_exp_crit)
+{
+ int i;
+ struct curl_slist* slist;
+ int cname_found = 0;
+ char* start_date_str = NULL;
+ char* end_date_str = NULL;
+ time_t start_date;
+ time_t end_date;
+ char *tz;
+ float time_left;
+ int days_left;
+ int time_remaining;
+ char timestamp[50] = "";
+ int status = STATE_UNKNOWN;
+
+ if (verbose >= 2)
+ printf ("**** REQUEST CERTIFICATES ****\n");
+
+ for (i = 0; i < cert_ptr->to_certinfo->num_of_certs; i++) {
+ for (slist = cert_ptr->to_certinfo->certinfo[i]; slist; slist = slist->next) {
+ /* find first common name in subject,
+ * TODO: check alternative subjects for
+ * TODO: have a decent parser here and not a hack
+ * multi-host certificate, check wildcards
+ */
+ if (strncasecmp (slist->data, "Subject:", 8) == 0) {
+ int d = 3;
+ char* p = strstr (slist->data, "CN=");
+ if (p == NULL) {
+ d = 5;
+ p = strstr (slist->data, "CN = ");
+ }
+ if (p != NULL) {
+ if (strncmp (host_name, p+d, strlen (host_name)) == 0) {
+ cname_found = 1;
+ }
+ }
+ } else if (strncasecmp (slist->data, "Start Date:", 11) == 0) {
+ start_date_str = &slist->data[11];
+ } else if (strncasecmp (slist->data, "Expire Date:", 12) == 0) {
+ end_date_str = &slist->data[12];
+ } else if (strncasecmp (slist->data, "Cert:", 5) == 0) {
+ goto HAVE_FIRST_CERT;
+ }
+ if (verbose >= 2)
+ printf ("%d ** %s\n", i, slist->data);
+ }
+ }
+HAVE_FIRST_CERT:
+
+ if (verbose >= 2)
+ printf ("**** REQUEST CERTIFICATES ****\n");
+
+ if (!cname_found) {
+ printf("%s\n",_("CRITICAL - Cannot retrieve certificate subject."));
+ return STATE_CRITICAL;
+ }
+
+ start_date = parse_cert_date (start_date_str);
+ if (start_date <= 0) {
+ snprintf (msg, DEFAULT_BUFFER_SIZE, _("WARNING - Unparsable 'Start Date' in certificate: '%s'"),
+ start_date_str);
+ puts (msg);
+ return STATE_WARNING;
+ }
+
+ end_date = parse_cert_date (end_date_str);
+ if (end_date <= 0) {
+ snprintf (msg, DEFAULT_BUFFER_SIZE, _("WARNING - Unparsable 'Expire Date' in certificate: '%s'"),
+ start_date_str);
+ puts (msg);
+ return STATE_WARNING;
+ }
+
+ time_left = difftime (end_date, time(NULL));
+ days_left = time_left / 86400;
+ tz = getenv("TZ");
+ setenv("TZ", "GMT", 1);
+ tzset();
+ strftime(timestamp, 50, "%c %z", localtime(&end_date));
+ if (tz)
+ setenv("TZ", tz, 1);
+ else
+ unsetenv("TZ");
+ tzset();
+
+ if (days_left > 0 && days_left <= days_till_exp_warn) {
+ printf (_("%s - Certificate '%s' expires in %d day(s) (%s).\n"), (days_left>days_till_exp_crit)?"WARNING":"CRITICAL", host_name, days_left, timestamp);
+ if (days_left > days_till_exp_crit)
+ status = STATE_WARNING;
+ else
+ status = STATE_CRITICAL;
+ } else if (days_left == 0 && time_left > 0) {
+ if (time_left >= 3600)
+ time_remaining = (int) time_left / 3600;
+ else
+ time_remaining = (int) time_left / 60;
+
+ printf (_("%s - Certificate '%s' expires in %u %s (%s)\n"),
+ (days_left>days_till_exp_crit) ? "WARNING" : "CRITICAL", host_name, time_remaining,
+ time_left >= 3600 ? "hours" : "minutes", timestamp);
+
+ if ( days_left > days_till_exp_crit)
+ status = STATE_WARNING;
+ else
+ status = STATE_CRITICAL;
+ } else if (time_left < 0) {
+ printf(_("CRITICAL - Certificate '%s' expired on %s.\n"), host_name, timestamp);
+ status=STATE_CRITICAL;
+ } else if (days_left == 0) {
+ printf (_("%s - Certificate '%s' just expired (%s).\n"), (days_left>days_till_exp_crit)?"WARNING":"CRITICAL", host_name, timestamp);
+ if (days_left > days_till_exp_crit)
+ status = STATE_WARNING;
+ else
+ status = STATE_CRITICAL;
+ } else {
+ printf(_("OK - Certificate '%s' will expire on %s.\n"), host_name, timestamp);
+ status = STATE_OK;
+ }
+ return status;
+}
+#endif /* USE_OPENSSL */
+#endif /* LIBCURL_FEATURE_SSL */
diff --git a/plugins/check_dbi.c b/plugins/check_dbi.c
index 826eb8d9..ced13d05 100644
--- a/plugins/check_dbi.c
+++ b/plugins/check_dbi.c
@@ -35,6 +35,7 @@ const char *email = "devel@monitoring-plugins.org";
#include "common.h"
#include "utils.h"
+#include "utils_cmd.h"
#include "netutils.h"
diff --git a/plugins/check_dig.c b/plugins/check_dig.c
index da4f0ded..5d85ae26 100644
--- a/plugins/check_dig.c
+++ b/plugins/check_dig.c
@@ -331,7 +331,7 @@ print_help (void)
printf ("Copyright (c) 2000 Karl DeBisschop <kdebisschop@users.sourceforge.net>\n");
printf (COPYRIGHT, copyright, email);
- printf (_("This plugin test the DNS service on the specified host using dig"));
+ printf (_("This plugin tests the DNS service on the specified host using dig"));
printf ("\n\n");
diff --git a/plugins/check_disk.c b/plugins/check_disk.c
index e73a0083..54befcad 100644
--- a/plugins/check_disk.c
+++ b/plugins/check_disk.c
@@ -1,29 +1,29 @@
/*****************************************************************************
-*
+*
* Monitoring check_disk plugin
-*
+*
* License: GPL
* Copyright (c) 1999-2008 Monitoring Plugins Development Team
-*
+*
* Description:
-*
+*
* This file contains the check_disk plugin
-*
-*
+*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
-*
+*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*
-*
+*
+*
*****************************************************************************/
const char *progname = "check_disk";
@@ -46,7 +46,7 @@ const char *email = "devel@monitoring-plugins.org";
#include <stdarg.h>
#include "fsusage.h"
#include "mountlist.h"
-#include "intprops.h" /* necessary for TYPE_MAXIMUM */
+#include "intprops.h" /* necessary for TYPE_MAXIMUM */
#if HAVE_LIMITS_H
# include <limits.h>
#endif
@@ -141,6 +141,7 @@ int erronly = FALSE;
int display_mntp = FALSE;
int exact_match = FALSE;
int freespace_ignore_reserved = FALSE;
+int display_inodes_perfdata = FALSE;
char *warn_freespace_units = NULL;
char *crit_freespace_units = NULL;
char *warn_freespace_percent = NULL;
@@ -167,11 +168,10 @@ main (int argc, char **argv)
char *output;
char *details;
char *perf;
+ char *perf_ilabel;
char *preamble;
char *flag_header;
double inode_space_pct;
- double warning_high_tide;
- double critical_high_tide;
int temp_result;
struct mount_entry *me;
@@ -186,6 +186,7 @@ main (int argc, char **argv)
output = strdup ("");
details = strdup ("");
perf = strdup ("");
+ perf_ilabel = strdup ("");
stat_buf = malloc(sizeof *stat_buf);
setlocale (LC_ALL, "");
@@ -242,17 +243,17 @@ main (int argc, char **argv)
#ifdef __CYGWIN__
if (strncmp(path->name, "/cygdrive/", 10) != 0 || strlen(path->name) > 11)
- continue;
+ continue;
snprintf(mountdir, sizeof(mountdir), "%s:\\", me->me_mountdir + 10);
if (GetDriveType(mountdir) != DRIVE_FIXED)
- me->me_remote = 1;
+ me->me_remote = 1;
#endif
/* Filters */
/* Remove filesystems already seen */
if (np_seen_name(seen, me->me_mountdir)) {
continue;
- }
+ }
np_add_name(&seen, me->me_mountdir);
if (path->group == NULL) {
@@ -285,8 +286,17 @@ main (int argc, char **argv)
get_stats (path, &fsp);
if (verbose >= 3) {
- printf ("For %s, used_pct=%g free_pct=%g used_units=%g free_units=%g total_units=%g used_inodes_pct=%g free_inodes_pct=%g fsp.fsu_blocksize=%llu mult=%llu\n",
- me->me_mountdir, path->dused_pct, path->dfree_pct, path->dused_units, path->dfree_units, path->dtotal_units, path->dused_inodes_percent, path->dfree_inodes_percent, fsp.fsu_blocksize, mult);
+ printf ("For %s, used_pct=%g free_pct=%g used_units=%llu free_units=%llu total_units=%llu used_inodes_pct=%g free_inodes_pct=%g fsp.fsu_blocksize=%llu mult=%llu\n",
+ me->me_mountdir,
+ path->dused_pct,
+ path->dfree_pct,
+ path->dused_units,
+ path->dfree_units,
+ path->dtotal_units,
+ path->dused_inodes_percent,
+ path->dfree_inodes_percent,
+ fsp.fsu_blocksize,
+ mult);
}
/* Threshold comparisons */
@@ -323,54 +333,79 @@ main (int argc, char **argv)
*/
/* *_high_tide must be reinitialized at each run */
- warning_high_tide = UINT_MAX;
- critical_high_tide = UINT_MAX;
+ uint64_t warning_high_tide = UINT64_MAX;
if (path->freespace_units->warning != NULL) {
- warning_high_tide = path->dtotal_units - path->freespace_units->warning->end;
+ warning_high_tide = (path->dtotal_units - path->freespace_units->warning->end) * mult;
}
if (path->freespace_percent->warning != NULL) {
- warning_high_tide = abs( min( (double) warning_high_tide, (double) (1.0 - path->freespace_percent->warning->end/100)*path->dtotal_units ));
+ warning_high_tide = min( warning_high_tide, (uint64_t)((1.0 - path->freespace_percent->warning->end/100) * (path->dtotal_units * mult)) );
}
+
+ uint64_t critical_high_tide = UINT64_MAX;
+
if (path->freespace_units->critical != NULL) {
- critical_high_tide = path->dtotal_units - path->freespace_units->critical->end;
+ critical_high_tide = (path->dtotal_units - path->freespace_units->critical->end) * mult;
}
if (path->freespace_percent->critical != NULL) {
- critical_high_tide = abs( min( (double) critical_high_tide, (double) (1.0 - path->freespace_percent->critical->end/100)*path->dtotal_units ));
+ critical_high_tide = min( critical_high_tide, (uint64_t)((1.0 - path->freespace_percent->critical->end/100) * (path->dtotal_units * mult)) );
}
- /* Nb: *_high_tide are unset when == UINT_MAX */
+ /* Nb: *_high_tide are unset when == UINT64_MAX */
xasprintf (&perf, "%s %s", perf,
- perfdata ((!strcmp(me->me_mountdir, "none") || display_mntp) ? me->me_devname : me->me_mountdir,
- path->dused_units, units,
- (warning_high_tide != UINT_MAX ? TRUE : FALSE), warning_high_tide,
- (critical_high_tide != UINT_MAX ? TRUE : FALSE), critical_high_tide,
- TRUE, 0,
- TRUE, path->dtotal_units));
+ perfdata_uint64 (
+ (!strcmp(me->me_mountdir, "none") || display_mntp) ? me->me_devname : me->me_mountdir,
+ path->dused_units * mult, "B",
+ (warning_high_tide == UINT64_MAX ? FALSE : TRUE), warning_high_tide,
+ (critical_high_tide == UINT64_MAX ? FALSE : TRUE), critical_high_tide,
+ TRUE, 0,
+ TRUE, path->dtotal_units * mult));
+
+ if (display_inodes_perfdata) {
+ /* *_high_tide must be reinitialized at each run */
+ warning_high_tide = UINT64_MAX;
+ critical_high_tide = UINT64_MAX;
+
+ if (path->freeinodes_percent->warning != NULL) {
+ warning_high_tide = llabs( min( (double) warning_high_tide, (double) (1.0 - path->freeinodes_percent->warning->end/100)*path->inodes_total ));
+ }
+ if (path->freeinodes_percent->critical != NULL) {
+ critical_high_tide = llabs( min( (double) critical_high_tide, (double) (1.0 - path->freeinodes_percent->critical->end/100)*path->inodes_total ));
+ }
+
+ xasprintf (&perf_ilabel, "%s (inodes)", (!strcmp(me->me_mountdir, "none") || display_mntp) ? me->me_devname : me->me_mountdir);
+ /* Nb: *_high_tide are unset when == UINT64_MAX */
+ xasprintf (&perf, "%s %s", perf,
+ perfdata_uint64 (perf_ilabel,
+ path->inodes_used, "",
+ (warning_high_tide != UINT64_MAX ? TRUE : FALSE), warning_high_tide,
+ (critical_high_tide != UINT64_MAX ? TRUE : FALSE), critical_high_tide,
+ TRUE, 0,
+ TRUE, path->inodes_total));
+ }
if (disk_result==STATE_OK && erronly && !verbose)
continue;
- if(disk_result && verbose >= 1) {
- xasprintf(&flag_header, " %s [", state_text (disk_result));
- } else {
- xasprintf(&flag_header, "");
- }
- xasprintf (&output, "%s%s %s %.0f %s (%.0f%%",
- output, flag_header,
- (!strcmp(me->me_mountdir, "none") || display_mntp) ? me->me_devname : me->me_mountdir,
- path->dfree_units,
- units,
- path->dfree_pct);
- if (path->dused_inodes_percent < 0) {
- xasprintf(&output, "%s inode=-)%s;", output, (disk_result ? "]" : ""));
- } else {
- xasprintf(&output, "%s inode=%.0f%%)%s;", output, path->dfree_inodes_percent, ((disk_result && verbose >= 1) ? "]" : ""));
- }
+ if(disk_result && verbose >= 1) {
+ xasprintf(&flag_header, " %s [", state_text (disk_result));
+ } else {
+ xasprintf(&flag_header, "");
+ }
+ xasprintf (&output, "%s%s %s %llu%s (%.0f%%",
+ output, flag_header,
+ (!strcmp(me->me_mountdir, "none") || display_mntp) ? me->me_devname : me->me_mountdir,
+ path->dfree_units,
+ units,
+ path->dfree_pct);
+ if (path->dused_inodes_percent < 0) {
+ xasprintf(&output, "%s inode=-)%s;", output, (disk_result ? "]" : ""));
+ } else {
+ xasprintf(&output, "%s inode=%.0f%%)%s;", output, path->dfree_inodes_percent, ((disk_result && verbose >= 1) ? "]" : ""));
+ }
free(flag_header);
/* TODO: Need to do a similar debug line
- xasprintf (&details, _("%s\n\
-%.0f of %.0f %s (%.0f%% inode=%.0f%%) free on %s (type %s mounted on %s) warn:%lu crit:%lu warn%%:%.0f%% crit%%:%.0f%%"),
+ xasprintf (&details, _("%s\n\%.0f of %.0f %s (%.0f%% inode=%.0f%%) free on %s (type %s mounted on %s) warn:%lu crit:%lu warn%%:%.0f%% crit%%:%.0f%%"),
details, dfree_units, dtotal_units, units, dfree_pct, inode_space_pct,
me->me_devname, me->me_type, me->me_mountdir,
(unsigned long)w_df, (unsigned long)c_df, w_dfp, c_dfp);
@@ -455,6 +490,7 @@ process_arguments (int argc, char **argv)
{"ignore-eregi-partition", required_argument, 0, 'I'},
{"local", no_argument, 0, 'l'},
{"stat-remote-fs", no_argument, 0, 'L'},
+ {"iperfdata", no_argument, 0, 'P'},
{"mountpoint", no_argument, 0, 'M'},
{"errors-only", no_argument, 0, 'e'},
{"exact-match", no_argument, 0, 'E'},
@@ -477,7 +513,7 @@ process_arguments (int argc, char **argv)
strcpy (argv[c], "-t");
while (1) {
- c = getopt_long (argc, argv, "+?VqhvefCt:c:w:K:W:u:p:x:X:N:mklLg:R:r:i:I:MEA", longopts, &option);
+ c = getopt_long (argc, argv, "+?VqhvefCt:c:w:K:W:u:p:x:X:N:mklLPg:R:r:i:I:MEA", longopts, &option);
if (c == -1 || c == EOF)
break;
@@ -530,14 +566,14 @@ process_arguments (int argc, char **argv)
}
break;
- case 'W': /* warning inode threshold */
+ case 'W': /* warning inode threshold */
if (*optarg == '@') {
warn_freeinodes_percent = optarg;
} else {
xasprintf(&warn_freeinodes_percent, "@%s", optarg);
}
break;
- case 'K': /* critical inode threshold */
+ case 'K': /* critical inode threshold */
if (*optarg == '@') {
crit_freeinodes_percent = optarg;
} else {
@@ -547,21 +583,24 @@ process_arguments (int argc, char **argv)
case 'u':
if (units)
free(units);
- if (! strcmp (optarg, "bytes")) {
+ if (! strcasecmp (optarg, "bytes")) {
mult = (uintmax_t)1;
units = strdup ("B");
- } else if (! strcmp (optarg, "kB")) {
+ } else if ( (! strcmp (optarg, "kB")) || (!strcmp(optarg, "KiB")) ) {
mult = (uintmax_t)1024;
- units = strdup ("kB");
- } else if (! strcmp (optarg, "MB")) {
+ units = strdup ("kiB");
+ } else if ( (! strcmp (optarg, "MB")) || (!strcmp(optarg, "MiB")) ) {
mult = (uintmax_t)1024 * 1024;
- units = strdup ("MB");
- } else if (! strcmp (optarg, "GB")) {
+ units = strdup ("MiB");
+ } else if ( (! strcmp (optarg, "GB")) || (!strcmp(optarg, "GiB")) ) {
mult = (uintmax_t)1024 * 1024 * 1024;
- units = strdup ("GB");
- } else if (! strcmp (optarg, "TB")) {
+ units = strdup ("GiB");
+ } else if ( (! strcmp (optarg, "TB")) || (!strcmp(optarg, "TiB")) ) {
mult = (uintmax_t)1024 * 1024 * 1024 * 1024;
- units = strdup ("TB");
+ units = strdup ("TiB");
+ } else if ( (! strcmp (optarg, "PB")) || (!strcmp(optarg, "PiB")) ) {
+ mult = (uintmax_t)1024 * 1024 * 1024 * 1024 * 1024;
+ units = strdup ("PiB");
} else {
die (STATE_UNKNOWN, _("unit type %s not known\n"), optarg);
}
@@ -572,19 +611,23 @@ process_arguments (int argc, char **argv)
mult = 1024;
if (units)
free(units);
- units = strdup ("kB");
+ units = strdup ("kiB");
break;
case 'm': /* display mountpoint */
mult = 1024 * 1024;
if (units)
free(units);
- units = strdup ("MB");
+ units = strdup ("MiB");
break;
case 'L':
stat_remote_fs = 1;
+ /* fallthrough */
case 'l':
show_local_fs = 1;
break;
+ case 'P':
+ display_inodes_perfdata = 1;
+ break;
case 'p': /* select path */
if (! (warn_freespace_units || crit_freespace_units || warn_freespace_percent ||
crit_freespace_percent || warn_usedspace_units || crit_usedspace_units ||
@@ -781,7 +824,7 @@ process_arguments (int argc, char **argv)
}
if (units == NULL) {
- units = strdup ("MB");
+ units = strdup ("MiB");
mult = (uintmax_t)1024 * 1024;
}
@@ -904,6 +947,8 @@ print_help (void)
printf (" %s\n", _("Display only devices/mountpoints with errors"));
printf (" %s\n", "-f, --freespace-ignore-reserved");
printf (" %s\n", _("Don't account root-reserved blocks into freespace in perfdata"));
+ printf (" %s\n", "-P, --iperfdata");
+ printf (" %s\n", _("Display inode usage in perfdata"));
printf (" %s\n", "-g, --group=NAME");
printf (" %s\n", _("Group paths. Thresholds apply to (free-)space of all partitions together"));
printf (" %s\n", "-k, --kilobytes");
@@ -993,50 +1038,59 @@ get_stats (struct parameter_list *p, struct fs_usage *fsp) {
if (p_list->group && ! (strcmp(p_list->group, p->group))) {
stat_path(p_list);
get_fs_usage (p_list->best_match->me_mountdir, p_list->best_match->me_devname, &tmpfsp);
- get_path_stats(p_list, &tmpfsp);
+ get_path_stats(p_list, &tmpfsp);
if (verbose >= 3)
- printf("Group %s: adding %llu blocks sized %llu, (%s) used_units=%g free_units=%g total_units=%g fsu_blocksize=%llu mult=%llu\n",
- p_list->group, tmpfsp.fsu_bavail, tmpfsp.fsu_blocksize, p_list->best_match->me_mountdir, p_list->dused_units, p_list->dfree_units,
- p_list->dtotal_units, mult);
-
- /* prevent counting the first FS of a group twice since its parameter_list entry
+ printf("Group %s: adding %llu blocks sized %llu, (%s) used_units=%lu free_units=%llu total_units=%llu mult=%llu\n",
+ p_list->group,
+ tmpfsp.fsu_blocks,
+ tmpfsp.fsu_blocksize,
+ p_list->best_match->me_mountdir,
+ p_list->dused_units,
+ p_list->dfree_units,
+ p_list->dtotal_units,
+ mult);
+
+ /* prevent counting the first FS of a group twice since its parameter_list entry
* is used to carry the information of all file systems of the entire group */
if (! first) {
p->total += p_list->total;
p->available += p_list->available;
p->available_to_root += p_list->available_to_root;
p->used += p_list->used;
-
+
p->dused_units += p_list->dused_units;
p->dfree_units += p_list->dfree_units;
p->dtotal_units += p_list->dtotal_units;
p->inodes_total += p_list->inodes_total;
p->inodes_free += p_list->inodes_free;
+ p->inodes_free_to_root += p_list->inodes_free_to_root;
+ p->inodes_used += p_list->inodes_used;
}
first = 0;
}
- if (verbose >= 3)
- printf("Group %s now has: used_units=%g free_units=%g total_units=%g fsu_blocksize=%llu mult=%llu\n",
- p->group, tmpfsp.fsu_bavail, tmpfsp.fsu_blocksize, p->best_match->me_mountdir, p->dused_units,
- p->dfree_units, p->dtotal_units, mult);
+ if (verbose >= 3)
+ printf("Group %s now has: used_units=%llu free_units=%llu total_units=%llu fsu_blocksize=%llu mult=%llu\n",
+ p->group,
+ p->dused_units,
+ p->dfree_units,
+ p->dtotal_units,
+ tmpfsp.fsu_blocksize,
+ mult);
}
/* modify devname and mountdir for output */
p->best_match->me_mountdir = p->best_match->me_devname = p->group;
}
/* finally calculate percentages for either plain FS or summed up group */
- p->dused_pct = calculate_percent( p->used, p->used + p->available ); /* used + available can never be > uintmax */
+ p->dused_pct = calculate_percent( p->used, p->used + p->available ); /* used + available can never be > uintmax */
p->dfree_pct = 100 - p->dused_pct;
p->dused_inodes_percent = calculate_percent(p->inodes_total - p->inodes_free, p->inodes_total);
p->dfree_inodes_percent = 100 - p->dused_inodes_percent;
-
+
}
void
get_path_stats (struct parameter_list *p, struct fs_usage *fsp) {
- /* 2007-12-08 - Workaround for Gnulib reporting insanely high available
- * space on BSD (the actual value should be negative but fsp->fsu_bavail
- * is unsigned) */
- p->available = fsp->fsu_bavail > fsp->fsu_bfree ? 0 : fsp->fsu_bavail;
+ p->available = fsp->fsu_bavail;
p->available_to_root = fsp->fsu_bfree;
p->used = fsp->fsu_blocks - fsp->fsu_bfree;
if (freespace_ignore_reserved) {
@@ -1046,11 +1100,22 @@ get_path_stats (struct parameter_list *p, struct fs_usage *fsp) {
/* default behaviour : take all the blocks into account */
p->total = fsp->fsu_blocks;
}
-
+
p->dused_units = p->used*fsp->fsu_blocksize/mult;
p->dfree_units = p->available*fsp->fsu_blocksize/mult;
p->dtotal_units = p->total*fsp->fsu_blocksize/mult;
- p->inodes_total = fsp->fsu_files; /* Total file nodes. */
- p->inodes_free = fsp->fsu_ffree; /* Free file nodes. */
+ /* Free file nodes. Not sure the workaround is required, but in case...*/
+ p->inodes_free = fsp->fsu_favail > fsp->fsu_ffree ? 0 : fsp->fsu_favail;
+ p->inodes_free_to_root = fsp->fsu_ffree; /* Free file nodes for root. */
+ p->inodes_used = fsp->fsu_files - fsp->fsu_ffree;
+ if (freespace_ignore_reserved) {
+ /* option activated : we subtract the root-reserved inodes from the total */
+ /* not all OS report fsp->fsu_favail, only the ones with statvfs syscall */
+ /* for others, fsp->fsu_ffree == fsp->fsu_favail */
+ p->inodes_total = fsp->fsu_files - p->inodes_free_to_root + p->inodes_free;
+ } else {
+ /* default behaviour : take all the inodes into account */
+ p->inodes_total = fsp->fsu_files;
+ }
np_add_name(&seen, p->best_match->me_mountdir);
}
diff --git a/plugins/check_dns.c b/plugins/check_dns.c
index 5feafc80..9de6caf5 100644
--- a/plugins/check_dns.c
+++ b/plugins/check_dns.c
@@ -41,7 +41,7 @@ const char *email = "devel@monitoring-plugins.org";
int process_arguments (int, char **);
int validate_arguments (void);
-int error_scan (char *);
+int error_scan (char *, int *);
int ip_match_cidr(const char *, const char *);
unsigned long ip2long(const char *);
void print_help (void);
@@ -54,8 +54,10 @@ char ptr_server[ADDRESS_LENGTH] = "";
int verbose = FALSE;
char **expected_address = NULL;
int expected_address_cnt = 0;
+int expect_nxdomain = FALSE;
int expect_authority = FALSE;
+int all_match = FALSE;
thresholds *time_thresholds = NULL;
static int
@@ -86,6 +88,7 @@ main (int argc, char **argv)
int parse_address = FALSE; /* This flag scans for Address: but only after Name: */
output chld_out, chld_err;
size_t i;
+ int is_nxdomain = FALSE;
setlocale (LC_ALL, "");
bindtextdomain (PACKAGE, LOCALEDIR);
@@ -168,8 +171,8 @@ main (int argc, char **argv)
temp_buffer++;
/* Strip leading spaces */
- for (; *temp_buffer != '\0' && *temp_buffer == ' '; temp_buffer++)
- /* NOOP */;
+ while (*temp_buffer == ' ')
+ temp_buffer++;
strip(temp_buffer);
if (temp_buffer==NULL || strlen(temp_buffer)==0) {
@@ -185,7 +188,7 @@ main (int argc, char **argv)
}
- result = error_scan (chld_out.line[i]);
+ result = error_scan (chld_out.line[i], &is_nxdomain);
if (result != STATE_OK) {
msg = strchr (chld_out.line[i], ':');
if(msg) msg++;
@@ -198,13 +201,20 @@ main (int argc, char **argv)
if (verbose)
puts(chld_err.line[i]);
- if (error_scan (chld_err.line[i]) != STATE_OK) {
- result = max_state (result, error_scan (chld_err.line[i]));
+ if (error_scan (chld_err.line[i], &is_nxdomain) != STATE_OK) {
+ result = max_state (result, error_scan (chld_err.line[i], &is_nxdomain));
msg = strchr(input_buffer, ':');
- if(msg) msg++;
+ if(msg)
+ msg++;
+ else
+ msg = input_buffer;
}
}
+ if (is_nxdomain && !expect_nxdomain) {
+ die (STATE_CRITICAL, _("Domain '%s' was not found by the server\n"), query_address);
+ }
+
if (addresses) {
int i,slen;
char *adrp;
@@ -228,16 +238,27 @@ main (int argc, char **argv)
if (result == STATE_OK && expected_address_cnt > 0) {
result = STATE_CRITICAL;
temp_buffer = "";
+ unsigned long expect_match = (1 << expected_address_cnt) - 1;
+ unsigned long addr_match = (1 << n_addresses) - 1;
for (i=0; i<expected_address_cnt; i++) {
+ int j;
/* check if we get a match on 'raw' ip or cidr */
- if ( strcmp(address, expected_address[i]) == 0
- || ip_match_cidr(address, expected_address[i]) )
- result = STATE_OK;
+ for (j=0; j<n_addresses; j++) {
+ if ( strcmp(addresses[j], expected_address[i]) == 0
+ || ip_match_cidr(addresses[j], expected_address[i]) ) {
+ result = STATE_OK;
+ addr_match &= ~(1 << j);
+ expect_match &= ~(1 << i);
+ }
+ }
/* prepare an error string */
xasprintf(&temp_buffer, "%s%s; ", temp_buffer, expected_address[i]);
}
+ /* check if expected_address must cover all in addresses and none may be missing */
+ if (all_match && (expect_match != 0 || addr_match != 0))
+ result = STATE_CRITICAL;
if (result == STATE_CRITICAL) {
/* Strip off last semicolon... */
temp_buffer[strlen(temp_buffer)-2] = '\0';
@@ -245,6 +266,16 @@ main (int argc, char **argv)
}
}
+ if (expect_nxdomain) {
+ if (!is_nxdomain) {
+ result = STATE_CRITICAL;
+ xasprintf(&msg, _("Domain '%s' was found by the server: '%s'\n"), query_address, address);
+ } else {
+ if (address != NULL) free(address);
+ address = "NXDOMAIN";
+ }
+ }
+
/* check if authoritative */
if (result == STATE_OK && expect_authority && non_authoritative) {
result = STATE_CRITICAL;
@@ -324,9 +355,15 @@ ip2long(const char* src) {
}
int
-error_scan (char *input_buffer)
+error_scan (char *input_buffer, int *is_nxdomain)
{
+ const int nxdomain = strstr (input_buffer, "Non-existent") ||
+ strstr (input_buffer, "** server can't find") ||
+ strstr (input_buffer, "** Can't find") ||
+ strstr (input_buffer, "NXDOMAIN");
+ if (nxdomain) *is_nxdomain = TRUE;
+
/* the DNS lookup timed out */
if (strstr (input_buffer, _("Note: nslookup is deprecated and may be removed from future releases.")) ||
strstr (input_buffer, _("Consider using the `dig' or `host' programs instead. Run nslookup with")) ||
@@ -336,6 +373,8 @@ error_scan (char *input_buffer)
/* DNS server is not running... */
else if (strstr (input_buffer, "No response from server"))
die (STATE_CRITICAL, _("No response from DNS %s\n"), dns_server);
+ else if (strstr (input_buffer, "no servers could be reached"))
+ die (STATE_CRITICAL, _("No response from DNS %s\n"), dns_server);
/* Host name is valid, but server doesn't have records... */
else if (strstr (input_buffer, "No records"))
@@ -343,7 +382,7 @@ error_scan (char *input_buffer)
/* Connection was refused */
else if (strstr (input_buffer, "Connection refused") ||
- strstr (input_buffer, "Couldn't find server") ||
+ strstr (input_buffer, "Couldn't find server") ||
strstr (input_buffer, "Refused") ||
(strstr (input_buffer, "** server can't find") &&
strstr (input_buffer, ": REFUSED")))
@@ -357,12 +396,6 @@ error_scan (char *input_buffer)
else if (strstr (input_buffer, "No information"))
die (STATE_CRITICAL, _("No information returned by DNS server at %s\n"), dns_server);
- /* Host or domain name does not exist */
- else if (strstr (input_buffer, "Non-existent") ||
- strstr (input_buffer, "** server can't find") ||
- strstr (input_buffer,"NXDOMAIN"))
- die (STATE_CRITICAL, _("Domain %s was not found by the server\n"), query_address);
-
/* Network is unreachable */
else if (strstr (input_buffer, "Network is unreachable"))
die (STATE_CRITICAL, _("Network is unreachable\n"));
@@ -399,7 +432,9 @@ process_arguments (int argc, char **argv)
{"server", required_argument, 0, 's'},
{"reverse-server", required_argument, 0, 'r'},
{"expected-address", required_argument, 0, 'a'},
+ {"expect-nxdomain", no_argument, 0, 'n'},
{"expect-authority", no_argument, 0, 'A'},
+ {"all", no_argument, 0, 'L'},
{"warning", required_argument, 0, 'w'},
{"critical", required_argument, 0, 'c'},
{0, 0, 0, 0}
@@ -413,7 +448,7 @@ process_arguments (int argc, char **argv)
strcpy (argv[c], "-t");
while (1) {
- c = getopt_long (argc, argv, "hVvAt:H:s:r:a:w:c:", long_opts, &opt_index);
+ c = getopt_long (argc, argv, "hVvALnt:H:s:r:a:w:c:", long_opts, &opt_index);
if (c == -1 || c == EOF)
break;
@@ -454,13 +489,33 @@ process_arguments (int argc, char **argv)
case 'a': /* expected address */
if (strlen (optarg) >= ADDRESS_LENGTH)
die (STATE_UNKNOWN, _("Input buffer overflow\n"));
- expected_address = (char **)realloc(expected_address, (expected_address_cnt+1) * sizeof(char**));
- expected_address[expected_address_cnt] = strdup(optarg);
- expected_address_cnt++;
+ if (strchr(optarg, ',') != NULL) {
+ char *comma = strchr(optarg, ',');
+ while (comma != NULL) {
+ expected_address = (char **)realloc(expected_address, (expected_address_cnt+1) * sizeof(char**));
+ expected_address[expected_address_cnt] = strndup(optarg, comma - optarg);
+ expected_address_cnt++;
+ optarg = comma + 1;
+ comma = strchr(optarg, ',');
+ }
+ expected_address = (char **)realloc(expected_address, (expected_address_cnt+1) * sizeof(char**));
+ expected_address[expected_address_cnt] = strdup(optarg);
+ expected_address_cnt++;
+ } else {
+ expected_address = (char **)realloc(expected_address, (expected_address_cnt+1) * sizeof(char**));
+ expected_address[expected_address_cnt] = strdup(optarg);
+ expected_address_cnt++;
+ }
+ break;
+ case 'n': /* expect NXDOMAIN */
+ expect_nxdomain = TRUE;
break;
case 'A': /* expect authority */
expect_authority = TRUE;
break;
+ case 'L': /* all must match */
+ all_match = TRUE;
+ break;
case 'w':
warning = optarg;
break;
@@ -496,8 +551,15 @@ process_arguments (int argc, char **argv)
int
validate_arguments ()
{
- if (query_address[0] == 0)
+ if (query_address[0] == 0) {
+ printf ("missing --host argument\n");
return ERROR;
+ }
+
+ if (expected_address_cnt > 0 && expect_nxdomain) {
+ printf ("--expected-address and --expect-nxdomain cannot be combined\n");
+ return ERROR;
+ }
return OK;
}
@@ -529,14 +591,19 @@ print_help (void)
printf (" -a, --expected-address=IP-ADDRESS|CIDR|HOST\n");
printf (" %s\n", _("Optional IP-ADDRESS/CIDR you expect the DNS server to return. HOST must end"));
printf (" %s\n", _("with a dot (.). This option can be repeated multiple times (Returns OK if any"));
- printf (" %s\n", _("value match). If multiple addresses are returned at once, you have to match"));
- printf (" %s\n", _("the whole string of addresses separated with commas (sorted alphabetically)."));
+ printf (" %s\n", _("value matches)."));
+ printf (" -n, --expect-nxdomain\n");
+ printf (" %s\n", _("Expect the DNS server to return NXDOMAIN (i.e. the domain was not found)"));
+ printf (" %s\n", _("Cannot be used together with -a"));
printf (" -A, --expect-authority\n");
printf (" %s\n", _("Optionally expect the DNS server to be authoritative for the lookup"));
printf (" -w, --warning=seconds\n");
printf (" %s\n", _("Return warning if elapsed time exceeds value. Default off"));
printf (" -c, --critical=seconds\n");
printf (" %s\n", _("Return critical if elapsed time exceeds value. Default off"));
+ printf (" -L, --all\n");
+ printf (" %s\n", _("Return critical if the list of expected addresses does not match all addresses"));
+ printf (" %s\n", _("returned. Default off"));
printf (UT_CONN_TIMEOUT, DEFAULT_SOCKET_TIMEOUT);
@@ -548,5 +615,5 @@ void
print_usage (void)
{
printf ("%s\n", _("Usage:"));
- printf ("%s -H host [-s server] [-a expected-address] [-A] [-t timeout] [-w warn] [-c crit]\n", progname);
+ printf ("%s -H host [-s server] [-a expected-address] [-n] [-A] [-t timeout] [-w warn] [-c crit] [-L]\n", progname);
}
diff --git a/plugins/check_fping.c b/plugins/check_fping.c
index da1ce1a6..521d0fef 100644
--- a/plugins/check_fping.c
+++ b/plugins/check_fping.c
@@ -184,7 +184,7 @@ textscan (char *buf)
int status = STATE_UNKNOWN;
if (strstr (buf, "not found")) {
- die (STATE_CRITICAL, _("FPING UNKNOW - %s not found\n"), server_name);
+ die (STATE_CRITICAL, _("FPING UNKNOWN - %s not found\n"), server_name);
}
else if (strstr (buf, "is unreachable") || strstr (buf, "Unreachable")) {
diff --git a/plugins/check_hpjd.c b/plugins/check_hpjd.c
index f159f5a2..c4b44178 100644
--- a/plugins/check_hpjd.c
+++ b/plugins/check_hpjd.c
@@ -66,7 +66,8 @@ void print_usage (void);
char *community = NULL;
char *address = NULL;
-char *port = NULL;
+unsigned int port = 0;
+int check_paper_out = 1;
int
main (int argc, char **argv)
@@ -120,8 +121,12 @@ main (int argc, char **argv)
HPJD_GD_DOOR_OPEN, HPJD_GD_PAPER_OUTPUT, HPJD_GD_STATUS_DISPLAY);
/* get the command to run */
- sprintf (command_line, "%s -OQa -m : -v 1 -c %s %s:%hd %s", PATH_TO_SNMPGET, community,
- address, port, query_string);
+ sprintf (command_line, "%s -OQa -m : -v 1 -c %s %s:%u %s",
+ PATH_TO_SNMPGET,
+ community,
+ address,
+ port,
+ query_string);
/* run the command */
child_process = spopen (command_line);
@@ -240,7 +245,8 @@ main (int argc, char **argv)
strcpy (errmsg, _("Paper Jam"));
}
else if (paper_out) {
- result = STATE_WARNING;
+ if (check_paper_out)
+ result = STATE_WARNING;
strcpy (errmsg, _("Out of Paper"));
}
else if (line_status == OFFLINE) {
@@ -325,7 +331,7 @@ process_arguments (int argc, char **argv)
while (1) {
- c = getopt_long (argc, argv, "+hVH:C:p:", longopts, &option);
+ c = getopt_long (argc, argv, "+hVH:C:p:D", longopts, &option);
if (c == -1 || c == EOF || c == 1)
break;
@@ -347,6 +353,8 @@ process_arguments (int argc, char **argv)
usage2 (_("Port must be a positive short integer"), optarg);
else
port = atoi(optarg);
+ break;
+ case 'D': /* disable paper out check */
+ check_paper_out = 0;
break;
case 'V': /* version */
print_revision (progname, NP_VERSION);
@@ -376,11 +384,8 @@ process_arguments (int argc, char **argv)
community = strdup (DEFAULT_COMMUNITY);
}
- if (port == NULL) {
- if (argv[c] != NULL )
- port = argv[c];
- else
- port = atoi (DEFAULT_PORT);
+ if (port == 0) {
+ port = atoi(DEFAULT_PORT);
}
return validate_arguments ();
@@ -420,6 +425,8 @@ print_help (void)
printf (" %s", _("Specify the port to check "));
printf (_("(default=%s)"), DEFAULT_PORT);
printf ("\n");
+ printf (" %s\n", "-D");
+ printf (" %s", _("Disable paper check "));
printf (UT_SUPPORT);
}
@@ -430,5 +437,5 @@ void
print_usage (void)
{
printf ("%s\n", _("Usage:"));
- printf ("%s -H host [-C community] [-p port]\n", progname);
+ printf ("%s -H host [-C community] [-p port] [-D]\n", progname);
}
diff --git a/plugins/check_http.c b/plugins/check_http.c
index e5ef7cc4..34fb4f01 100644
--- a/plugins/check_http.c
+++ b/plugins/check_http.c
@@ -72,7 +72,7 @@ int maximum_age = -1;
enum {
REGS = 2,
- MAX_RE_SIZE = 256
+ MAX_RE_SIZE = 1024
};
#include "regex.h"
regex_t preg;
@@ -120,12 +120,14 @@ int use_ssl = FALSE;
int use_sni = FALSE;
int verbose = FALSE;
int show_extended_perfdata = FALSE;
+int show_body = FALSE;
int sd;
int min_page_len = 0;
int max_page_len = 0;
int redir_depth = 0;
int max_depth = 15;
char *http_method;
+char *http_method_proxy;
char *http_post_data;
char *http_content_type;
char buffer[MAX_INPUT_BUFFER];
@@ -239,6 +241,7 @@ process_arguments (int argc, char **argv)
{"use-ipv4", no_argument, 0, '4'},
{"use-ipv6", no_argument, 0, '6'},
{"extended-perfdata", no_argument, 0, 'E'},
+ {"show-body", no_argument, 0, 'B'},
{0, 0, 0, 0}
};
@@ -259,7 +262,7 @@ process_arguments (int argc, char **argv)
}
while (1) {
- c = getopt_long (argc, argv, "Vvh46t:c:w:A:k:H:P:j:T:I:a:b:d:e:p:s:R:r:u:f:C:J:K:nlLS::m:M:NE", longopts, &option);
+ c = getopt_long (argc, argv, "Vvh46t:c:w:A:k:H:P:j:T:I:a:b:d:e:p:s:R:r:u:f:C:J:K:nlLS::m:M:NEB", longopts, &option);
if (c == -1 || c == EOF)
break;
@@ -446,6 +449,12 @@ process_arguments (int argc, char **argv)
if (http_method)
free(http_method);
http_method = strdup (optarg);
+ char *tmp;
+ if ((tmp = strchr(http_method, ':')) != NULL) {
+ tmp[0] = '\0';
+ http_method_proxy = ++tmp;
+ }
break;
case 'd': /* string or substring */
strncpy (header_expect, optarg, MAX_INPUT_BUFFER - 1);
@@ -540,6 +549,9 @@ process_arguments (int argc, char **argv)
case 'E': /* show extended perfdata */
show_extended_perfdata = TRUE;
break;
+ case 'B': /* print body content after status line */
+ show_body = TRUE;
+ break;
}
}
@@ -566,6 +578,9 @@ process_arguments (int argc, char **argv)
if (http_method == NULL)
http_method = strdup ("GET");
+ if (http_method_proxy == NULL)
+ http_method_proxy = strdup ("GET");
+
if (client_cert && !client_privkey)
usage4 (_("If you use a client certificate you must also specify a private key file"));
@@ -916,6 +931,21 @@ check_http (void)
if (verbose) printf ("Entering CONNECT tunnel mode with proxy %s:%d to dst %s:%d\n", server_address, server_port, host_name, HTTPS_PORT);
asprintf (&buf, "%s %s:%d HTTP/1.1\r\n%s\r\n", http_method, host_name, HTTPS_PORT, user_agent);
+ if (strlen(proxy_auth)) {
+ base64_encode_alloc (proxy_auth, strlen (proxy_auth), &auth);
+ xasprintf (&buf, "%sProxy-Authorization: Basic %s\r\n", buf, auth);
+ }
+ /* optionally send any other header tag */
+ if (http_opt_headers_count) {
+ for (i = 0; i < http_opt_headers_count ; i++) {
+ if (force_host_header != http_opt_headers[i]) {
+ xasprintf (&buf, "%s%s\r\n", buf, http_opt_headers[i]);
+ }
+ }
+ /* This cannot be free'd here because a redirection will then try to access this and segfault */
+ /* Covered in a testcase in tests/check_http.t */
+ /* free(http_opt_headers); */
+ }
asprintf (&buf, "%sProxy-Connection: keep-alive\r\n", buf);
asprintf (&buf, "%sHost: %s\r\n", buf, host_name);
/* we finished our request, send empty line with CRLF */
@@ -950,7 +980,7 @@ check_http (void)
if ( server_address != NULL && strcmp(http_method, "CONNECT") == 0
&& host_name != NULL && use_ssl == TRUE)
- asprintf (&buf, "%s %s %s\r\n%s\r\n", "GET", server_url, host_name ? "HTTP/1.1" : "HTTP/1.0", user_agent);
+ asprintf (&buf, "%s %s %s\r\n%s\r\n", http_method_proxy, server_url, host_name ? "HTTP/1.1" : "HTTP/1.0", user_agent);
else
asprintf (&buf, "%s %s %s\r\n%s\r\n", http_method, server_url, host_name ? "HTTP/1.1" : "HTTP/1.0", user_agent);
@@ -1140,6 +1170,8 @@ check_http (void)
xasprintf (&msg,
_("Invalid HTTP response received from host on port %d: %s\n"),
server_port, status_line);
+ if (show_body)
+ xasprintf (&msg, _("%s\n%s"), msg, page);
die (STATE_CRITICAL, "HTTP CRITICAL - %s", msg);
}
@@ -1290,6 +1322,9 @@ check_http (void)
perfd_time (elapsed_time),
perfd_size (page_len));
+ if (show_body)
+ xasprintf (&msg, _("%s\n%s"), msg, page);
+
result = max_state_alt(get_status(elapsed_time, thlds), result);
die (result, "HTTP %s: %s\n", state_text(result), msg);
@@ -1418,8 +1453,8 @@ redir (char *pos, char *status_line)
!strncmp(server_address, addr, MAX_IPV4_HOSTLENGTH) &&
(host_name && !strncmp(host_name, addr, MAX_IPV4_HOSTLENGTH)) &&
!strcmp(server_url, url))
- die (STATE_WARNING,
- _("HTTP WARNING - redirection creates an infinite loop - %s://%s:%d%s%s\n"),
+ die (STATE_CRITICAL,
+ _("HTTP CRITICAL - redirection creates an infinite loop - %s://%s:%d%s%s\n"),
type, addr, i, url, (display_html ? "</A>" : ""));
strcpy (server_type, type);
@@ -1532,6 +1567,10 @@ print_help (void)
print_usage ();
+#ifdef HAVE_SSL
+ printf (_("In the first form, make an HTTP request."));
+ printf (_("In the second form, connect to the server and check the TLS certificate."));
+#endif
printf (_("NOTE: One or both of -H and -I must be specified"));
printf ("\n");
@@ -1581,7 +1620,7 @@ print_help (void)
printf (" %s\n", _("URL to GET or POST (default: /)"));
printf (" %s\n", "-P, --post=STRING");
printf (" %s\n", _("URL encoded http POST data"));
- printf (" %s\n", "-j, --method=STRING (for example: HEAD, OPTIONS, TRACE, PUT, DELETE, CONNECT)");
+ printf (" %s\n", "-j, --method=STRING (for example: HEAD, OPTIONS, TRACE, PUT, DELETE, CONNECT, CONNECT:POST)");
printf (" %s\n", _("Set HTTP method."));
printf (" %s\n", "-N, --no-body");
printf (" %s\n", _("Don't wait for document body: stop reading after headers."));
@@ -1611,6 +1650,8 @@ print_help (void)
printf (" %s\n", _("Any other tags to be sent in http header. Use multiple times for additional headers"));
printf (" %s\n", "-E, --extended-perfdata");
printf (" %s\n", _("Print additional performance data"));
+ printf (" %s\n", "-B, --show-body");
+ printf (" %s\n", _("Print body content below status line"));
printf (" %s\n", "-L, --link");
printf (" %s\n", _("Wrap output in HTML link (obsoleted by urlize)"));
printf (" %s\n", "-f, --onredirect=<ok|warning|critical|follow|sticky|stickyport>");
@@ -1629,7 +1670,7 @@ print_help (void)
printf ("%s\n", _("Notes:"));
printf (" %s\n", _("This plugin will attempt to open an HTTP connection with the host."));
printf (" %s\n", _("Successful connects return STATE_OK, refusals and timeouts return STATE_CRITICAL"));
- printf (" %s\n", _("other errors return STATE_UNKNOWN. Successful connects, but incorrect reponse"));
+ printf (" %s\n", _("other errors return STATE_UNKNOWN. Successful connects, but incorrect response"));
printf (" %s\n", _("messages from the host result in STATE_WARNING return values. If you are"));
printf (" %s\n", _("checking a virtual server that uses 'host headers' you must supply the FQDN"));
printf (" %s\n", _("(fully qualified domain name) as the [host_name] argument."));
@@ -1668,7 +1709,8 @@ print_help (void)
printf (" %s\n", _("all these options are needed: -I <proxy> -p <proxy-port> -u <check-url> -S(sl) -j CONNECT -H <webserver>"));
printf (" %s\n", _("a STATE_OK will be returned. When the server returns its content but exceeds"));
printf (" %s\n", _("the 5-second threshold, a STATE_WARNING will be returned. When an error occurs,"));
- printf (" %s\n", _("a STATE_CRITICAL will be returned."));
+ printf (" %s\n", _("a STATE_CRITICAL will be returned. By adding a colon to the method you can set the method used"));
+ printf (" %s\n", _("inside the proxied connection: -j CONNECT:POST"));
#endif
@@ -1688,6 +1730,8 @@ print_usage (void)
printf (" [-b proxy_auth] [-f <ok|warning|critcal|follow|sticky|stickyport>]\n");
printf (" [-e <expect>] [-d string] [-s string] [-l] [-r <regex> | -R <case-insensitive regex>]\n");
printf (" [-P string] [-m <min_pg_size>:<max_pg_size>] [-4|-6] [-N] [-M <age>]\n");
- printf (" [-A string] [-k string] [-S <version>] [--sni] [-C <warn_age>[,<crit_age>]]\n");
+ printf (" [-A string] [-k string] [-S <version>] [--sni]\n");
printf (" [-T <content-type>] [-j method]\n");
+ printf (" %s -H <vhost> | -I <IP-address> -C <warn_age>[,<crit_age>]\n",progname);
+ printf (" [-p <port>] [-t <timeout>] [-4|-6] [--sni]\n");
}
diff --git a/plugins/check_ide_smart.c b/plugins/check_ide_smart.c
index 46621318..0160d98b 100644
--- a/plugins/check_ide_smart.c
+++ b/plugins/check_ide_smart.c
@@ -283,7 +283,7 @@ get_offline_text (int status)
return offline_status_text[i].text;
}
}
- return "UNKNOW";
+ return "UNKNOWN";
}
diff --git a/plugins/check_ldap.c b/plugins/check_ldap.c
index 66be4b46..845a4f52 100644
--- a/plugins/check_ldap.c
+++ b/plugins/check_ldap.c
@@ -237,7 +237,7 @@ main (int argc, char *argv[])
if(entries_thresholds != NULL) {
if (verbose) {
printf ("entries found: %d\n", num_entries);
- print_thresholds("entry threasholds", entries_thresholds);
+ print_thresholds("entry thresholds", entries_thresholds);
}
status_entries = get_status(num_entries, entries_thresholds);
if (status_entries == STATE_CRITICAL) {
@@ -432,6 +432,9 @@ validate_arguments ()
set_thresholds(&entries_thresholds,
warn_entries, crit_entries);
}
+ if (ld_passwd==NULL)
+ ld_passwd = getenv("LDAP_PASSWORD");
+
return OK;
}
@@ -465,7 +468,7 @@ print_help (void)
printf (" %s\n", "-D [--bind]");
printf (" %s\n", _("ldap bind DN (if required)"));
printf (" %s\n", "-P [--pass]");
- printf (" %s\n", _("ldap password (if required)"));
+ printf (" %s\n", _("ldap password (if required, or set the password through environment variable 'LDAP_PASSWORD')"));
printf (" %s\n", "-T [--starttls]");
printf (" %s\n", _("use starttls mechanism introduced in protocol version 3"));
printf (" %s\n", "-S [--ssl]");
diff --git a/plugins/check_load.c b/plugins/check_load.c
index b1cc498f..0e4de54e 100644
--- a/plugins/check_load.c
+++ b/plugins/check_load.c
@@ -33,6 +33,7 @@ const char *copyright = "1999-2007";
const char *email = "devel@monitoring-plugins.org";
#include "common.h"
+#include "runcmd.h"
#include "utils.h"
#include "popen.h"
@@ -52,6 +53,9 @@ static int process_arguments (int argc, char **argv);
static int validate_arguments (void);
void print_help (void);
void print_usage (void);
+static int print_top_consuming_processes();
+
+static int n_procs_to_show = 0;
/* strictly for pretty-print usage in loops */
static const int nums[3] = { 1, 5, 15 };
@@ -205,11 +209,14 @@ main (int argc, char **argv)
else if(la[i] > wload[i]) result = STATE_WARNING;
}
- printf("%s - %s|", state_text(result), status_line);
+ printf("LOAD %s - %s|", state_text(result), status_line);
for(i = 0; i < 3; i++)
printf("load%d=%.3f;%.3f;%.3f;0; ", nums[i], la[i], wload[i], cload[i]);
putchar('\n');
+ if (n_procs_to_show > 0) {
+ print_top_consuming_processes();
+ }
return result;
}
@@ -227,6 +234,7 @@ process_arguments (int argc, char **argv)
{"percpu", no_argument, 0, 'r'},
{"version", no_argument, 0, 'V'},
{"help", no_argument, 0, 'h'},
+ {"procs-to-show", required_argument, 0, 'n'},
{0, 0, 0, 0}
};
@@ -234,7 +242,7 @@ process_arguments (int argc, char **argv)
return ERROR;
while (1) {
- c = getopt_long (argc, argv, "Vhrc:w:", longopts, &option);
+ c = getopt_long (argc, argv, "Vhrc:w:n:", longopts, &option);
if (c == -1 || c == EOF)
break;
@@ -255,6 +263,9 @@ process_arguments (int argc, char **argv)
case 'h': /* help */
print_help ();
exit (STATE_UNKNOWN);
+ case 'n':
+ n_procs_to_show = atoi(optarg);
+ break;
case '?': /* help */
usage5 ();
}
@@ -324,6 +335,9 @@ print_help (void)
printf (" %s\n", _("the load average format is the same used by \"uptime\" and \"w\""));
printf (" %s\n", "-r, --percpu");
printf (" %s\n", _("Divide the load averages by the number of CPUs (when possible)"));
+ printf (" %s\n", "-n, --procs-to-show=NUMBER_OF_PROCS");
+ printf (" %s\n", _("Number of processes to show when printing the top consuming processes."));
+ printf (" %s\n", _("NUMBER_OF_PROCS=0 disables this feature. Default value is 0"));
printf (UT_SUPPORT);
}
@@ -332,5 +346,48 @@ void
print_usage (void)
{
printf ("%s\n", _("Usage:"));
- printf ("%s [-r] -w WLOAD1,WLOAD5,WLOAD15 -c CLOAD1,CLOAD5,CLOAD15\n", progname);
+ printf ("%s [-r] -w WLOAD1,WLOAD5,WLOAD15 -c CLOAD1,CLOAD5,CLOAD15 [-n NUMBER_OF_PROCS]\n", progname);
+}
+
+#ifdef PS_USES_PROCPCPU
+int cmpstringp(const void *p1, const void *p2) {
+ int procuid = 0;
+ int procpid = 0;
+ int procppid = 0;
+ int procvsz = 0;
+ int procrss = 0;
+ float procpcpu = 0;
+ char procstat[8];
+#ifdef PS_USES_PROCETIME
+ char procetime[MAX_INPUT_BUFFER];
+#endif /* PS_USES_PROCETIME */
+ char procprog[MAX_INPUT_BUFFER];
+ int pos;
+ sscanf (* (char * const *) p1, PS_FORMAT, PS_VARLIST);
+ float procpcpu1 = procpcpu;
+ sscanf (* (char * const *) p2, PS_FORMAT, PS_VARLIST);
+ /* qsort comparator must return negative/zero/positive; sort descending by pcpu */
+ return (procpcpu1 < procpcpu) - (procpcpu1 > procpcpu);
+}
+#endif /* PS_USES_PROCPCPU */
+
+static int print_top_consuming_processes() {
+ int i = 0;
+ struct output chld_out, chld_err;
+ if(np_runcmd(PS_COMMAND, &chld_out, &chld_err, 0) != 0){
+ fprintf(stderr, _("'%s' exited with non-zero status.\n"), PS_COMMAND);
+ return STATE_UNKNOWN;
+ }
+ if (chld_out.lines < 2) {
+ fprintf(stderr, _("some error occurred getting procs list.\n"));
+ return STATE_UNKNOWN;
+ }
+#ifdef PS_USES_PROCPCPU
+ qsort(chld_out.line + 1, chld_out.lines - 1, sizeof(char*), cmpstringp);
+#endif /* PS_USES_PROCPCPU */
+ int lines_to_show = chld_out.lines < (n_procs_to_show + 1)
+ ? chld_out.lines : n_procs_to_show + 1;
+ for (i = 0; i < lines_to_show; i += 1) {
+ printf("%s\n", chld_out.line[i]);
+ }
+ return OK;
}
diff --git a/plugins/check_mysql.c b/plugins/check_mysql.c
index 5773afd9..0cba50e6 100644
--- a/plugins/check_mysql.c
+++ b/plugins/check_mysql.c
@@ -379,6 +379,9 @@ process_arguments (int argc, char **argv)
if (is_host (optarg)) {
db_host = optarg;
}
+ else if (*optarg == '/') {
+ db_socket = optarg;
+ }
else {
usage2 (_("Invalid hostname/address"), optarg);
}
diff --git a/plugins/check_mysql_query.c b/plugins/check_mysql_query.c
index 49a14dd3..ac2fb15d 100644
--- a/plugins/check_mysql_query.c
+++ b/plugins/check_mysql_query.c
@@ -136,18 +136,18 @@ main (int argc, char **argv)
die (STATE_CRITICAL, "QUERY %s: Fetch row error - %s\n", _("CRITICAL"), error);
}
- /* free the result */
- mysql_free_result (res);
-
- /* close the connection */
- mysql_close (&mysql);
-
if (! is_numeric(row[0])) {
die (STATE_CRITICAL, "QUERY %s: %s - '%s'\n", _("CRITICAL"), _("Is not a numeric"), row[0]);
}
value = strtod(row[0], NULL);
+ /* free the result */
+ mysql_free_result (res);
+
+ /* close the connection */
+ mysql_close (&mysql);
+
if (verbose >= 3)
printf("mysql result: %f\n", value);
diff --git a/plugins/check_ntp.c b/plugins/check_ntp.c
index 5ac6c65b..914b40ce 100644
--- a/plugins/check_ntp.c
+++ b/plugins/check_ntp.c
@@ -548,7 +548,7 @@ double jitter_request(const char *host, int *status){
DBG(print_ntp_control_message(&req));
/* Attempt to read the largest size packet possible */
req.count=htons(MAX_CM_SIZE);
- DBG(printf("recieving READSTAT response"))
+ DBG(printf("receiving READSTAT response"))
read(conn, &req, SIZEOF_NTPCM(req));
DBG(print_ntp_control_message(&req));
/* Each peer identifier is 4 bytes in the data section, which
@@ -608,7 +608,7 @@ double jitter_request(const char *host, int *status){
DBG(print_ntp_control_message(&req));
req.count = htons(MAX_CM_SIZE);
- DBG(printf("recieving READVAR response...\n"));
+ DBG(printf("receiving READVAR response...\n"));
read(conn, &req, SIZEOF_NTPCM(req));
DBG(print_ntp_control_message(&req));
diff --git a/plugins/check_ntp_peer.c b/plugins/check_ntp_peer.c
index c656b0f5..6842842f 100644
--- a/plugins/check_ntp_peer.c
+++ b/plugins/check_ntp_peer.c
@@ -245,7 +245,7 @@ int ntp_request(const char *host, double *offset, int *offset_result, double *ji
do {
/* Attempt to read the largest size packet possible */
req.count=htons(MAX_CM_SIZE);
- DBG(printf("recieving READSTAT response"))
+ DBG(printf("receiving READSTAT response"))
if(read(conn, &req, SIZEOF_NTPCM(req)) == -1)
die(STATE_CRITICAL, "NTP CRITICAL: No response from NTP server\n");
DBG(print_ntp_control_message(&req));
diff --git a/plugins/check_pgsql.c b/plugins/check_pgsql.c
index 2eb699e8..b8fc5f1d 100644
--- a/plugins/check_pgsql.c
+++ b/plugins/check_pgsql.c
@@ -34,6 +34,7 @@ const char *email = "devel@monitoring-plugins.org";
#include "common.h"
#include "utils.h"
+#include "utils_cmd.h"
#include "netutils.h"
#include <libpq-fe.h>
@@ -346,7 +347,7 @@ process_arguments (int argc, char **argv)
if (!is_pg_dbname (optarg)) /* checks length and valid chars */
usage2 (_("Database name is not valid"), optarg);
else /* we know length, and know optarg is terminated, so us strcpy */
- strcpy (dbName, optarg);
+ snprintf(dbName, NAMEDATALEN, "%s", optarg);
break;
case 'l': /* login name */
if (!is_pg_logname (optarg))
@@ -565,7 +566,7 @@ print_help (void)
printf (" %s\n", _("Typically, the monitoring user (unless the --logname option is used) should be"));
printf (" %s\n", _("able to connect to the database without a password. The plugin can also send"));
- printf (" %s\n", _("a password, but no effort is made to obsure or encrypt the password."));
+ printf (" %s\n", _("a password, but no effort is made to obscure or encrypt the password."));
printf (UT_SUPPORT);
}
diff --git a/plugins/check_ping.c b/plugins/check_ping.c
index 423ecbe5..ba7af373 100644
--- a/plugins/check_ping.c
+++ b/plugins/check_ping.c
@@ -37,6 +37,8 @@ const char *email = "devel@monitoring-plugins.org";
#include "popen.h"
#include "utils.h"
+#include <signal.h>
+
#define WARN_DUPLICATES "DUPLICATES FOUND! "
#define UNKNOWN_TRIP_TIME -1.0 /* -1 seconds */
@@ -163,10 +165,14 @@ main (int argc, char **argv)
printf ("</A>");
/* Print performance data */
- printf("|%s", fperfdata ("rta", (double) rta, "ms",
- wrta>0?TRUE:FALSE, wrta,
- crta>0?TRUE:FALSE, crta,
- TRUE, 0, FALSE, 0));
+ if (pl != 100) {
+ printf("|%s", fperfdata ("rta", (double) rta, "ms",
+ wrta>0?TRUE:FALSE, wrta,
+ crta>0?TRUE:FALSE, crta,
+ TRUE, 0, FALSE, 0));
+ } else {
+ printf("| rta=U;%f;%f;;", wrta, crta);
+ }
printf(" %s\n", perfdata ("pl", (long) pl, "%",
wpl>0?TRUE:FALSE, wpl,
cpl>0?TRUE:FALSE, cpl,
diff --git a/plugins/check_procs.c b/plugins/check_procs.c
index 4bcc56bc..48723404 100644
--- a/plugins/check_procs.c
+++ b/plugins/check_procs.c
@@ -1,34 +1,34 @@
/*****************************************************************************
-*
+*
* Monitoring check_procs plugin
-*
+*
* License: GPL
* Copyright (c) 2000-2008 Monitoring Plugins Development Team
-*
+*
* Description:
-*
+*
* This file contains the check_procs plugin
-*
+*
* Checks all processes and generates WARNING or CRITICAL states if the
* specified metric is outside the required threshold ranges. The metric
* defaults to number of processes. Search filters can be applied to limit
* the processes to check.
-*
-*
+*
+*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
-*
+*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*
-*
+*
+*
*****************************************************************************/
const char *progname = "check_procs";
@@ -50,7 +50,7 @@ const char *email = "devel@monitoring-plugins.org";
int process_arguments (int, char **);
int validate_arguments (void);
-int convert_to_seconds (char *);
+int convert_to_seconds (char *);
void print_help (void);
void print_usage (void);
@@ -230,9 +230,9 @@ main (int argc, char **argv)
procseconds = convert_to_seconds(procetime);
if (verbose >= 3)
- printf ("proc#=%d uid=%d vsz=%d rss=%d pid=%d ppid=%d pcpu=%.2f stat=%s etime=%s prog=%s args=%s\n",
+ printf ("proc#=%d uid=%d vsz=%d rss=%d pid=%d ppid=%d pcpu=%.2f stat=%s etime=%s prog=%s args=%s\n",
procs, procuid, procvsz, procrss,
- procpid, procppid, procpcpu, procstat,
+ procpid, procppid, procpcpu, procstat,
procetime, procprog, procargs);
/* Ignore self */
@@ -265,7 +265,7 @@ main (int argc, char **argv)
}
}
- if ((options & STAT) && (strstr (statopts, procstat)))
+ if ((options & STAT) && (strstr (procstat, statopts)))
resultsum |= STAT;
if ((options & ARGS) && procargs && (strstr (procargs, args) != NULL))
resultsum |= ARGS;
@@ -292,9 +292,9 @@ main (int argc, char **argv)
procs++;
if (verbose >= 2) {
- printf ("Matched: uid=%d vsz=%d rss=%d pid=%d ppid=%d pcpu=%.2f stat=%s etime=%s prog=%s args=%s\n",
+ printf ("Matched: uid=%d vsz=%d rss=%d pid=%d ppid=%d pcpu=%.2f stat=%s etime=%s prog=%s args=%s\n",
procuid, procvsz, procrss,
- procpid, procppid, procpcpu, procstat,
+ procpid, procppid, procpcpu, procstat,
procetime, procprog, procargs);
}
@@ -320,7 +320,7 @@ main (int argc, char **argv)
result = max_state (result, i);
}
}
- }
+ }
/* This should not happen */
else if (verbose) {
printf(_("Not parseable: %s"), input_buffer);
@@ -332,7 +332,7 @@ main (int argc, char **argv)
return STATE_UNKNOWN;
}
- if ( result == STATE_UNKNOWN )
+ if ( result == STATE_UNKNOWN )
result = STATE_OK;
/* Needed if procs found, but none match filter */
@@ -352,9 +352,9 @@ main (int argc, char **argv)
if (metric != METRIC_PROCS) {
printf (_("%d crit, %d warn out of "), crit, warn);
}
- }
+ }
printf (ngettext ("%d process", "%d processes", (unsigned long) procs), procs);
-
+
if (strcmp(fmt,"") != 0) {
printf (_(" with %s"), fmt);
}
@@ -440,7 +440,7 @@ process_arguments (int argc, char **argv)
break;
case 'c': /* critical threshold */
critical_range = optarg;
- break;
+ break;
case 'w': /* warning threshold */
warning_range = optarg;
break;
@@ -542,11 +542,11 @@ process_arguments (int argc, char **argv)
if ( strcmp(optarg, "PROCS") == 0) {
metric = METRIC_PROCS;
break;
- }
+ }
else if ( strcmp(optarg, "VSZ") == 0) {
metric = METRIC_VSZ;
break;
- }
+ }
else if ( strcmp(optarg, "RSS") == 0 ) {
metric = METRIC_RSS;
break;
@@ -559,7 +559,7 @@ process_arguments (int argc, char **argv)
metric = METRIC_ELAPSED;
break;
}
-
+
usage4 (_("Metric must be one of PROCS, VSZ, RSS, CPU, ELAPSED!"));
case 'k': /* linux kernel thread filter */
kthread_filter = 1;
@@ -642,7 +642,7 @@ convert_to_seconds(char *etime) {
seconds = 0;
for (ptr = etime; *ptr != '\0'; ptr++) {
-
+
if (*ptr == '-') {
hyphcnt++;
continue;
@@ -764,6 +764,11 @@ be the total number of running processes\n\n"));
printf (" %s\n", "check_procs -w 2:2 -c 2:1024 -C portsentry");
printf (" %s\n", _("Warning if not two processes with command name portsentry."));
printf (" %s\n\n", _("Critical if < 2 or > 1024 processes"));
+ printf (" %s\n", "check_procs -c 1: -C sshd");
+ printf (" %s\n", _("Critical if not at least 1 process with command sshd"));
+ printf (" %s\n", "check_procs -w 1024 -c 1: -C sshd");
+ printf (" %s\n", _("Warning if > 1024 processes with command name sshd."));
+ printf (" %s\n\n", _("Critical if < 1 processes with command name sshd."));
printf (" %s\n", "check_procs -w 10 -a '/usr/local/bin/perl' -u root");
printf (" %s\n", _("Warning alert if > 10 processes with command arguments containing"));
printf (" %s\n\n", _("'/usr/local/bin/perl' and owned by root"));
diff --git a/plugins/check_radius.c b/plugins/check_radius.c
index fe84b7ce..be1001b4 100644
--- a/plugins/check_radius.c
+++ b/plugins/check_radius.c
@@ -360,7 +360,7 @@ print_help (void)
printf (" %s\n", "-u, --username=STRING");
printf (" %s\n", _("The user to authenticate"));
printf (" %s\n", "-p, --password=STRING");
- printf (" %s\n", _("Password for autentication (SECURITY RISK)"));
+ printf (" %s\n", _("Password for authentication (SECURITY RISK)"));
printf (" %s\n", "-n, --nas-id=STRING");
printf (" %s\n", _("NAS identifier"));
printf (" %s\n", "-N, --nas-ip-address=STRING");
diff --git a/plugins/check_real.c b/plugins/check_real.c
index 6491e6e9..0f1a1ba7 100644
--- a/plugins/check_real.c
+++ b/plugins/check_real.c
@@ -438,7 +438,7 @@ print_help (void)
printf ("%s\n", _("This plugin will attempt to open an RTSP connection with the host."));
printf ("%s\n", _("Successul connects return STATE_OK, refusals and timeouts return"));
printf ("%s\n", _("STATE_CRITICAL, other errors return STATE_UNKNOWN. Successful connects,"));
- printf ("%s\n", _("but incorrect reponse messages from the host result in STATE_WARNING return"));
+ printf ("%s\n", _("but incorrect response messages from the host result in STATE_WARNING return"));
printf ("%s\n", _("values."));
printf (UT_SUPPORT);
diff --git a/plugins/check_smtp.c b/plugins/check_smtp.c
index 6e0e22ed..c1e92dff 100644
--- a/plugins/check_smtp.c
+++ b/plugins/check_smtp.c
@@ -55,6 +55,7 @@ enum {
#define SMTP_EXPECT "220"
#define SMTP_HELO "HELO "
#define SMTP_EHLO "EHLO "
+#define SMTP_LHLO "LHLO "
#define SMTP_QUIT "QUIT\r\n"
#define SMTP_STARTTLS "STARTTLS\r\n"
#define SMTP_AUTH_LOGIN "AUTH LOGIN\r\n"
@@ -102,6 +103,7 @@ int check_critical_time = FALSE;
int verbose = 0;
int use_ssl = FALSE;
short use_ehlo = FALSE;
+short use_lhlo = FALSE;
short ssl_established = 0;
char *localhostname = NULL;
int sd;
@@ -152,7 +154,9 @@ main (int argc, char **argv)
return STATE_CRITICAL;
}
}
- if(use_ehlo)
+ if(use_lhlo)
+ xasprintf (&helocmd, "%s%s%s", SMTP_LHLO, localhostname, "\r\n");
+ else if(use_ehlo)
xasprintf (&helocmd, "%s%s%s", SMTP_EHLO, localhostname, "\r\n");
else
xasprintf (&helocmd, "%s%s%s", SMTP_HELO, localhostname, "\r\n");
@@ -197,7 +201,7 @@ main (int argc, char **argv)
if (recvlines(buffer, MAX_INPUT_BUFFER) <= 0) {
printf (_("recv() failed\n"));
return STATE_WARNING;
- } else if(use_ehlo){
+ } else if(use_ehlo || use_lhlo){
if(strstr(buffer, "250 STARTTLS") != NULL ||
strstr(buffer, "250-STARTTLS") != NULL){
supports_tls=TRUE;
@@ -293,6 +297,7 @@ main (int argc, char **argv)
printf("%s", buffer);
}
+ n = 0;
while (n < ncommands) {
xasprintf (&cmd_str, "%s%s", commands[n], "\r\n");
my_send(cmd_str, strlen(cmd_str));
@@ -469,6 +474,7 @@ process_arguments (int argc, char **argv)
{"use-ipv4", no_argument, 0, '4'},
{"use-ipv6", no_argument, 0, '6'},
{"help", no_argument, 0, 'h'},
+ {"lmtp", no_argument, 0, 'L'},
{"starttls",no_argument,0,'S'},
{"certificate",required_argument,0,'D'},
{"ignore-quit-failure",no_argument,0,'q'},
@@ -488,7 +494,7 @@ process_arguments (int argc, char **argv)
}
while (1) {
- c = getopt_long (argc, argv, "+hVv46t:p:f:e:c:w:H:C:R:SD:F:A:U:P:q",
+ c = getopt_long (argc, argv, "+hVv46Lt:p:f:e:c:w:H:C:R:SD:F:A:U:P:q",
longopts, &option);
if (c == -1 || c == EOF)
@@ -615,6 +621,9 @@ process_arguments (int argc, char **argv)
use_ssl = TRUE;
use_ehlo = TRUE;
break;
+ case 'L':
+ use_lhlo = TRUE;
+ break;
case '4':
address_family = AF_INET;
break;
@@ -823,6 +832,8 @@ print_help (void)
printf (" %s\n", _("SMTP AUTH username"));
printf (" %s\n", "-P, --authpass=STRING");
printf (" %s\n", _("SMTP AUTH password"));
+ printf (" %s\n", "-L, --lmtp");
+ printf (" %s\n", _("Send LHLO instead of HELO/EHLO"));
printf (" %s\n", "-q, --ignore-quit-failure");
printf (" %s\n", _("Ignore failure when sending QUIT command to server"));
@@ -835,7 +846,7 @@ print_help (void)
printf("\n");
printf ("%s\n", _("Successul connects return STATE_OK, refusals and timeouts return"));
printf ("%s\n", _("STATE_CRITICAL, other errors return STATE_UNKNOWN. Successful"));
- printf ("%s\n", _("connects, but incorrect reponse messages from the host result in"));
+ printf ("%s\n", _("connects, but incorrect response messages from the host result in"));
printf ("%s\n", _("STATE_WARNING return values."));
printf (UT_SUPPORT);
@@ -849,6 +860,6 @@ print_usage (void)
printf ("%s\n", _("Usage:"));
printf ("%s -H host [-p port] [-4|-6] [-e expect] [-C command] [-R response] [-f from addr]\n", progname);
printf ("[-A authtype -U authuser -P authpass] [-w warn] [-c crit] [-t timeout] [-q]\n");
- printf ("[-F fqdn] [-S] [-D warn days cert expire[,crit days cert expire]] [-v] \n");
+ printf ("[-F fqdn] [-S] [-L] [-D warn days cert expire[,crit days cert expire]] [-v] \n");
}
diff --git a/plugins/check_snmp.c b/plugins/check_snmp.c
index da9638c4..abe54cfb 100644
--- a/plugins/check_snmp.c
+++ b/plugins/check_snmp.c
@@ -468,6 +468,9 @@ main (int argc, char **argv)
/* Process this block for numeric comparisons */
/* Make some special values,like Timeticks numeric only if a threshold is defined */
if (thlds[i]->warning || thlds[i]->critical || calculate_rate) {
+ if (verbose > 2) {
+ print_thresholds(" thresholds", thlds[i]);
+ }
ptr = strpbrk (show, "-0123456789");
if (ptr == NULL)
die (STATE_UNKNOWN,_("No valid data returned (%s)\n"), show);
@@ -576,20 +579,23 @@ main (int argc, char **argv)
len = sizeof(perfstr)-strlen(perfstr)-1;
strncat(perfstr, show, len>ptr-show ? ptr-show : len);
+ if (type)
+ strncat(perfstr, type, sizeof(perfstr)-strlen(perfstr)-1);
+
if (warning_thresholds) {
strncat(perfstr, ";", sizeof(perfstr)-strlen(perfstr)-1);
- strncat(perfstr, warning_thresholds, sizeof(perfstr)-strlen(perfstr)-1);
+ if(thlds[i]->warning && thlds[i]->warning->text)
+ strncat(perfstr, thlds[i]->warning->text, sizeof(perfstr)-strlen(perfstr)-1);
}
if (critical_thresholds) {
if (!warning_thresholds)
strncat(perfstr, ";", sizeof(perfstr)-strlen(perfstr)-1);
strncat(perfstr, ";", sizeof(perfstr)-strlen(perfstr)-1);
- strncat(perfstr, critical_thresholds, sizeof(perfstr)-strlen(perfstr)-1);
+ if(thlds[i]->critical && thlds[i]->critical->text)
+ strncat(perfstr, thlds[i]->critical->text, sizeof(perfstr)-strlen(perfstr)-1);
}
- if (type)
- strncat(perfstr, type, sizeof(perfstr)-strlen(perfstr)-1);
strncat(perfstr, " ", sizeof(perfstr)-strlen(perfstr)-1);
}
}
@@ -1160,7 +1166,7 @@ print_help (void)
printf ("(%s \"%s\")\n", _("default is") ,DEFAULT_COMMUNITY);
printf (" %s\n", "-U, --secname=USERNAME");
printf (" %s\n", _("SNMPv3 username"));
- printf (" %s\n", "-A, --authpassword=PASSWORD");
+ printf (" %s\n", "-A, --authpasswd=PASSWORD");
printf (" %s\n", _("SNMPv3 authentication password"));
printf (" %s\n", "-X, --privpasswd=PASSWORD");
printf (" %s\n", _("SNMPv3 privacy password"));
@@ -1207,8 +1213,9 @@ print_help (void)
printf (" %s\n", _("Separates output on multiple OID requests"));
printf (UT_CONN_TIMEOUT, DEFAULT_SOCKET_TIMEOUT);
+ printf (" %s\n", _("NOTE the final timeout value is calculated using this formula: timeout_interval * retries + 5"));
printf (" %s\n", "-e, --retries=INTEGER");
- printf (" %s\n", _("Number of retries to be used in the requests"));
+ printf (" %s%i\n", _("Number of retries to be used in the requests, default: "), DEFAULT_RETRIES);
printf (" %s\n", "-O, --perf-oids");
printf (" %s\n", _("Label performance data with OIDs instead of --label's"));
diff --git a/plugins/check_swap.c b/plugins/check_swap.c
index 4d5a4071..7da26cfc 100644
--- a/plugins/check_swap.c
+++ b/plugins/check_swap.c
@@ -34,6 +34,9 @@ const char *email = "devel@monitoring-plugins.org";
#include "common.h"
#include "popen.h"
#include "utils.h"
+#include <string.h>
+#include <math.h>
+#include <libintl.h>
#ifdef HAVE_DECL_SWAPCTL
# ifdef HAVE_SYS_PARAM_H
@@ -51,16 +54,19 @@ const char *email = "devel@monitoring-plugins.org";
# define SWAP_CONVERSION 1
#endif
-int check_swap (int usp, float free_swap_mb);
+typedef struct {
+ int is_percentage;
+ uint64_t value;
+} threshold_t;
+
+int check_swap (float free_swap_mb, float total_swap_mb);
int process_arguments (int argc, char **argv);
int validate_arguments (void);
void print_usage (void);
void print_help (void);
-int warn_percent = 0;
-int crit_percent = 0;
-float warn_size_bytes = 0;
-float crit_size_bytes = 0;
+threshold_t warn;
+threshold_t crit;
int verbose;
int allswaps;
int no_swap_state = STATE_CRITICAL;
@@ -68,9 +74,10 @@ int no_swap_state = STATE_CRITICAL;
int
main (int argc, char **argv)
{
- int percent_used, percent;
- float total_swap_mb = 0, used_swap_mb = 0, free_swap_mb = 0;
- float dsktotal_mb = 0, dskused_mb = 0, dskfree_mb = 0, tmp_mb = 0;
+ unsigned int percent_used, percent;
+ uint64_t total_swap_mb = 0, used_swap_mb = 0, free_swap_mb = 0;
+ uint64_t dsktotal_mb = 0, dskused_mb = 0, dskfree_mb = 0;
+ uint64_t tmp_KB = 0;
int result = STATE_UNKNOWN;
char input_buffer[MAX_INPUT_BUFFER];
#ifdef HAVE_PROC_MEMINFO
@@ -116,10 +123,15 @@ main (int argc, char **argv)
}
fp = fopen (PROC_MEMINFO, "r");
while (fgets (input_buffer, MAX_INPUT_BUFFER - 1, fp)) {
- if (sscanf (input_buffer, "%*[S]%*[w]%*[a]%*[p]%*[:] %f %f %f", &dsktotal_mb, &dskused_mb, &dskfree_mb) == 3) {
- dsktotal_mb = dsktotal_mb / 1048576; /* Apply conversion */
- dskused_mb = dskused_mb / 1048576;
- dskfree_mb = dskfree_mb / 1048576;
+ /*
+ * The following sscanf call looks for a line looking like: "Swap: 123 123 123"
+ * On which kind of system this format exists, I can not say, but I wanted to
+ * document this for people who are not adept at sscanf anymore, like me
+ */
+ if (sscanf (input_buffer, "%*[S]%*[w]%*[a]%*[p]%*[:] %lu %lu %lu", &dsktotal_mb, &dskused_mb, &dskfree_mb) == 3) {
+ dsktotal_mb = dsktotal_mb / (1024 * 1024); /* Apply conversion */
+ dskused_mb = dskused_mb / (1024 * 1024);
+ dskfree_mb = dskfree_mb / (1024 * 1024);
total_swap_mb += dsktotal_mb;
used_swap_mb += dskused_mb;
free_swap_mb += dskfree_mb;
@@ -128,21 +140,25 @@ main (int argc, char **argv)
percent=100.0;
else
percent = 100 * (((double) dskused_mb) / ((double) dsktotal_mb));
- result = max_state (result, check_swap (percent, dskfree_mb));
+ result = max_state (result, check_swap (dskfree_mb, dsktotal_mb));
if (verbose)
- xasprintf (&status, "%s [%.0f (%d%%)]", status, dskfree_mb, 100 - percent);
+ xasprintf (&status, "%s [%lu (%d%%)]", status, dskfree_mb, 100 - percent);
}
}
- else if (sscanf (input_buffer, "%*[S]%*[w]%*[a]%*[p]%[TotalFre]%*[:] %f %*[k]%*[B]", str, &tmp_mb)) {
+ /*
+ * The following sscanf call looks for lines looking like: "SwapTotal: 123" and "SwapFree: 123"
+ * This format exists at least on Debian Linux with a 5.* kernel
+ */
+ else if (sscanf (input_buffer, "%*[S]%*[w]%*[a]%*[p]%[TotalFre]%*[:] %lu %*[k]%*[B]", str, &tmp_KB)) {
if (verbose >= 3) {
- printf("Got %s with %f\n", str, tmp_mb);
+ printf("Got %s with %lu\n", str, tmp_KB);
}
/* I think this part is always in Kb, so convert to mb */
if (strcmp ("Total", str) == 0) {
- dsktotal_mb = tmp_mb / 1024;
+ dsktotal_mb = tmp_KB / 1024;
}
else if (strcmp ("Free", str) == 0) {
- dskfree_mb = tmp_mb / 1024;
+ dskfree_mb = tmp_KB / 1024;
}
}
}
@@ -227,7 +243,7 @@ main (int argc, char **argv)
free_swap_mb += dskfree_mb;
if (allswaps) {
percent = 100 * (((double) dskused_mb) / ((double) dsktotal_mb));
- result = max_state (result, check_swap (percent, dskfree_mb));
+ result = max_state (result, check_swap (dskfree_mb, dsktotal_mb));
if (verbose)
xasprintf (&status, "%s [%.0f (%d%%)]", status, dskfree_mb, 100 - percent);
}
@@ -289,7 +305,7 @@ main (int argc, char **argv)
if(allswaps && dsktotal_mb > 0){
percent = 100 * (((double) dskused_mb) / ((double) dsktotal_mb));
- result = max_state (result, check_swap (percent, dskfree_mb));
+ result = max_state (result, check_swap (dskfree_mb, dsktotal_mb));
if (verbose) {
xasprintf (&status, "%s [%.0f (%d%%)]", status, dskfree_mb, 100 - percent);
}
@@ -328,7 +344,7 @@ main (int argc, char **argv)
if(allswaps && dsktotal_mb > 0){
percent = 100 * (((double) dskused_mb) / ((double) dsktotal_mb));
- result = max_state (result, check_swap (percent, dskfree_mb));
+ result = max_state (result, check_swap(dskfree_mb, dsktotal_mb));
if (verbose) {
xasprintf (&status, "%s [%.0f (%d%%)]", status, dskfree_mb, 100 - percent);
}
@@ -355,14 +371,19 @@ main (int argc, char **argv)
status = "- Swap is either disabled, not present, or of zero size. ";
}
- result = max_state (result, check_swap (percent_used, free_swap_mb));
- printf (_("SWAP %s - %d%% free (%d MB out of %d MB) %s|"),
+ result = max_state (result, check_swap(free_swap_mb, total_swap_mb));
+ printf (_("SWAP %s - %d%% free (%dMB out of %dMB) %s|"),
state_text (result),
(100 - percent_used), (int) free_swap_mb, (int) total_swap_mb, status);
- puts (perfdata ("swap", (long) free_swap_mb, "MB",
- TRUE, (long) max (warn_size_bytes/(1024 * 1024), warn_percent/100.0*total_swap_mb),
- TRUE, (long) max (crit_size_bytes/(1024 * 1024), crit_percent/100.0*total_swap_mb),
+ uint64_t warn_print = warn.value;
+ if (warn.is_percentage) warn_print = warn.value * (total_swap_mb *1024 *1024/100);
+ uint64_t crit_print = crit.value;
+ if (crit.is_percentage) crit_print = crit.value * (total_swap_mb *1024 *1024/100);
+
+ puts (perfdata_uint64 ("swap", free_swap_mb *1024 *1024, "B",
+ TRUE, warn_print,
+ TRUE, crit_print,
TRUE, 0,
TRUE, (long) total_swap_mb));
@@ -370,26 +391,37 @@ main (int argc, char **argv)
}
-
int
-check_swap (int usp, float free_swap_mb)
+check_swap(float free_swap_mb, float total_swap_mb)
{
- if (!free_swap_mb) return no_swap_state;
+ if (!total_swap_mb) return no_swap_state;
- int result = STATE_UNKNOWN;
- float free_swap = free_swap_mb * (1024 * 1024); /* Convert back to bytes as warn and crit specified in bytes */
- if (usp >= 0 && crit_percent != 0 && usp >= (100.0 - crit_percent))
- result = STATE_CRITICAL;
- else if (crit_size_bytes > 0 && free_swap <= crit_size_bytes)
- result = STATE_CRITICAL;
- else if (usp >= 0 && warn_percent != 0 && usp >= (100.0 - warn_percent))
- result = STATE_WARNING;
- else if (warn_size_bytes > 0 && free_swap <= warn_size_bytes)
- result = STATE_WARNING;
- else if (usp >= 0.0)
- result = STATE_OK;
- return result;
+ uint64_t free_swap = free_swap_mb * (1024 * 1024); /* Convert back to bytes as warn and crit specified in bytes */
+
+ if (!crit.is_percentage && crit.value >= free_swap) return STATE_CRITICAL;
+ if (!warn.is_percentage && warn.value >= free_swap) return STATE_WARNING;
+
+
+ uint64_t usage_percentage = ((total_swap_mb - free_swap_mb) / total_swap_mb) * 100;
+
+ if (crit.is_percentage &&
+ usage_percentage >= 0 &&
+ crit.value != 0 &&
+ usage_percentage >= (100 - crit.value))
+ {
+ return STATE_CRITICAL;
+ }
+
+ if (warn.is_percentage &&
+ usage_percentage >= 0 &&
+ warn.value != 0 &&
+ usage_percentage >= (100 - warn.value))
+ {
+ return STATE_WARNING;
+ }
+
+ return STATE_OK;
}
@@ -422,42 +454,68 @@ process_arguments (int argc, char **argv)
break;
switch (c) {
- case 'w': /* warning size threshold */
- if (is_intnonneg (optarg)) {
- warn_size_bytes = (float) atoi (optarg);
- break;
- }
- else if (strstr (optarg, ",") &&
- strstr (optarg, "%") &&
- sscanf (optarg, "%f,%d%%", &warn_size_bytes, &warn_percent) == 2) {
- warn_size_bytes = floorf(warn_size_bytes);
- break;
- }
- else if (strstr (optarg, "%") &&
- sscanf (optarg, "%d%%", &warn_percent) == 1) {
- break;
- }
- else {
- usage4 (_("Warning threshold must be integer or percentage!"));
- }
- case 'c': /* critical size threshold */
- if (is_intnonneg (optarg)) {
- crit_size_bytes = (float) atoi (optarg);
- break;
- }
- else if (strstr (optarg, ",") &&
- strstr (optarg, "%") &&
- sscanf (optarg, "%f,%d%%", &crit_size_bytes, &crit_percent) == 2) {
- crit_size_bytes = floorf(crit_size_bytes);
- break;
- }
- else if (strstr (optarg, "%") &&
- sscanf (optarg, "%d%%", &crit_percent) == 1) {
- break;
- }
- else {
- usage4 (_("Critical threshold must be integer or percentage!"));
+ case 'w': /* warning size threshold */
+ {
+ /*
+ * We expect either a positive integer value without a unit, which means
+ * the unit is Bytes or a positive integer value and a percentage sign (%),
+ * which means the value must be within 0 and 100 and is relative to the total swap
+ */
+ size_t length;
+ length = strlen(optarg);
+
+ if (optarg[length - 1] == '%') {
+ /* It's percentage */
+ warn.is_percentage = 1;
+ optarg[length - 1] = '\0';
+ if (is_uint64(optarg, &warn.value)) {
+ if (warn.value > 100) {
+ usage4 (_("Warning threshold percentage must be <= 100!"));
+ } else {
+ break;
+ }
+ }
+ } else {
+ /* It's Bytes */
+ warn.is_percentage = 0;
+ if (is_uint64(optarg, &warn.value)) {
+ break;
+ } else {
+ usage4 (_("Warning threshold must be positive integer or percentage!"));
+ }
+ }
}
+ case 'c': /* critical size threshold */
+ {
+ /*
+ * We expect either a positive integer value without a unit, which means
+ * the unit is Bytes or a positive integer value and a percentage sign (%),
+ * which means the value must be within 0 and 100 and is relative to the total swap
+ */
+ size_t length;
+ length = strlen(optarg);
+
+ if (optarg[length - 1] == '%') {
+ /* It's percentage */
+ crit.is_percentage = 1;
+ optarg[length - 1] = '\0';
+ if (is_uint64(optarg, &crit.value)) {
+ if (crit.value> 100) {
+ usage4 (_("Critical threshold percentage must be <= 100!"));
+ } else {
+ break;
+ }
+ }
+ } else {
+ /* It's Bytes */
+ crit.is_percentage = 0;
+ if (is_uint64(optarg, &crit.value)) {
+ break;
+ } else {
+ usage4 (_("Critical threshold must be positive integer or percentage!"));
+ }
+ }
+ }
case 'a': /* all swap */
allswaps = TRUE;
break;
@@ -482,23 +540,6 @@ process_arguments (int argc, char **argv)
c = optind;
if (c == argc)
return validate_arguments ();
- if (warn_percent == 0 && is_intnonneg (argv[c]))
- warn_percent = atoi (argv[c++]);
-
- if (c == argc)
- return validate_arguments ();
- if (crit_percent == 0 && is_intnonneg (argv[c]))
- crit_percent = atoi (argv[c++]);
-
- if (c == argc)
- return validate_arguments ();
- if (warn_size_bytes == 0 && is_intnonneg (argv[c]))
- warn_size_bytes = (float) atoi (argv[c++]);
-
- if (c == argc)
- return validate_arguments ();
- if (crit_size_bytes == 0 && is_intnonneg (argv[c]))
- crit_size_bytes = (float) atoi (argv[c++]);
return validate_arguments ();
}
@@ -508,17 +549,15 @@ process_arguments (int argc, char **argv)
int
validate_arguments (void)
{
- if (warn_percent == 0 && crit_percent == 0 && warn_size_bytes == 0
- && crit_size_bytes == 0) {
+ if (warn.value == 0 && crit.value == 0) {
return ERROR;
}
- else if (warn_percent < crit_percent) {
- usage4
- (_("Warning percentage should be more than critical percentage"));
- }
- else if (warn_size_bytes < crit_size_bytes) {
- usage4
- (_("Warning free space should be more than critical free space"));
+ else if ((warn.is_percentage == crit.is_percentage) && (warn.value < crit.value)) {
+ /* This is NOT triggered if warn and crit are different units, e.g warn is percentage
+ * and crit is absolute. We cannot determine the condition at this point since we
+ * don't know the value of total swap yet
+ */
+ usage4(_("Warning should be more than critical"));
}
return OK;
}
@@ -534,7 +573,7 @@ print_help (void)
printf ("%s\n", _("Check swap space on local machine."));
- printf ("\n\n");
+ printf ("\n\n");
print_usage ();
@@ -542,33 +581,32 @@ print_help (void)
printf (UT_EXTRA_OPTS);
printf (" %s\n", "-w, --warning=INTEGER");
- printf (" %s\n", _("Exit with WARNING status if less than INTEGER bytes of swap space are free"));
- printf (" %s\n", "-w, --warning=PERCENT%%");
- printf (" %s\n", _("Exit with WARNING status if less than PERCENT of swap space is free"));
- printf (" %s\n", "-c, --critical=INTEGER");
- printf (" %s\n", _("Exit with CRITICAL status if less than INTEGER bytes of swap space are free"));
- printf (" %s\n", "-c, --critical=PERCENT%%");
- printf (" %s\n", _("Exit with CRITICAL status if less than PERCENT of swap space is free"));
- printf (" %s\n", "-a, --allswaps");
- printf (" %s\n", _("Conduct comparisons for all swap partitions, one by one"));
- printf (" %s\n", "-n, --no-swap=<ok|warning|critical|unknown>");
- printf (" %s %s\n", _("Resulting state when there is no swap regardless of thresholds. Default:"), state_text(no_swap_state));
+ printf (" %s\n", _("Exit with WARNING status if less than INTEGER bytes of swap space are free"));
+ printf (" %s\n", "-w, --warning=PERCENT%");
+ printf (" %s\n", _("Exit with WARNING status if less than PERCENT of swap space is free"));
+ printf (" %s\n", "-c, --critical=INTEGER");
+ printf (" %s\n", _("Exit with CRITICAL status if less than INTEGER bytes of swap space are free"));
+ printf (" %s\n", "-c, --critical=PERCENT%");
+ printf (" %s\n", _("Exit with CRITICAL status if less than PERCENT of swap space is free"));
+ printf (" %s\n", "-a, --allswaps");
+ printf (" %s\n", _("Conduct comparisons for all swap partitions, one by one"));
+ printf (" %s\n", "-n, --no-swap=<ok|warning|critical|unknown>");
+ printf (" %s %s\n", _("Resulting state when there is no swap regardless of thresholds. Default:"), state_text(no_swap_state));
printf (UT_VERBOSE);
printf ("\n");
- printf ("%s\n", _("Notes:"));
- printf (" %s\n", _("Both INTEGER and PERCENT thresholds can be specified, they are all checked."));
- printf (" %s\n", _("On AIX, if -a is specified, uses lsps -a, otherwise uses lsps -s."));
+ printf ("%s\n", _("Notes:"));
+ printf (" %s\n", _("Both INTEGER and PERCENT thresholds can be specified, they are all checked."));
+ printf (" %s\n", _("On AIX, if -a is specified, uses lsps -a, otherwise uses lsps -s."));
printf (UT_SUPPORT);
}
-
void
print_usage (void)
{
printf ("%s\n", _("Usage:"));
- printf (" %s [-av] -w <percent_free>%% -c <percent_free>%%\n",progname);
- printf (" -w <bytes_free> -c <bytes_free> [-n <state>]\n");
+ printf (" %s [-av] -w <percent_free>%% -c <percent_free>%%\n",progname);
+ printf (" -w <bytes_free> -c <bytes_free> [-n <state>]\n");
}
diff --git a/plugins/check_tcp.c b/plugins/check_tcp.c
index 61333bd7..1365b9cb 100644
--- a/plugins/check_tcp.c
+++ b/plugins/check_tcp.c
@@ -86,6 +86,11 @@ static char buffer[MAXBUF];
static int expect_mismatch_state = STATE_WARNING;
static int match_flags = NP_MATCH_EXACT;
+#ifdef HAVE_SSL
+static char *sni = NULL;
+static int sni_specified = FALSE;
+#endif
+
#define FLAG_SSL 0x01
#define FLAG_VERBOSE 0x02
#define FLAG_TIME_WARN 0x04
@@ -241,7 +246,7 @@ main (int argc, char **argv)
#ifdef HAVE_SSL
if (flags & FLAG_SSL){
- result = np_net_ssl_init(sd);
+ result = np_net_ssl_init_with_hostname(sd, (sni_specified ? sni : NULL));
if (result == STATE_OK && check_cert == TRUE) {
result = np_net_ssl_check_cert(days_till_exp_warn, days_till_exp_crit);
}
@@ -401,6 +406,10 @@ process_arguments (int argc, char **argv)
int escape = 0;
char *temp;
+ enum {
+ SNI_OPTION = CHAR_MAX + 1
+ };
+
int option = 0;
static struct option longopts[] = {
{"hostname", required_argument, 0, 'H'},
@@ -427,6 +436,7 @@ process_arguments (int argc, char **argv)
{"version", no_argument, 0, 'V'},
{"help", no_argument, 0, 'h'},
{"ssl", no_argument, 0, 'S'},
+ {"sni", required_argument, 0, SNI_OPTION},
{"certificate", required_argument, 0, 'D'},
{0, 0, 0, 0}
};
@@ -604,6 +614,15 @@ process_arguments (int argc, char **argv)
die (STATE_UNKNOWN, _("Invalid option - SSL is not available"));
#endif
break;
+ case SNI_OPTION:
+#ifdef HAVE_SSL
+ flags |= FLAG_SSL;
+ sni_specified = TRUE;
+ sni = optarg;
+#else
+ die (STATE_UNKNOWN, _("Invalid option - SSL is not available"));
+#endif
+ break;
case 'A':
match_flags |= NP_MATCH_ALL;
break;
@@ -671,6 +690,8 @@ print_help (void)
printf (" %s\n", _("1st is #days for warning, 2nd is critical (if not specified - 0)."));
printf (" %s\n", "-S, --ssl");
printf (" %s\n", _("Use SSL for the connection."));
+ printf (" %s\n", "--sni=STRING");
+ printf (" %s\n", _("SSL server_name"));
#endif
printf (UT_WARN_CRIT);
diff --git a/plugins/check_ups.c b/plugins/check_ups.c
index e9e56a51..0de37a20 100644
--- a/plugins/check_ups.c
+++ b/plugins/check_ups.c
@@ -402,7 +402,10 @@ get_ups_variable (const char *varname, char *buf, size_t buflen)
/* create the command string to send to the UPS daemon */
/* Add LOGOUT to avoid read failure logs */
- sprintf (send_buffer, "GET VAR %s %s\nLOGOUT\n", ups_name, varname);
+ if (snprintf (send_buffer, sizeof(send_buffer), "GET VAR %s %s\nLOGOUT\n", ups_name, varname) >= sizeof(send_buffer)) {
+ printf("%s\n", _("UPS name too long for buffer"));
+ return ERROR;
+ }
/* send the command to the daemon and get a response back */
if (process_tcp_request
diff --git a/plugins/common.h b/plugins/common.h
index 8719b502..0f08e2f6 100644
--- a/plugins/common.h
+++ b/plugins/common.h
@@ -174,6 +174,11 @@
*
*/
+/* MariaDB 10.2 client does not set MYSQL_PORT */
+#ifndef MYSQL_PORT
+# define MYSQL_PORT 3306
+#endif
+
enum {
OK = 0,
ERROR = -1
@@ -220,4 +225,18 @@ enum {
# define __attribute__(x) /* do nothing */
#endif
+/* Try sysconf(_SC_OPEN_MAX) first, as it can be higher than OPEN_MAX.
+ * If that fails and the macro isn't defined, we fall back to an educated
+ * guess. There's no guarantee that our guess is adequate and the program
+ * will die with SIGSEGV if it isn't and the upper boundary is breached. */
+#define DEFAULT_MAXFD 256 /* fallback value if no max open files value is set */
+#define MAXFD_LIMIT 8192 /* upper limit of open files */
+#ifdef _SC_OPEN_MAX
+static long maxfd = 0;
+#elif defined(OPEN_MAX)
+# define maxfd OPEN_MAX
+#else /* sysconf macro unavailable, so guess (may be wildly inaccurate) */
+# define maxfd DEFAULT_MAXFD
+#endif
+
#endif /* _COMMON_H_ */
diff --git a/plugins/negate.c b/plugins/negate.c
index b320e356..50f62d33 100644
--- a/plugins/negate.c
+++ b/plugins/negate.c
@@ -86,11 +86,9 @@ main (int argc, char **argv)
result = cmd_run_array (command_line, &chld_out, &chld_err, 0);
}
if (chld_err.lines > 0) {
- printf ("Error output from command:\n");
for (i = 0; i < chld_err.lines; i++) {
- printf ("%s\n", chld_err.line[i]);
+ fprintf (stderr, "%s\n", chld_err.line[i]);
}
- exit (STATE_WARNING);
}
/* Return UNKNOWN or worse if no output is returned */
diff --git a/plugins/picohttpparser/Makefile.am b/plugins/picohttpparser/Makefile.am
new file mode 100644
index 00000000..87e05313
--- /dev/null
+++ b/plugins/picohttpparser/Makefile.am
@@ -0,0 +1,3 @@
+noinst_LIBRARIES = libpicohttpparser.a
+
+libpicohttpparser_a_SOURCES = picohttpparser.c picohttpparser.h
diff --git a/plugins/picohttpparser/picohttpparser.c b/plugins/picohttpparser/picohttpparser.c
new file mode 100644
index 00000000..74ccc3ef
--- /dev/null
+++ b/plugins/picohttpparser/picohttpparser.c
@@ -0,0 +1,645 @@
+/*
+ * Copyright (c) 2009-2014 Kazuho Oku, Tokuhiro Matsuno, Daisuke Murase,
+ * Shigeo Mitsunari
+ *
+ * The software is licensed under either the MIT License (below) or the Perl
+ * license.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <string.h>
+#ifdef __SSE4_2__
+#ifdef _MSC_VER
+#include <nmmintrin.h>
+#else
+#include <x86intrin.h>
+#endif
+#endif
+#include "picohttpparser.h"
+
+#if __GNUC__ >= 3
+#define likely(x) __builtin_expect(!!(x), 1)
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#else
+#define likely(x) (x)
+#define unlikely(x) (x)
+#endif
+
+#ifdef _MSC_VER
+#define ALIGNED(n) _declspec(align(n))
+#else
+#define ALIGNED(n) __attribute__((aligned(n)))
+#endif
+
+#define IS_PRINTABLE_ASCII(c) ((unsigned char)(c)-040u < 0137u)
+
+#define CHECK_EOF() \
+ if (buf == buf_end) { \
+ *ret = -2; \
+ return NULL; \
+ }
+
+#define EXPECT_CHAR_NO_CHECK(ch) \
+ if (*buf++ != ch) { \
+ *ret = -1; \
+ return NULL; \
+ }
+
+#define EXPECT_CHAR(ch) \
+ CHECK_EOF(); \
+ EXPECT_CHAR_NO_CHECK(ch);
+
+#define ADVANCE_TOKEN(tok, toklen) \
+ do { \
+ const char *tok_start = buf; \
+ static const char ALIGNED(16) ranges2[16] = "\000\040\177\177"; \
+ int found2; \
+ buf = findchar_fast(buf, buf_end, ranges2, 4, &found2); \
+ if (!found2) { \
+ CHECK_EOF(); \
+ } \
+ while (1) { \
+ if (*buf == ' ') { \
+ break; \
+ } else if (unlikely(!IS_PRINTABLE_ASCII(*buf))) { \
+ if ((unsigned char)*buf < '\040' || *buf == '\177') { \
+ *ret = -1; \
+ return NULL; \
+ } \
+ } \
+ ++buf; \
+ CHECK_EOF(); \
+ } \
+ tok = tok_start; \
+ toklen = buf - tok_start; \
+ } while (0)
+
+static const char *token_char_map = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\1\0\1\1\1\1\1\0\0\1\1\0\1\1\0\1\1\1\1\1\1\1\1\1\1\0\0\0\0\0\0"
+ "\0\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\0\0\0\1\1"
+ "\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\0\1\0\1\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
+
+static const char *findchar_fast(const char *buf, const char *buf_end, const char *ranges, size_t ranges_size, int *found)
+{
+ *found = 0;
+#if __SSE4_2__
+ if (likely(buf_end - buf >= 16)) {
+ __m128i ranges16 = _mm_loadu_si128((const __m128i *)ranges);
+
+ size_t left = (buf_end - buf) & ~15;
+ do {
+ __m128i b16 = _mm_loadu_si128((const __m128i *)buf);
+ int r = _mm_cmpestri(ranges16, ranges_size, b16, 16, _SIDD_LEAST_SIGNIFICANT | _SIDD_CMP_RANGES | _SIDD_UBYTE_OPS);
+ if (unlikely(r != 16)) {
+ buf += r;
+ *found = 1;
+ break;
+ }
+ buf += 16;
+ left -= 16;
+ } while (likely(left != 0));
+ }
+#else
+ /* suppress unused parameter warning */
+ (void)buf_end;
+ (void)ranges;
+ (void)ranges_size;
+#endif
+ return buf;
+}
+
+static const char *get_token_to_eol(const char *buf, const char *buf_end, const char **token, size_t *token_len, int *ret)
+{
+ const char *token_start = buf;
+
+#ifdef __SSE4_2__
+ static const char ALIGNED(16) ranges1[16] = "\0\010" /* allow HT */
+ "\012\037" /* allow SP and up to but not including DEL */
+ "\177\177"; /* allow chars w. MSB set */
+ int found;
+ buf = findchar_fast(buf, buf_end, ranges1, 6, &found);
+ if (found)
+ goto FOUND_CTL;
+#else
+ /* find non-printable char within the next 8 bytes, this is the hottest code; manually inlined */
+ while (likely(buf_end - buf >= 8)) {
+#define DOIT() \
+ do { \
+ if (unlikely(!IS_PRINTABLE_ASCII(*buf))) \
+ goto NonPrintable; \
+ ++buf; \
+ } while (0)
+ DOIT();
+ DOIT();
+ DOIT();
+ DOIT();
+ DOIT();
+ DOIT();
+ DOIT();
+ DOIT();
+#undef DOIT
+ continue;
+ NonPrintable:
+ if ((likely((unsigned char)*buf < '\040') && likely(*buf != '\011')) || unlikely(*buf == '\177')) {
+ goto FOUND_CTL;
+ }
+ ++buf;
+ }
+#endif
+ for (;; ++buf) {
+ CHECK_EOF();
+ if (unlikely(!IS_PRINTABLE_ASCII(*buf))) {
+ if ((likely((unsigned char)*buf < '\040') && likely(*buf != '\011')) || unlikely(*buf == '\177')) {
+ goto FOUND_CTL;
+ }
+ }
+ }
+FOUND_CTL:
+ if (likely(*buf == '\015')) {
+ ++buf;
+ EXPECT_CHAR('\012');
+ *token_len = buf - 2 - token_start;
+ } else if (*buf == '\012') {
+ *token_len = buf - token_start;
+ ++buf;
+ } else {
+ *ret = -1;
+ return NULL;
+ }
+ *token = token_start;
+
+ return buf;
+}
+
+static const char *is_complete(const char *buf, const char *buf_end, size_t last_len, int *ret)
+{
+ int ret_cnt = 0;
+ buf = last_len < 3 ? buf : buf + last_len - 3;
+
+ while (1) {
+ CHECK_EOF();
+ if (*buf == '\015') {
+ ++buf;
+ CHECK_EOF();
+ EXPECT_CHAR('\012');
+ ++ret_cnt;
+ } else if (*buf == '\012') {
+ ++buf;
+ ++ret_cnt;
+ } else {
+ ++buf;
+ ret_cnt = 0;
+ }
+ if (ret_cnt == 2) {
+ return buf;
+ }
+ }
+
+ *ret = -2;
+ return NULL;
+}
+
+#define PARSE_INT(valp_, mul_) \
+ if (*buf < '0' || '9' < *buf) { \
+ buf++; \
+ *ret = -1; \
+ return NULL; \
+ } \
+ *(valp_) = (mul_) * (*buf++ - '0');
+
+#define PARSE_INT_3(valp_) \
+ do { \
+ int res_ = 0; \
+ PARSE_INT(&res_, 100) \
+ *valp_ = res_; \
+ PARSE_INT(&res_, 10) \
+ *valp_ += res_; \
+ PARSE_INT(&res_, 1) \
+ *valp_ += res_; \
+ } while (0)
+
+/* returned pointer is always within [buf, buf_end), or null */
+static const char *parse_http_version(const char *buf, const char *buf_end, int *minor_version, int *ret)
+{
+ /* we want at least [HTTP/1.<two chars>] to try to parse */
+ if (buf_end - buf < 9) {
+ *ret = -2;
+ return NULL;
+ }
+ EXPECT_CHAR_NO_CHECK('H');
+ EXPECT_CHAR_NO_CHECK('T');
+ EXPECT_CHAR_NO_CHECK('T');
+ EXPECT_CHAR_NO_CHECK('P');
+ EXPECT_CHAR_NO_CHECK('/');
+ EXPECT_CHAR_NO_CHECK('1');
+ EXPECT_CHAR_NO_CHECK('.');
+ PARSE_INT(minor_version, 1);
+ return buf;
+}
+
+static const char *parse_headers(const char *buf, const char *buf_end, struct phr_header *headers, size_t *num_headers,
+ size_t max_headers, int *ret)
+{
+ for (;; ++*num_headers) {
+ CHECK_EOF();
+ if (*buf == '\015') {
+ ++buf;
+ EXPECT_CHAR('\012');
+ break;
+ } else if (*buf == '\012') {
+ ++buf;
+ break;
+ }
+ if (*num_headers == max_headers) {
+ *ret = -1;
+ return NULL;
+ }
+ if (!(*num_headers != 0 && (*buf == ' ' || *buf == '\t'))) {
+ /* parsing name, but do not discard SP before colon, see
+ * http://www.mozilla.org/security/announce/2006/mfsa2006-33.html */
+ headers[*num_headers].name = buf;
+ static const char ALIGNED(16) ranges1[] = "\x00 " /* control chars and up to SP */
+ "\"\"" /* 0x22 */
+ "()" /* 0x28,0x29 */
+ ",," /* 0x2c */
+ "//" /* 0x2f */
+ ":@" /* 0x3a-0x40 */
+ "[]" /* 0x5b-0x5d */
+ "{\377"; /* 0x7b-0xff */
+ int found;
+ buf = findchar_fast(buf, buf_end, ranges1, sizeof(ranges1) - 1, &found);
+ if (!found) {
+ CHECK_EOF();
+ }
+ while (1) {
+ if (*buf == ':') {
+ break;
+ } else if (!token_char_map[(unsigned char)*buf]) {
+ *ret = -1;
+ return NULL;
+ }
+ ++buf;
+ CHECK_EOF();
+ }
+ if ((headers[*num_headers].name_len = buf - headers[*num_headers].name) == 0) {
+ *ret = -1;
+ return NULL;
+ }
+ ++buf;
+ for (;; ++buf) {
+ CHECK_EOF();
+ if (!(*buf == ' ' || *buf == '\t')) {
+ break;
+ }
+ }
+ } else {
+ headers[*num_headers].name = NULL;
+ headers[*num_headers].name_len = 0;
+ }
+ const char *value;
+ size_t value_len;
+ if ((buf = get_token_to_eol(buf, buf_end, &value, &value_len, ret)) == NULL) {
+ return NULL;
+ }
+ /* remove trailing SPs and HTABs */
+ const char *value_end = value + value_len;
+ for (; value_end != value; --value_end) {
+ const char c = *(value_end - 1);
+ if (!(c == ' ' || c == '\t')) {
+ break;
+ }
+ }
+ headers[*num_headers].value = value;
+ headers[*num_headers].value_len = value_end - value;
+ }
+ return buf;
+}
+
+static const char *parse_request(const char *buf, const char *buf_end, const char **method, size_t *method_len, const char **path,
+ size_t *path_len, int *minor_version, struct phr_header *headers, size_t *num_headers,
+ size_t max_headers, int *ret)
+{
+ /* skip first empty line (some clients add CRLF after POST content) */
+ CHECK_EOF();
+ if (*buf == '\015') {
+ ++buf;
+ EXPECT_CHAR('\012');
+ } else if (*buf == '\012') {
+ ++buf;
+ }
+
+ /* parse request line */
+ ADVANCE_TOKEN(*method, *method_len);
+ do {
+ ++buf;
+ } while (*buf == ' ');
+ ADVANCE_TOKEN(*path, *path_len);
+ do {
+ ++buf;
+ } while (*buf == ' ');
+ if (*method_len == 0 || *path_len == 0) {
+ *ret = -1;
+ return NULL;
+ }
+ if ((buf = parse_http_version(buf, buf_end, minor_version, ret)) == NULL) {
+ return NULL;
+ }
+ if (*buf == '\015') {
+ ++buf;
+ EXPECT_CHAR('\012');
+ } else if (*buf == '\012') {
+ ++buf;
+ } else {
+ *ret = -1;
+ return NULL;
+ }
+
+ return parse_headers(buf, buf_end, headers, num_headers, max_headers, ret);
+}
+
+int phr_parse_request(const char *buf_start, size_t len, const char **method, size_t *method_len, const char **path,
+ size_t *path_len, int *minor_version, struct phr_header *headers, size_t *num_headers, size_t last_len)
+{
+ const char *buf = buf_start, *buf_end = buf_start + len;
+ size_t max_headers = *num_headers;
+ int r;
+
+ *method = NULL;
+ *method_len = 0;
+ *path = NULL;
+ *path_len = 0;
+ *minor_version = -1;
+ *num_headers = 0;
+
+ /* if last_len != 0, check if the request is complete (a fast countermeasure
+ against slowloris) */
+ if (last_len != 0 && is_complete(buf, buf_end, last_len, &r) == NULL) {
+ return r;
+ }
+
+ if ((buf = parse_request(buf, buf_end, method, method_len, path, path_len, minor_version, headers, num_headers, max_headers,
+ &r)) == NULL) {
+ return r;
+ }
+
+ return (int)(buf - buf_start);
+}
+
+static const char *parse_response(const char *buf, const char *buf_end, int *minor_version, int *status, const char **msg,
+ size_t *msg_len, struct phr_header *headers, size_t *num_headers, size_t max_headers, int *ret)
+{
+ /* parse "HTTP/1.x" */
+ if ((buf = parse_http_version(buf, buf_end, minor_version, ret)) == NULL) {
+ return NULL;
+ }
+ /* skip space */
+ if (*buf != ' ') {
+ *ret = -1;
+ return NULL;
+ }
+ do {
+ ++buf;
+ } while (*buf == ' ');
+ /* parse status code, we want at least [:digit:][:digit:][:digit:]<other char> to try to parse */
+ if (buf_end - buf < 4) {
+ *ret = -2;
+ return NULL;
+ }
+ PARSE_INT_3(status);
+
+ /* get message including preceding space */
+ if ((buf = get_token_to_eol(buf, buf_end, msg, msg_len, ret)) == NULL) {
+ return NULL;
+ }
+ if (*msg_len == 0) {
+ /* ok */
+ } else if (**msg == ' ') {
+ /* remove preceding space */
+ do {
+ ++*msg;
+ --*msg_len;
+ } while (**msg == ' ');
+ } else {
+ /* garbage found after status code */
+ *ret = -1;
+ return NULL;
+ }
+
+ return parse_headers(buf, buf_end, headers, num_headers, max_headers, ret);
+}
+
+int phr_parse_response(const char *buf_start, size_t len, int *minor_version, int *status, const char **msg, size_t *msg_len,
+ struct phr_header *headers, size_t *num_headers, size_t last_len)
+{
+ const char *buf = buf_start, *buf_end = buf + len;
+ size_t max_headers = *num_headers;
+ int r;
+
+ *minor_version = -1;
+ *status = 0;
+ *msg = NULL;
+ *msg_len = 0;
+ *num_headers = 0;
+
+ /* if last_len != 0, check if the response is complete (a fast countermeasure
+ against slowloris) */
+ if (last_len != 0 && is_complete(buf, buf_end, last_len, &r) == NULL) {
+ return r;
+ }
+
+ if ((buf = parse_response(buf, buf_end, minor_version, status, msg, msg_len, headers, num_headers, max_headers, &r)) == NULL) {
+ return r;
+ }
+
+ return (int)(buf - buf_start);
+}
+
+int phr_parse_headers(const char *buf_start, size_t len, struct phr_header *headers, size_t *num_headers, size_t last_len)
+{
+ const char *buf = buf_start, *buf_end = buf + len;
+ size_t max_headers = *num_headers;
+ int r;
+
+ *num_headers = 0;
+
+ /* if last_len != 0, check if the response is complete (a fast countermeasure
+ against slowloris) */
+ if (last_len != 0 && is_complete(buf, buf_end, last_len, &r) == NULL) {
+ return r;
+ }
+
+ if ((buf = parse_headers(buf, buf_end, headers, num_headers, max_headers, &r)) == NULL) {
+ return r;
+ }
+
+ return (int)(buf - buf_start);
+}
+
+enum {
+ CHUNKED_IN_CHUNK_SIZE,
+ CHUNKED_IN_CHUNK_EXT,
+ CHUNKED_IN_CHUNK_DATA,
+ CHUNKED_IN_CHUNK_CRLF,
+ CHUNKED_IN_TRAILERS_LINE_HEAD,
+ CHUNKED_IN_TRAILERS_LINE_MIDDLE
+};
+
+static int decode_hex(int ch)
+{
+ if ('0' <= ch && ch <= '9') {
+ return ch - '0';
+ } else if ('A' <= ch && ch <= 'F') {
+ return ch - 'A' + 0xa;
+ } else if ('a' <= ch && ch <= 'f') {
+ return ch - 'a' + 0xa;
+ } else {
+ return -1;
+ }
+}
+
+ssize_t phr_decode_chunked(struct phr_chunked_decoder *decoder, char *buf, size_t *_bufsz)
+{
+ size_t dst = 0, src = 0, bufsz = *_bufsz;
+ ssize_t ret = -2; /* incomplete */
+
+ while (1) {
+ switch (decoder->_state) {
+ case CHUNKED_IN_CHUNK_SIZE:
+ for (;; ++src) {
+ int v;
+ if (src == bufsz)
+ goto Exit;
+ if ((v = decode_hex(buf[src])) == -1) {
+ if (decoder->_hex_count == 0) {
+ ret = -1;
+ goto Exit;
+ }
+ break;
+ }
+ if (decoder->_hex_count == sizeof(size_t) * 2) {
+ ret = -1;
+ goto Exit;
+ }
+ decoder->bytes_left_in_chunk = decoder->bytes_left_in_chunk * 16 + v;
+ ++decoder->_hex_count;
+ }
+ decoder->_hex_count = 0;
+ decoder->_state = CHUNKED_IN_CHUNK_EXT;
+ /* fallthru */
+ case CHUNKED_IN_CHUNK_EXT:
+ /* RFC 7230 A.2 "Line folding in chunk extensions is disallowed" */
+ for (;; ++src) {
+ if (src == bufsz)
+ goto Exit;
+ if (buf[src] == '\012')
+ break;
+ }
+ ++src;
+ if (decoder->bytes_left_in_chunk == 0) {
+ if (decoder->consume_trailer) {
+ decoder->_state = CHUNKED_IN_TRAILERS_LINE_HEAD;
+ break;
+ } else {
+ goto Complete;
+ }
+ }
+ decoder->_state = CHUNKED_IN_CHUNK_DATA;
+ /* fallthru */
+ case CHUNKED_IN_CHUNK_DATA: {
+ size_t avail = bufsz - src;
+ if (avail < decoder->bytes_left_in_chunk) {
+ if (dst != src)
+ memmove(buf + dst, buf + src, avail);
+ src += avail;
+ dst += avail;
+ decoder->bytes_left_in_chunk -= avail;
+ goto Exit;
+ }
+ if (dst != src)
+ memmove(buf + dst, buf + src, decoder->bytes_left_in_chunk);
+ src += decoder->bytes_left_in_chunk;
+ dst += decoder->bytes_left_in_chunk;
+ decoder->bytes_left_in_chunk = 0;
+ decoder->_state = CHUNKED_IN_CHUNK_CRLF;
+ }
+ /* fallthru */
+ case CHUNKED_IN_CHUNK_CRLF:
+ for (;; ++src) {
+ if (src == bufsz)
+ goto Exit;
+ if (buf[src] != '\015')
+ break;
+ }
+ if (buf[src] != '\012') {
+ ret = -1;
+ goto Exit;
+ }
+ ++src;
+ decoder->_state = CHUNKED_IN_CHUNK_SIZE;
+ break;
+ case CHUNKED_IN_TRAILERS_LINE_HEAD:
+ for (;; ++src) {
+ if (src == bufsz)
+ goto Exit;
+ if (buf[src] != '\015')
+ break;
+ }
+ if (buf[src++] == '\012')
+ goto Complete;
+ decoder->_state = CHUNKED_IN_TRAILERS_LINE_MIDDLE;
+ /* fallthru */
+ case CHUNKED_IN_TRAILERS_LINE_MIDDLE:
+ for (;; ++src) {
+ if (src == bufsz)
+ goto Exit;
+ if (buf[src] == '\012')
+ break;
+ }
+ ++src;
+ decoder->_state = CHUNKED_IN_TRAILERS_LINE_HEAD;
+ break;
+ default:
+ assert(!"decoder is corrupt");
+ }
+ }
+
+Complete:
+ ret = bufsz - src;
+Exit:
+ if (dst != src)
+ memmove(buf + dst, buf + src, bufsz - src);
+ *_bufsz = dst;
+ return ret;
+}
+
+int phr_decode_chunked_is_in_data(struct phr_chunked_decoder *decoder)
+{
+ return decoder->_state == CHUNKED_IN_CHUNK_DATA;
+}
+
+#undef CHECK_EOF
+#undef EXPECT_CHAR
+#undef ADVANCE_TOKEN
diff --git a/plugins/picohttpparser/picohttpparser.h b/plugins/picohttpparser/picohttpparser.h
new file mode 100644
index 00000000..0849f844
--- /dev/null
+++ b/plugins/picohttpparser/picohttpparser.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2009-2014 Kazuho Oku, Tokuhiro Matsuno, Daisuke Murase,
+ * Shigeo Mitsunari
+ *
+ * The software is licensed under either the MIT License (below) or the Perl
+ * license.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef picohttpparser_h
+#define picohttpparser_h
+
+#include <sys/types.h>
+
+#ifdef _MSC_VER
+#define ssize_t intptr_t
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* contains name and value of a header (name == NULL if it is a continuing line
+ * of a multiline header) */
+struct phr_header {
+ const char *name;
+ size_t name_len;
+ const char *value;
+ size_t value_len;
+};
+
+/* returns number of bytes consumed if successful, -2 if request is partial,
+ * -1 if failed */
+int phr_parse_request(const char *buf, size_t len, const char **method, size_t *method_len, const char **path, size_t *path_len,
+ int *minor_version, struct phr_header *headers, size_t *num_headers, size_t last_len);
+
+/* ditto */
+int phr_parse_response(const char *_buf, size_t len, int *minor_version, int *status, const char **msg, size_t *msg_len,
+ struct phr_header *headers, size_t *num_headers, size_t last_len);
+
+/* ditto */
+int phr_parse_headers(const char *buf, size_t len, struct phr_header *headers, size_t *num_headers, size_t last_len);
+
+/* should be zero-filled before start */
+struct phr_chunked_decoder {
+ size_t bytes_left_in_chunk; /* number of bytes left in current chunk */
+ char consume_trailer; /* if trailing headers should be consumed */
+ char _hex_count;
+ char _state;
+};
+
+/* the function rewrites the buffer given as (buf, bufsz) removing the chunked-
+ * encoding headers. When the function returns without an error, bufsz is
+ * updated to the length of the decoded data available. Applications should
+ * repeatedly call the function while it returns -2 (incomplete) every time
+ * supplying newly arrived data. If the end of the chunked-encoded data is
+ * found, the function returns a non-negative number indicating the number of
+ * octets left undecoded at the tail of the supplied buffer. Returns -1 on
+ * error.
+ */
+ssize_t phr_decode_chunked(struct phr_chunked_decoder *decoder, char *buf, size_t *bufsz);
+
+/* returns if the chunked decoder is in middle of chunked data */
+int phr_decode_chunked_is_in_data(struct phr_chunked_decoder *decoder);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/plugins/popen.c b/plugins/popen.c
index 592263fd..9eb49b62 100644
--- a/plugins/popen.c
+++ b/plugins/popen.c
@@ -39,9 +39,9 @@
*****************************************************************************/
#include "common.h"
+#include "utils.h"
/* extern so plugin has pid to kill exec'd process on timeouts */
-extern int timeout_interval;
extern pid_t *childpid;
extern int *child_stderr_array;
extern FILE *child_process;
@@ -76,18 +76,9 @@ RETSIGTYPE popen_timeout_alarm_handler (int);
#define SIG_ERR ((Sigfunc *)-1)
#endif
-#define min(a,b) ((a) < (b) ? (a) : (b))
-#define max(a,b) ((a) > (b) ? (a) : (b))
-int open_max (void); /* {Prog openmax} */
-static void err_sys (const char *, ...) __attribute__((noreturn,format(printf, 1, 2)));
-char *rtrim (char *, const char *);
char *pname = NULL; /* caller can set this from argv[0] */
-/*int *childerr = NULL;*//* ptr to array allocated at run-time */
-/*extern pid_t *childpid = NULL; *//* ptr to array allocated at run-time */
-static int maxfd; /* from our open_max(), {Prog openmax} */
-
#ifdef REDHAT_SPOPEN_ERROR
static volatile int childtermd = 0;
#endif
@@ -186,14 +177,15 @@ spopen (const char *cmdstring)
}
argv[i] = NULL;
+ if(maxfd == 0)
+ maxfd = open_max();
+
if (childpid == NULL) { /* first time through */
- maxfd = open_max (); /* allocate zeroed out array for child pids */
if ((childpid = calloc ((size_t)maxfd, sizeof (pid_t))) == NULL)
return (NULL);
}
if (child_stderr_array == NULL) { /* first time through */
- maxfd = open_max (); /* allocate zeroed out array for child pids */
if ((child_stderr_array = calloc ((size_t)maxfd, sizeof (int))) == NULL)
return (NULL);
}
@@ -273,15 +265,6 @@ spclose (FILE * fp)
return (1);
}
-#ifdef OPEN_MAX
-static int openmax = OPEN_MAX;
-#else
-static int openmax = 0;
-#endif
-
-#define OPEN_MAX_GUESS 256 /* if OPEN_MAX is indeterminate */
- /* no guarantee this is adequate */
-
#ifdef REDHAT_SPOPEN_ERROR
RETSIGTYPE
popen_sigchld_handler (int signo)
@@ -309,63 +292,3 @@ popen_timeout_alarm_handler (int signo)
exit (STATE_CRITICAL);
}
}
-
-
-int
-open_max (void)
-{
- if (openmax == 0) { /* first time through */
- errno = 0;
- if ((openmax = sysconf (_SC_OPEN_MAX)) < 0) {
- if (errno == 0)
- openmax = OPEN_MAX_GUESS; /* it's indeterminate */
- else
- err_sys (_("sysconf error for _SC_OPEN_MAX"));
- }
- }
- return (openmax);
-}
-
-
-/* Fatal error related to a system call.
- * Print a message and die. */
-
-#define MAXLINE 2048
-static void
-err_sys (const char *fmt, ...)
-{
- int errnoflag = 1;
- int errno_save;
- char buf[MAXLINE];
-
- va_list ap;
-
- va_start (ap, fmt);
- /* err_doit (1, fmt, ap); */
- errno_save = errno; /* value caller might want printed */
- vsprintf (buf, fmt, ap);
- if (errnoflag)
- sprintf (buf + strlen (buf), ": %s", strerror (errno_save));
- strcat (buf, "\n");
- fflush (stdout); /* in case stdout and stderr are the same */
- fputs (buf, stderr);
- fflush (NULL); /* flushes all stdio output streams */
- va_end (ap);
- exit (1);
-}
-
-char *
-rtrim (char *str, const char *tok)
-{
- int i = 0;
- int j = sizeof (str);
-
- while (str != NULL && i < j) {
- if (*(str + i) == *tok) {
- sprintf (str + i, "%s", "\0");
- return str;
- }
- i++;
- }
- return str;
-}
diff --git a/plugins/popen.h b/plugins/popen.h
index fc7e78e2..a5dd8fa7 100644
--- a/plugins/popen.h
+++ b/plugins/popen.h
@@ -7,7 +7,6 @@ FILE *spopen (const char *);
int spclose (FILE *);
RETSIGTYPE popen_timeout_alarm_handler (int);
-extern unsigned int timeout_interval;
pid_t *childpid=NULL;
int *child_stderr_array=NULL;
FILE *child_process=NULL;
diff --git a/plugins/runcmd.c b/plugins/runcmd.c
index 1a7c904f..a7155d27 100644
--- a/plugins/runcmd.c
+++ b/plugins/runcmd.c
@@ -67,19 +67,6 @@
* occur in any number of threads simultaneously. */
static pid_t *np_pids = NULL;
-/* Try sysconf(_SC_OPEN_MAX) first, as it can be higher than OPEN_MAX.
- * If that fails and the macro isn't defined, we fall back to an educated
- * guess. There's no guarantee that our guess is adequate and the program
- * will die with SIGSEGV if it isn't and the upper boundary is breached. */
-#ifdef _SC_OPEN_MAX
-static long maxfd = 0;
-#elif defined(OPEN_MAX)
-# define maxfd OPEN_MAX
-#else /* sysconf macro unavailable, so guess (may be wildly inaccurate) */
-# define maxfd 256
-#endif
-
-
/** prototypes **/
static int np_runcmd_open(const char *, int *, int *)
__attribute__((__nonnull__(1, 2, 3)));
@@ -99,14 +86,8 @@ extern void die (int, const char *, ...)
* through this api and thus achieve async-safeness throughout the api */
void np_runcmd_init(void)
{
-#ifndef maxfd
- if(!maxfd && (maxfd = sysconf(_SC_OPEN_MAX)) < 0) {
- /* possibly log or emit a warning here, since there's no
- * guarantee that our guess at maxfd will be adequate */
- maxfd = 256;
- }
-#endif
-
+ if(maxfd == 0)
+ maxfd = open_max();
if(!np_pids) np_pids = calloc(maxfd, sizeof(pid_t));
}
diff --git a/plugins/sslutils.c b/plugins/sslutils.c
index e38947e3..14f6579d 100644
--- a/plugins/sslutils.c
+++ b/plugins/sslutils.c
@@ -1,29 +1,29 @@
/*****************************************************************************
-*
+*
* Monitoring Plugins SSL utilities
-*
+*
* License: GPL
* Copyright (c) 2005-2010 Monitoring Plugins Development Team
-*
+*
* Description:
-*
+*
* This file contains common functions for plugins that require SSL.
-*
+*
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
-*
+*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
-*
+*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
-*
-*
+*
+*
*****************************************************************************/
#define MAX_CN_LENGTH 256
@@ -193,12 +193,22 @@ int np_net_ssl_read(void *buf, int num) {
int np_net_ssl_check_cert(int days_till_exp_warn, int days_till_exp_crit){
# ifdef USE_OPENSSL
- X509 *certificate=NULL;
+ X509 *certificate = NULL;
+ certificate=SSL_get_peer_certificate(s);
+ return(np_net_ssl_check_certificate(certificate, days_till_exp_warn, days_till_exp_crit));
+# else /* ifndef USE_OPENSSL */
+ printf("%s\n", _("WARNING - Plugin does not support checking certificates."));
+ return STATE_WARNING;
+# endif /* USE_OPENSSL */
+}
+
+int np_net_ssl_check_certificate(X509 *certificate, int days_till_exp_warn, int days_till_exp_crit){
+# ifdef USE_OPENSSL
X509_NAME *subj=NULL;
char timestamp[50] = "";
char cn[MAX_CN_LENGTH]= "";
char *tz;
-
+
int cnlen =-1;
int status=STATE_UNKNOWN;
@@ -210,7 +220,6 @@ int np_net_ssl_check_cert(int days_till_exp_warn, int days_till_exp_crit){
int time_remaining;
time_t tm_t;
- certificate=SSL_get_peer_certificate(s);
if (!certificate) {
printf("%s\n",_("CRITICAL - Cannot retrieve server certificate."));
return STATE_CRITICAL;
diff --git a/plugins/t/check_by_ssh.t b/plugins/t/check_by_ssh.t
index 4797390d..1d2939e9 100644
--- a/plugins/t/check_by_ssh.t
+++ b/plugins/t/check_by_ssh.t
@@ -9,17 +9,9 @@ use Test::More;
use NPTest;
# Required parameters
-my $ssh_service = getTestParameter( "NP_SSH_HOST",
- "A host providing SSH service",
- "localhost");
-
-my $ssh_key = getTestParameter( "NP_SSH_IDENTITY",
- "A key allowing access to NP_SSH_HOST",
- "~/.ssh/id_dsa");
-
-my $ssh_conf = getTestParameter( "NP_SSH_CONFIGFILE",
- "A config file with ssh settings",
- "~/.ssh/config");
+my $ssh_service = getTestParameter("NP_SSH_HOST", "A host providing SSH service", "localhost");
+my $ssh_key = getTestParameter("NP_SSH_IDENTITY", "A key allowing access to NP_SSH_HOST", "~/.ssh/id_dsa");
+my $ssh_conf = getTestParameter( "NP_SSH_CONFIGFILE", "A config file with ssh settings", "~/.ssh/config");
plan skip_all => "SSH_HOST and SSH_IDENTITY must be defined" unless ($ssh_service && $ssh_key);
diff --git a/plugins/t/check_curl.t b/plugins/t/check_curl.t
new file mode 100644
index 00000000..ada6a045
--- /dev/null
+++ b/plugins/t/check_curl.t
@@ -0,0 +1,201 @@
+#! /usr/bin/perl -w -I ..
+#
+# HyperText Transfer Protocol (HTTP) Test via check_http
+#
+#
+
+use strict;
+use Test::More;
+use POSIX qw/mktime strftime/;
+use NPTest;
+
+plan tests => 58;
+
+my $successOutput = '/OK.*HTTP.*second/';
+
+my $res;
+my $plugin = 'check_http';
+$plugin = 'check_curl' if $0 =~ m/check_curl/mx;
+
+my $host_tcp_http = getTestParameter("NP_HOST_TCP_HTTP", "A host providing the HTTP Service (a web server)", "localhost");
+my $host_tls_http = getTestParameter("NP_HOST_TLS_HTTP", "A host providing the HTTPS Service (a tls web server)", "localhost");
+my $host_tls_cert = getTestParameter("NP_HOST_TLS_CERT", "the common name of the certificate.", "localhost");
+my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1");
+my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost");
+my $internet_access = getTestParameter("NP_INTERNET_ACCESS", "Is this system directly connected to the internet?", "yes");
+my $host_tcp_http2 = getTestParameter("NP_HOST_TCP_HTTP2", "A host providing an index page containing the string 'monitoring'", "test.monitoring-plugins.org");
+my $host_tcp_proxy = getTestParameter("NP_HOST_TCP_PROXY", "A host providing a HTTP proxy with CONNECT support", "localhost");
+my $port_tcp_proxy = getTestParameter("NP_PORT_TCP_PROXY", "Port of the proxy with HTTP and CONNECT support", "3128");
+
+my $faketime = -x '/usr/bin/faketime' ? 1 : 0;
+
+
+$res = NPTest->testCmd(
+ "./$plugin $host_tcp_http -wt 300 -ct 600"
+ );
+cmp_ok( $res->return_code, '==', 0, "Webserver $host_tcp_http responded" );
+like( $res->output, $successOutput, "Output OK" );
+
+$res = NPTest->testCmd(
+ "./$plugin $host_tcp_http -wt 300 -ct 600 -v -v -v -k 'bob:there' -k 'carl:frown'"
+ );
+like( $res->output, '/bob:there\r\ncarl:frown\r\n/', "Got headers with multiple -k options" );
+
+$res = NPTest->testCmd(
+ "./$plugin $host_nonresponsive -wt 1 -ct 2 -t 3"
+ );
+cmp_ok( $res->return_code, '==', 2, "Webserver $host_nonresponsive not responding" );
+# was CRITICAL only, but both check_curl and check_http print HTTP CRITICAL (puzzle?!)
+like( $res->output, "/HTTP CRITICAL - Invalid HTTP response received from host on port 80: cURL returned 28 - Connection timed out after/", "Output OK");
+
+$res = NPTest->testCmd(
+ "./$plugin $hostname_invalid -wt 1 -ct 2"
+ );
+cmp_ok( $res->return_code, '==', 2, "Webserver $hostname_invalid not valid" );
+# The first part of the message comes from the OS catalogue, so cannot check this.
+# On Debian, it is Name or service not known, on Darwin, it is No address associated with nodename
+# Is also possible to get a socket timeout if DNS is not responding fast enough
+# cURL gives us consistent strings from its own 'lib/strerror.c'
+like( $res->output, "/cURL returned 6 - Could not resolve host:/", "Output OK");
+
+# host header checks
+$res = NPTest->testCmd("./$plugin -v -H $host_tcp_http");
+like( $res->output, '/^Host: '.$host_tcp_http.'\s*$/ms', "Host Header OK" );
+like( $res->output, '/CURLOPT_URL: http:\/\/'.$host_tcp_http.':80\//ms', "Url OK" );
+
+$res = NPTest->testCmd("./$plugin -v -H $host_tcp_http -p 80");
+like( $res->output, '/^Host: '.$host_tcp_http.'\s*$/ms', "Host Header OK" );
+like( $res->output, '/CURLOPT_URL: http:\/\/'.$host_tcp_http.':80\//ms', "Url OK" );
+
+$res = NPTest->testCmd("./$plugin -v -H $host_tcp_http:8080 -p 80");
+like( $res->output, '/^Host: '.$host_tcp_http.':8080\s*$/ms', "Host Header OK" );
+like( $res->output, '/CURLOPT_URL: http:\/\/'.$host_tcp_http.':80\//ms', "Url OK" );
+
+$res = NPTest->testCmd("./$plugin -v -H $host_tcp_http:8080 -p 80");
+like( $res->output, '/^Host: '.$host_tcp_http.':8080\s*$/ms', "Host Header OK" );
+like( $res->output, '/CURLOPT_URL: http:\/\/'.$host_tcp_http.':80\//ms', "Url OK" );
+
+$res = NPTest->testCmd("./$plugin -v -H $host_tcp_http:8080 -p 80 -k 'Host: testhost:8001'");
+like( $res->output, '/^Host: testhost:8001\s*$/ms', "Host Header OK" );
+like( $res->output, '/CURLOPT_URL: http:\/\/'.$host_tcp_http.':80\//ms', "Url OK" );
+
+$res = NPTest->testCmd("./$plugin -v -I $host_tcp_http -p 80 -k 'Host: testhost:8001'");
+like( $res->output, '/^Host: testhost:8001\s*$/ms', "Host Header OK" );
+like( $res->output, '/CURLOPT_URL: http:\/\/'.$host_tcp_http.':80\//ms', "Url OK" );
+
+SKIP: {
+ skip "No internet access", 4 if $internet_access eq "no";
+
+ $res = NPTest->testCmd("./$plugin -v -H $host_tls_http -S");
+ like( $res->output, '/^Host: '.$host_tls_http.'\s*$/ms', "Host Header OK" );
+
+ $res = NPTest->testCmd("./$plugin -v -H $host_tls_http:8080 -S -p 443");
+ like( $res->output, '/^Host: '.$host_tls_http.':8080\s*$/ms', "Host Header OK" );
+
+ $res = NPTest->testCmd("./$plugin -v -H $host_tls_http:443 -S -p 443");
+ like( $res->output, '/^Host: '.$host_tls_http.'\s*$/ms', "Host Header OK" );
+
+ $res = NPTest->testCmd("./$plugin -v -H $host_tls_http -D -S -p 443");
+ like( $res->output, '/(^Host: '.$host_tls_http.'\s*$)|(cURL returned 60)/ms', "Host Header OK" );
+};
+
+SKIP: {
+ skip "No host serving monitoring in index file", 7 unless $host_tcp_http2;
+
+ $res = NPTest->testCmd( "./$plugin -H $host_tcp_http2 -r 'monitoring'" );
+ cmp_ok( $res->return_code, "==", 0, "Got a reference to 'monitoring'");
+
+ $res = NPTest->testCmd( "./$plugin -H $host_tcp_http2 -r 'mONiTORing'" );
+ cmp_ok( $res->return_code, "==", 2, "Not got 'mONiTORing'");
+ like ( $res->output, "/pattern not found/", "Error message says 'pattern not found'");
+
+ $res = NPTest->testCmd( "./$plugin -H $host_tcp_http2 -R 'mONiTORing'" );
+ cmp_ok( $res->return_code, "==", 0, "But case insensitive doesn't mind 'mONiTORing'");
+
+ $res = NPTest->testCmd( "./$plugin -H $host_tcp_http2 -r 'monitoring' --invert-regex" );
+ cmp_ok( $res->return_code, "==", 2, "Invert results work when found");
+ like ( $res->output, "/pattern found/", "Error message says 'pattern found'");
+
+ $res = NPTest->testCmd( "./$plugin -H $host_tcp_http2 -r 'mONiTORing' --invert-regex" );
+ cmp_ok( $res->return_code, "==", 0, "And also when not found");
+}
+SKIP: {
+ skip "No internet access", 28 if $internet_access eq "no";
+
+ $res = NPTest->testCmd(
+ "./$plugin --ssl $host_tls_http"
+ );
+ cmp_ok( $res->return_code, '==', 0, "Can read https for $host_tls_http" );
+
+ $res = NPTest->testCmd( "./$plugin -C 1 --ssl $host_tls_http" );
+ cmp_ok( $res->return_code, '==', 0, "Checking certificate for $host_tls_http");
+ like ( $res->output, "/Certificate '$host_tls_cert' will expire on/", "Output OK" );
+ my $saved_cert_output = $res->output;
+
+ $res = NPTest->testCmd( "./$plugin -C 8000,1 --ssl $host_tls_http" );
+ cmp_ok( $res->return_code, '==', 1, "Checking certificate for $host_tls_http");
+ like ( $res->output, qr/WARNING - Certificate '$host_tls_cert' expires in \d+ day/, "Output Warning" );
+
+ $res = NPTest->testCmd( "./$plugin $host_tls_http -C 1" );
+ is( $res->return_code, 0, "Old syntax for cert checking okay" );
+ is( $res->output, $saved_cert_output, "Same output as new syntax" );
+
+ $res = NPTest->testCmd( "./$plugin -H $host_tls_http -C 1" );
+ is( $res->return_code, 0, "Updated syntax for cert checking okay" );
+ is( $res->output, $saved_cert_output, "Same output as new syntax" );
+
+ $res = NPTest->testCmd( "./$plugin -C 1 $host_tls_http" );
+ cmp_ok( $res->output, 'eq', $saved_cert_output, "--ssl option automatically added");
+
+ $res = NPTest->testCmd( "./$plugin $host_tls_http -C 1" );
+ cmp_ok( $res->output, 'eq', $saved_cert_output, "Old syntax for cert checking still works");
+
+ # run some certificate checks with faketime
+ SKIP: {
+ skip "No faketime binary found", 12 if !$faketime;
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC ./$plugin -C 1 $host_tls_http");
+ like($res->output, qr/OK - Certificate '$host_tls_cert' will expire on/, "Catch cert output");
+ is( $res->return_code, 0, "Catch cert output exit code" );
+ my($mon,$day,$hour,$min,$sec,$year) = ($res->output =~ /(\w+)\s+(\d+)\s+(\d+):(\d+):(\d+)\s+(\d+)/);
+ if(!defined $year) {
+ die("parsing date failed from: ".$res->output);
+ }
+ my $months = {'Jan' => 0, 'Feb' => 1, 'Mar' => 2, 'Apr' => 3, 'May' => 4, 'Jun' => 5, 'Jul' => 6, 'Aug' => 7, 'Sep' => 8, 'Oct' => 9, 'Nov' => 10, 'Dec' => 11};
+ my $ts = mktime($sec, $min, $hour, $day, $months->{$mon}, $year-1900);
+ my $time = strftime("%Y-%m-%d %H:%M:%S", localtime($ts));
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts))."' ./$plugin -C 1 $host_tls_http");
+ like($res->output, qr/CRITICAL - Certificate '$host_tls_cert' just expired/, "Output on expire date");
+ is( $res->return_code, 2, "Output on expire date" );
+
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts-1))."' ./$plugin -C 1 $host_tls_http");
+ like($res->output, qr/CRITICAL - Certificate '$host_tls_cert' expires in 0 minutes/, "cert expires in 1 second output");
+ is( $res->return_code, 2, "cert expires in 1 second exit code" );
+
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts-120))."' ./$plugin -C 1 $host_tls_http");
+ like($res->output, qr/CRITICAL - Certificate '$host_tls_cert' expires in 2 minutes/, "cert expires in 2 minutes output");
+ is( $res->return_code, 2, "cert expires in 2 minutes exit code" );
+
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts-7200))."' ./$plugin -C 1 $host_tls_http");
+ like($res->output, qr/CRITICAL - Certificate '$host_tls_cert' expires in 2 hours/, "cert expires in 2 hours output");
+ is( $res->return_code, 2, "cert expires in 2 hours exit code" );
+
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts+1))."' ./$plugin -C 1 $host_tls_http");
+ like($res->output, qr/CRITICAL - Certificate '$host_tls_cert' expired on/, "Certificate expired output");
+ is( $res->return_code, 2, "Certificate expired exit code" );
+ };
+
+ $res = NPTest->testCmd( "./$plugin --ssl $host_tls_http -E" );
+ like ( $res->output, '/time_connect=[\d\.]+/', 'Extended Performance Data Output OK' );
+ like ( $res->output, '/time_ssl=[\d\.]+/', 'Extended Performance Data SSL Output OK' );
+
+ $res = NPTest->testCmd(
+ "./$plugin --ssl -H www.e-paycobalt.com"
+ );
+ cmp_ok( $res->return_code, "==", 0, "Can read https for www.e-paycobalt.com (uses AES certificate)" );
+
+ $res = NPTest->testCmd( "./$plugin -H www.mozilla.com -u /firefox -f curl" );
+ is( $res->return_code, 0, "Redirection based on location is okay");
+
+ $res = NPTest->testCmd( "./$plugin -H www.mozilla.com --extended-perfdata" );
+ like ( $res->output, '/time_connect=[\d\.]+/', 'Extended Performance Data Output OK' );
+}
diff --git a/plugins/t/check_disk.t b/plugins/t/check_disk.t
index 7e0f74b7..ec527e7f 100644
--- a/plugins/t/check_disk.t
+++ b/plugins/t/check_disk.t
@@ -88,8 +88,9 @@ $result = NPTest->testCmd(
);
$_ = $result->perf_output;
my ($warn_absth_data, $crit_absth_data, $total_absth_data) = (m/=.[^;]*;(\d+);(\d+);\d+;(\d+)/);
-is ($warn_absth_data, $total_absth_data - 20, "Wrong warning in perf data using absolute thresholds");
-is ($crit_absth_data, $total_absth_data - 10, "Wrong critical in perf data using absolute thresholds");
+# default unit is MiB, but perfdata is always bytes
+is ($warn_absth_data, $total_absth_data - (20 * (2 ** 20)), "Wrong warning in perf data using absolute thresholds");
+is ($crit_absth_data, $total_absth_data - (10 * (2 ** 20)), "Wrong critical in perf data using absolute thresholds");
# Then check percent thresholds.
$result = NPTest->testCmd(
@@ -119,7 +120,7 @@ like ( $result->only_output, qr/$more_free/, "Have disk name in text");
$result = NPTest->testCmd( "./check_disk -w 1 -c 1 -p $more_free -p $less_free" );
cmp_ok( $result->return_code, '==', 0, "At least 1 MB available on $more_free and $less_free");
$_ = $result->output;
-my ($free_mb_on_mp1, $free_mb_on_mp2) = (m/(\d+) MB .* (\d+) MB /g);
+my ($free_mb_on_mp1, $free_mb_on_mp2) = (m/(\d+)MiB .* (\d+)MiB /g);
my $free_mb_on_all = $free_mb_on_mp1 + $free_mb_on_mp2;
@@ -248,11 +249,11 @@ $result = NPTest->testCmd( "./check_disk -w 100% -c 100% ".${mountpoint_valid} )
cmp_ok( $result->return_code, "==", 2, "100% empty" );
like( $result->output, $failureOutput, "Right output" );
-$result = NPTest->testCmd( "./check_disk -w 100000 -c 100000 $mountpoint_valid" );
-cmp_ok( $result->return_code, '==', 2, "Check for 100GB free" );
+$result = NPTest->testCmd( "./check_disk -w 100000000 -c 100000000 $mountpoint_valid" );
+cmp_ok( $result->return_code, '==', 2, "Check for 100TB free" );
-$result = NPTest->testCmd( "./check_disk -w 100 -c 100 -u GB ".${mountpoint_valid} ); # 100 GB empty
-cmp_ok( $result->return_code, "==", 2, "100 GB empty" );
+$result = NPTest->testCmd( "./check_disk -w 100 -c 100 -u TB ".${mountpoint_valid} ); # 100 TB empty
+cmp_ok( $result->return_code, "==", 2, "100 TB empty" );
# Checking old syntax of check_disk warn crit [fs], with warn/crit at USED% thresholds
diff --git a/plugins/t/check_dns.t b/plugins/t/check_dns.t
index cdfbe60d..afb2062d 100644
--- a/plugins/t/check_dns.t
+++ b/plugins/t/check_dns.t
@@ -10,7 +10,7 @@ use NPTest;
plan skip_all => "check_dns not compiled" unless (-x "check_dns");
-plan tests => 19;
+plan tests => 23;
my $successOutput = '/DNS OK: [\.0-9]+ seconds? response time/';
@@ -105,3 +105,11 @@ cmp_ok( $res->return_code, '==', 0, "Got expected address");
$res = NPTest->testCmd("./check_dns -H $hostname_valid -a $hostname_invalid_cidr -t 5");
cmp_ok( $res->return_code, '==', 2, "Got wrong address");
like ( $res->output, "/^DNS CRITICAL.*expected '$hostname_invalid_cidr' but got '$hostname_valid_ip'".'$/', "Output OK");
+
+$res = NPTest->testCmd("./check_dns -H $hostname_valid -n");
+cmp_ok( $res->return_code, '==', 2, "Found $hostname_valid");
+like ( $res->output, "/^DNS CRITICAL.*Domain '$hostname_valid' was found by the server:/", "Output OK");
+
+$res = NPTest->testCmd("./check_dns -H $hostname_invalid -n");
+cmp_ok( $res->return_code, '==', 0, "Did not find $hostname_invalid");
+like ( $res->output, $successOutput, "Output OK" );
diff --git a/plugins/t/check_fping.t b/plugins/t/check_fping.t
index 08692e46..67b357b2 100644
--- a/plugins/t/check_fping.t
+++ b/plugins/t/check_fping.t
@@ -5,40 +5,30 @@
#
use strict;
-use Test;
+use Test::More;
use NPTest;
-use vars qw($tests);
+my $host_responsive = getTestParameter("NP_HOST_RESPONSIVE", "The hostname of system responsive to network requests", "localhost");
+my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1");
+my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost");
-BEGIN {$tests = 4; plan tests => $tests}
-
-my $successOutput = '/^FPING OK - /';
-my $failureOutput = '/^FPING CRITICAL - /';
-
-my $host_responsive = getTestParameter( "host_responsive", "NP_HOST_RESPONSIVE", "localhost",
- "The hostname of system responsive to network requests" );
-
-my $host_nonresponsive = getTestParameter( "host_nonresponsive", "NP_HOST_NONRESPONSIVE", "10.0.0.1",
- "The hostname of system not responsive to network requests" );
-
-my $hostname_invalid = getTestParameter( "hostname_invalid", "NP_HOSTNAME_INVALID", "nosuchhost",
- "An invalid (not known to DNS) hostname" );
-
-
-my $t;
+my $res;
my $fping = qx(which fping 2> /dev/null);
chomp($fping);
if( ! -x "./check_fping") {
- $t += skipMissingCmd( "./check_fping", $tests );
+ plan skip_all => "check_fping not found, skipping tests";
}
-elsif ( $> != 0 && (!$fping || ! -u $fping)) {
- $t += skipMsg( "./check_fping", $tests );
+elsif ( !$fping || !-x $fping ) {
+ plan skip_all => "fping not found or cannot be executed, skipping tests";
} else {
- $t += checkCmd( "./check_fping $host_responsive", 0, $successOutput );
- $t += checkCmd( "./check_fping $host_nonresponsive", [ 1, 2 ] );
- $t += checkCmd( "./check_fping $hostname_invalid", [ 1, 2 ] );
-}
+ plan tests => 3;
+ $res = NPTest->testCmd( "./check_fping $host_responsive" );
+ cmp_ok( $res->return_code, '==', 0, "Responsive host returns OK");
-exit(0) if defined($Test::Harness::VERSION);
-exit($tests - $t);
+ $res = NPTest->testCmd( "./check_fping $host_nonresponsive" );
+ cmp_ok( $res->return_code, '==', 2, "Non-Responsive host returns Critical");
+
+ $res = NPTest->testCmd( "./check_fping $hostname_invalid" );
+ cmp_ok( $res->return_code, '==', 3, "Invalid host returns Unknown");
+}
diff --git a/plugins/t/check_ftp.t b/plugins/t/check_ftp.t
index de6831ba..93a7d7c3 100644
--- a/plugins/t/check_ftp.t
+++ b/plugins/t/check_ftp.t
@@ -11,14 +11,9 @@ use NPTest;
use vars qw($tests);
BEGIN {$tests = 4; plan tests => $tests}
-my $host_tcp_ftp = getTestParameter( "host_tcp_ftp", "NP_HOST_TCP_FTP", "localhost",
- "A host providing the FTP Service (an FTP server)");
-
-my $host_nonresponsive = getTestParameter( "host_nonresponsive", "NP_HOST_NONRESPONSIVE", "10.0.0.1",
- "The hostname of system not responsive to network requests" );
-
-my $hostname_invalid = getTestParameter( "hostname_invalid", "NP_HOSTNAME_INVALID", "nosuchhost",
- "An invalid (not known to DNS) hostname" );
+my $host_tcp_ftp = getTestParameter("NP_HOST_TCP_FTP", "A host providing the FTP Service (an FTP server)", "localhost");
+my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1");
+my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost");
my $successOutput = '/FTP OK -\s+[0-9]?\.?[0-9]+ second response time/';
diff --git a/plugins/t/check_http.t b/plugins/t/check_http.t
index 5a90f02a..c137f7b4 100644
--- a/plugins/t/check_http.t
+++ b/plugins/t/check_http.t
@@ -9,54 +9,46 @@ use Test::More;
use POSIX qw/mktime strftime/;
use NPTest;
-plan tests => 49;
+plan tests => 50;
my $successOutput = '/OK.*HTTP.*second/';
my $res;
-
-my $host_tcp_http = getTestParameter( "NP_HOST_TCP_HTTP",
- "A host providing the HTTP Service (a web server)",
- "localhost" );
-
-my $host_nonresponsive = getTestParameter( "NP_HOST_NONRESPONSIVE",
- "The hostname of system not responsive to network requests",
- "10.0.0.1" );
-
-my $hostname_invalid = getTestParameter( "NP_HOSTNAME_INVALID",
- "An invalid (not known to DNS) hostname",
- "nosuchhost");
-
-my $internet_access = getTestParameter( "NP_INTERNET_ACCESS",
- "Is this system directly connected to the internet?",
- "yes");
-
-my $host_tcp_http2 = getTestParameter( "NP_HOST_TCP_HTTP2",
- "A host providing an index page containing the string 'monitoring'",
- "test.monitoring-plugins.org" );
+my $plugin = 'check_http';
+$plugin = 'check_curl' if $0 =~ m/check_curl/mx;
+
+my $host_tcp_http = getTestParameter("NP_HOST_TCP_HTTP", "A host providing the HTTP Service (a web server)", "localhost");
+my $host_tls_http = getTestParameter("NP_HOST_TLS_HTTP", "A host providing the HTTPS Service (a tls web server)", "localhost");
+my $host_tls_cert = getTestParameter("NP_HOST_TLS_CERT", "the common name of the certificate.", "localhost");
+my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1");
+my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost");
+my $internet_access = getTestParameter("NP_INTERNET_ACCESS", "Is this system directly connected to the internet?", "yes");
+my $host_tcp_http2 = getTestParameter("NP_HOST_TCP_HTTP2", "A host providing an index page containing the string 'monitoring'", "test.monitoring-plugins.org");
+my $host_tcp_proxy = getTestParameter("NP_HOST_TCP_PROXY", "A host providing a HTTP proxy with CONNECT support", "localhost");
+my $port_tcp_proxy = getTestParameter("NP_PORT_TCP_PROXY", "Port of the proxy with HTTP and CONNECT support", "3128");
my $faketime = -x '/usr/bin/faketime' ? 1 : 0;
$res = NPTest->testCmd(
- "./check_http $host_tcp_http -wt 300 -ct 600"
+ "./$plugin $host_tcp_http -wt 300 -ct 600"
);
cmp_ok( $res->return_code, '==', 0, "Webserver $host_tcp_http responded" );
like( $res->output, $successOutput, "Output OK" );
$res = NPTest->testCmd(
- "./check_http $host_tcp_http -wt 300 -ct 600 -v -v -v -k 'bob:there' -k 'carl:frown'"
+ "./$plugin $host_tcp_http -wt 300 -ct 600 -v -v -v -k 'bob:there' -k 'carl:frown'"
);
like( $res->output, '/bob:there\r\ncarl:frown\r\n/', "Got headers with multiple -k options" );
$res = NPTest->testCmd(
- "./check_http $host_nonresponsive -wt 1 -ct 2 -t 3"
+ "./$plugin $host_nonresponsive -wt 1 -ct 2 -t 3"
);
cmp_ok( $res->return_code, '==', 2, "Webserver $host_nonresponsive not responding" );
cmp_ok( $res->output, 'eq', "CRITICAL - Socket timeout after 3 seconds", "Output OK");
$res = NPTest->testCmd(
- "./check_http $hostname_invalid -wt 1 -ct 2"
+ "./$plugin $hostname_invalid -wt 1 -ct 2"
);
cmp_ok( $res->return_code, '==', 2, "Webserver $hostname_invalid not valid" );
# The first part of the message comes from the OS catalogue, so cannot check this.
@@ -65,87 +57,87 @@ cmp_ok( $res->return_code, '==', 2, "Webserver $hostname_invalid not valid" );
like( $res->output, "/Unable to open TCP socket|Socket timeout after/", "Output OK");
# host header checks
-$res = NPTest->testCmd("./check_http -v -H $host_tcp_http");
+$res = NPTest->testCmd("./$plugin -v -H $host_tcp_http");
like( $res->output, '/^Host: '.$host_tcp_http.'\s*$/ms', "Host Header OK" );
-$res = NPTest->testCmd("./check_http -v -H $host_tcp_http -p 80");
+$res = NPTest->testCmd("./$plugin -v -H $host_tcp_http -p 80");
like( $res->output, '/^Host: '.$host_tcp_http.'\s*$/ms', "Host Header OK" );
-$res = NPTest->testCmd("./check_http -v -H $host_tcp_http:8080 -p 80");
+$res = NPTest->testCmd("./$plugin -v -H $host_tcp_http:8080 -p 80");
like( $res->output, '/^Host: '.$host_tcp_http.':8080\s*$/ms', "Host Header OK" );
-$res = NPTest->testCmd("./check_http -v -H $host_tcp_http:8080 -p 80");
+$res = NPTest->testCmd("./$plugin -v -H $host_tcp_http:8080 -p 80");
like( $res->output, '/^Host: '.$host_tcp_http.':8080\s*$/ms', "Host Header OK" );
SKIP: {
skip "No internet access", 3 if $internet_access eq "no";
- $res = NPTest->testCmd("./check_http -v -H www.verisign.com -S");
- like( $res->output, '/^Host: www.verisign.com\s*$/ms', "Host Header OK" );
+ $res = NPTest->testCmd("./$plugin -v -H $host_tls_http -S");
+ like( $res->output, '/^Host: '.$host_tls_http.'\s*$/ms', "Host Header OK" );
- $res = NPTest->testCmd("./check_http -v -H www.verisign.com:8080 -S -p 443");
- like( $res->output, '/^Host: www.verisign.com:8080\s*$/ms', "Host Header OK" );
+ $res = NPTest->testCmd("./$plugin -v -H $host_tls_http:8080 -S -p 443");
+ like( $res->output, '/^Host: '.$host_tls_http.':8080\s*$/ms', "Host Header OK" );
- $res = NPTest->testCmd("./check_http -v -H www.verisign.com:443 -S -p 443");
- like( $res->output, '/^Host: www.verisign.com\s*$/ms', "Host Header OK" );
+ $res = NPTest->testCmd("./$plugin -v -H $host_tls_http:443 -S -p 443");
+ like( $res->output, '/^Host: '.$host_tls_http.'\s*$/ms', "Host Header OK" );
};
SKIP: {
skip "No host serving monitoring in index file", 7 unless $host_tcp_http2;
- $res = NPTest->testCmd( "./check_http -H $host_tcp_http2 -r 'monitoring'" );
+ $res = NPTest->testCmd( "./$plugin -H $host_tcp_http2 -r 'monitoring'" );
cmp_ok( $res->return_code, "==", 0, "Got a reference to 'monitoring'");
- $res = NPTest->testCmd( "./check_http -H $host_tcp_http2 -r 'mONiTORing'" );
+ $res = NPTest->testCmd( "./$plugin -H $host_tcp_http2 -r 'mONiTORing'" );
cmp_ok( $res->return_code, "==", 2, "Not got 'mONiTORing'");
like ( $res->output, "/pattern not found/", "Error message says 'pattern not found'");
- $res = NPTest->testCmd( "./check_http -H $host_tcp_http2 -R 'mONiTORing'" );
+ $res = NPTest->testCmd( "./$plugin -H $host_tcp_http2 -R 'mONiTORing'" );
cmp_ok( $res->return_code, "==", 0, "But case insensitive doesn't mind 'mONiTORing'");
- $res = NPTest->testCmd( "./check_http -H $host_tcp_http2 -r 'monitoring' --invert-regex" );
+ $res = NPTest->testCmd( "./$plugin -H $host_tcp_http2 -r 'monitoring' --invert-regex" );
cmp_ok( $res->return_code, "==", 2, "Invert results work when found");
like ( $res->output, "/pattern found/", "Error message says 'pattern found'");
- $res = NPTest->testCmd( "./check_http -H $host_tcp_http2 -r 'mONiTORing' --invert-regex" );
+ $res = NPTest->testCmd( "./$plugin -H $host_tcp_http2 -r 'mONiTORing' --invert-regex" );
cmp_ok( $res->return_code, "==", 0, "And also when not found");
}
SKIP: {
- skip "No internet access", 16 if $internet_access eq "no";
+ skip "No internet access", 23 if $internet_access eq "no";
$res = NPTest->testCmd(
- "./check_http --ssl www.verisign.com"
+ "./$plugin --ssl $host_tls_http"
);
- cmp_ok( $res->return_code, '==', 0, "Can read https for www.verisign.com" );
+ cmp_ok( $res->return_code, '==', 0, "Can read https for $host_tls_http" );
- $res = NPTest->testCmd( "./check_http -C 1 --ssl www.verisign.com" );
- cmp_ok( $res->return_code, '==', 0, "Checking certificate for www.verisign.com");
- like ( $res->output, "/Certificate 'www.verisign.com' will expire on/", "Output OK" );
+ $res = NPTest->testCmd( "./$plugin -C 1 --ssl $host_tls_http" );
+ cmp_ok( $res->return_code, '==', 0, "Checking certificate for $host_tls_http");
+ like ( $res->output, "/Certificate '$host_tls_cert' will expire on/", "Output OK" );
my $saved_cert_output = $res->output;
- $res = NPTest->testCmd( "./check_http -C 8000,1 --ssl www.verisign.com" );
- cmp_ok( $res->return_code, '==', 1, "Checking certificate for www.verisign.com");
- like ( $res->output, qr/WARNING - Certificate 'www.verisign.com' expires in \d+ day/, "Output Warning" );
+ $res = NPTest->testCmd( "./$plugin -C 8000,1 --ssl $host_tls_http" );
+ cmp_ok( $res->return_code, '==', 1, "Checking certificate for $host_tls_http");
+ like ( $res->output, qr/WARNING - Certificate '$host_tls_cert' expires in \d+ day/, "Output Warning" );
- $res = NPTest->testCmd( "./check_http www.verisign.com -C 1" );
+ $res = NPTest->testCmd( "./$plugin $host_tls_http -C 1" );
is( $res->return_code, 0, "Old syntax for cert checking okay" );
is( $res->output, $saved_cert_output, "Same output as new syntax" );
- $res = NPTest->testCmd( "./check_http -H www.verisign.com -C 1" );
+ $res = NPTest->testCmd( "./$plugin -H $host_tls_http -C 1" );
is( $res->return_code, 0, "Updated syntax for cert checking okay" );
is( $res->output, $saved_cert_output, "Same output as new syntax" );
- $res = NPTest->testCmd( "./check_http -C 1 www.verisign.com" );
+ $res = NPTest->testCmd( "./$plugin -C 1 $host_tls_http" );
cmp_ok( $res->output, 'eq', $saved_cert_output, "--ssl option automatically added");
- $res = NPTest->testCmd( "./check_http www.verisign.com -C 1" );
+ $res = NPTest->testCmd( "./$plugin $host_tls_http -C 1" );
cmp_ok( $res->output, 'eq', $saved_cert_output, "Old syntax for cert checking still works");
# run some certificate checks with faketime
SKIP: {
- skip "No faketime binary found", 12 if !$faketime;
- $res = NPTest->testCmd("LC_TIME=C TZ=UTC ./check_http -C 1 www.verisign.com");
- like($res->output, qr/OK - Certificate 'www.verisign.com' will expire on/, "Catch cert output");
+ skip "No faketime binary found", 7 if !$faketime;
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC ./$plugin -C 1 $host_tls_http");
+ like($res->output, qr/OK - Certificate '$host_tls_cert' will expire on/, "Catch cert output");
is( $res->return_code, 0, "Catch cert output exit code" );
my($mon,$day,$hour,$min,$sec,$year) = ($res->output =~ /(\w+)\s+(\d+)\s+(\d+):(\d+):(\d+)\s+(\d+)/);
if(!defined $year) {
@@ -154,40 +146,51 @@ SKIP: {
my $months = {'Jan' => 0, 'Feb' => 1, 'Mar' => 2, 'Apr' => 3, 'May' => 4, 'Jun' => 5, 'Jul' => 6, 'Aug' => 7, 'Sep' => 8, 'Oct' => 9, 'Nov' => 10, 'Dec' => 11};
my $ts = mktime($sec, $min, $hour, $day, $months->{$mon}, $year-1900);
my $time = strftime("%Y-%m-%d %H:%M:%S", localtime($ts));
- $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts))."' ./check_http -C 1 www.verisign.com");
- like($res->output, qr/CRITICAL - Certificate 'www.verisign.com' just expired/, "Output on expire date");
- is( $res->return_code, 2, "Output on expire date" );
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts))."' ./$plugin -C 1 $host_tls_http");
+ like($res->output, qr/CRITICAL - Certificate '$host_tls_cert' just expired/, "Output on expire date");
- $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts-1))."' ./check_http -C 1 www.verisign.com");
- like($res->output, qr/CRITICAL - Certificate 'www.verisign.com' expires in 0 minutes/, "cert expires in 1 second output");
- is( $res->return_code, 2, "cert expires in 1 second exit code" );
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts-1))."' ./$plugin -C 1 $host_tls_http");
+ like($res->output, qr/CRITICAL - Certificate '$host_tls_cert' expires in 0 minutes/, "cert expires in 1 second output");
- $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts-120))."' ./check_http -C 1 www.verisign.com");
- like($res->output, qr/CRITICAL - Certificate 'www.verisign.com' expires in 2 minutes/, "cert expires in 2 minutes output");
- is( $res->return_code, 2, "cert expires in 2 minutes exit code" );
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts-120))."' ./$plugin -C 1 $host_tls_http");
+ like($res->output, qr/CRITICAL - Certificate '$host_tls_cert' expires in 2 minutes/, "cert expires in 2 minutes output");
- $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts-7200))."' ./check_http -C 1 www.verisign.com");
- like($res->output, qr/CRITICAL - Certificate 'www.verisign.com' expires in 2 hours/, "cert expires in 2 hours output");
- is( $res->return_code, 2, "cert expires in 2 hours exit code" );
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts-7200))."' ./$plugin -C 1 $host_tls_http");
+ like($res->output, qr/CRITICAL - Certificate '$host_tls_cert' expires in 2 hours/, "cert expires in 2 hours output");
- $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts+1))."' ./check_http -C 1 www.verisign.com");
- like($res->output, qr/CRITICAL - Certificate 'www.verisign.com' expired on/, "Certificate expired output");
- is( $res->return_code, 2, "Certificate expired exit code" );
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts+1))."' ./$plugin -C 1 $host_tls_http");
+ like($res->output, qr/CRITICAL - Certificate '$host_tls_cert' expired on/, "Certificate expired output");
};
- $res = NPTest->testCmd( "./check_http --ssl www.verisign.com -E" );
+ $res = NPTest->testCmd( "./$plugin --ssl $host_tls_http -E" );
like ( $res->output, '/time_connect=[\d\.]+/', 'Extended Performance Data Output OK' );
like ( $res->output, '/time_ssl=[\d\.]+/', 'Extended Performance Data SSL Output OK' );
$res = NPTest->testCmd(
- "./check_http --ssl -H www.e-paycobalt.com"
+ "./$plugin --ssl -H www.e-paycobalt.com"
);
cmp_ok( $res->return_code, "==", 0, "Can read https for www.e-paycobalt.com (uses AES certificate)" );
- $res = NPTest->testCmd( "./check_http -H www.mozilla.com -u /firefox -f follow" );
+ $res = NPTest->testCmd( "./$plugin -H www.mozilla.com -u /firefox -f follow" );
is( $res->return_code, 0, "Redirection based on location is okay");
- $res = NPTest->testCmd( "./check_http -H www.mozilla.com --extended-perfdata" );
+ $res = NPTest->testCmd( "./$plugin -H www.mozilla.com --extended-perfdata" );
like ( $res->output, '/time_connect=[\d\.]+/', 'Extended Performance Data Output OK' );
}
+
+SKIP: {
+ skip "No internet access or proxy configured", 6 if $internet_access eq "no" or ! $host_tcp_proxy;
+
+ $res = NPTest->testCmd( "./$plugin -I $host_tcp_proxy -p $port_tcp_proxy -u http://$host_tcp_http -e 200,301,302");
+ is( $res->return_code, 0, "Proxy HTTP works");
+ like($res->output, qr/OK: Status line output matched/, "Proxy HTTP Output is sufficent");
+
+ $res = NPTest->testCmd( "./$plugin -I $host_tcp_proxy -p $port_tcp_proxy -H $host_tls_http -S -j CONNECT");
+ is( $res->return_code, 0, "Proxy HTTP CONNECT works");
+ like($res->output, qr/HTTP OK:/, "Proxy HTTP CONNECT output sufficent");
+
+ $res = NPTest->testCmd( "./$plugin -I $host_tcp_proxy -p $port_tcp_proxy -H $host_tls_http -S -j CONNECT:HEAD");
+ is( $res->return_code, 0, "Proxy HTTP CONNECT works with override method");
+ like($res->output, qr/HTTP OK:/, "Proxy HTTP CONNECT output sufficent");
+}
diff --git a/plugins/t/check_imap.t b/plugins/t/check_imap.t
index 9c6eae1f..7c74e564 100644
--- a/plugins/t/check_imap.t
+++ b/plugins/t/check_imap.t
@@ -8,17 +8,10 @@ use strict;
use Test::More tests => 7;
use NPTest;
-my $host_tcp_smtp = getTestParameter( "host_tcp_smtp", "NP_HOST_TCP_SMTP", "mailhost",
- "A host providing an STMP Service (a mail server)");
-
-my $host_tcp_imap = getTestParameter( "host_tcp_imap", "NP_HOST_TCP_IMAP", $host_tcp_smtp,
- "A host providing an IMAP Service (a mail server)");
-
-my $host_nonresponsive = getTestParameter( "host_nonresponsive", "NP_HOST_NONRESPONSIVE", "10.0.0.1",
- "The hostname of system not responsive to network requests" );
-
-my $hostname_invalid = getTestParameter( "hostname_invalid", "NP_HOSTNAME_INVALID", "nosuchhost",
- "An invalid (not known to DNS) hostname" );
+my $host_tcp_smtp = getTestParameter("NP_HOST_TCP_SMTP", "A host providing an STMP Service (a mail server)", "mailhost");
+my $host_tcp_imap = getTestParameter("NP_HOST_TCP_IMAP", "A host providing an IMAP Service (a mail server)", $host_tcp_smtp);
+my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1");
+my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost");
my $t;
diff --git a/plugins/t/check_jabber.t b/plugins/t/check_jabber.t
index 7a708d5b..fcdae179 100644
--- a/plugins/t/check_jabber.t
+++ b/plugins/t/check_jabber.t
@@ -10,23 +10,9 @@ use NPTest;
plan tests => 10;
-my $host_tcp_jabber = getTestParameter(
- "NP_HOST_TCP_JABBER",
- "A host providing the Jabber Service",
- "jabber.org"
- );
-
-my $host_nonresponsive = getTestParameter(
- "NP_HOST_NONRESPONSIVE",
- "The hostname of system not responsive to network requests",
- "10.0.0.1",
- );
-
-my $hostname_invalid = getTestParameter(
- "NP_HOSTNAME_INVALID",
- "An invalid (not known to DNS) hostname",
- "nosuchhost",
- );
+my $host_tcp_jabber = getTestParameter("NP_HOST_TCP_JABBER", "A host providing the Jabber Service", "jabber.de");
+my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1");
+my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost");
my $jabberOK = '/JABBER OK\s-\s\d+\.\d+\ssecond response time on '.$host_tcp_jabber.' port 5222/';
diff --git a/plugins/t/check_ldap.t b/plugins/t/check_ldap.t
index b8944d4b..b8a4a766 100644
--- a/plugins/t/check_ldap.t
+++ b/plugins/t/check_ldap.t
@@ -9,19 +9,10 @@ use warnings;
use Test::More;
use NPTest;
-my $host_tcp_ldap = getTestParameter("NP_HOST_TCP_LDAP",
- "A host providing the LDAP Service",
- "localhost" );
-
-my $ldap_base_dn = getTestParameter("NP_LDAP_BASE_DN",
- "A base dn for the LDAP Service",
- "cn=admin" );
-
-my $host_nonresponsive = getTestParameter("host_nonresponsive", "NP_HOST_NONRESPONSIVE", "10.0.0.1",
- "The hostname of system not responsive to network requests" );
-
-my $hostname_invalid = getTestParameter("hostname_invalid", "NP_HOSTNAME_INVALID", "nosuchhost",
- "An invalid (not known to DNS) hostname" );
+my $host_tcp_ldap = getTestParameter("NP_HOST_TCP_LDAP", "A host providing the LDAP Service", "localhost");
+my $ldap_base_dn = getTestParameter("NP_LDAP_BASE_DN", "A base dn for the LDAP Service", "cn=admin");
+my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1");
+my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost");
my($result, $cmd);
my $command = './check_ldap';
diff --git a/plugins/t/check_load.t b/plugins/t/check_load.t
index 55f6f752..60837ef6 100644
--- a/plugins/t/check_load.t
+++ b/plugins/t/check_load.t
@@ -11,8 +11,8 @@ use NPTest;
my $res;
my $loadValue = "[0-9]+\.?[0-9]+";
-my $successOutput = "/^OK - load average: $loadValue, $loadValue, $loadValue/";
-my $failureOutput = "/^CRITICAL - load average: $loadValue, $loadValue, $loadValue/";
+my $successOutput = "/^LOAD OK - load average: $loadValue, $loadValue, $loadValue/";
+my $failureOutput = "/^LOAD CRITICAL - load average: $loadValue, $loadValue, $loadValue/";
plan tests => 11;
diff --git a/plugins/t/check_mysql.t b/plugins/t/check_mysql.t
index 28cd4cd0..e426bf59 100644
--- a/plugins/t/check_mysql.t
+++ b/plugins/t/check_mysql.t
@@ -21,30 +21,11 @@ plan skip_all => "check_mysql not compiled" unless (-x "check_mysql");
plan tests => 15;
my $bad_login_output = '/Access denied for user /';
-my $mysqlserver = getTestParameter(
- "NP_MYSQL_SERVER",
- "A MySQL Server hostname or IP with no slaves setup"
- );
-my $mysqlsocket = getTestParameter(
- "NP_MYSQL_SOCKET",
- "Full path to a MySQL Server socket with no slaves setup"
- );
-my $mysql_login_details = getTestParameter(
- "MYSQL_LOGIN_DETAILS",
- "Command line parameters to specify login access (requires " .
- "REPLICATION CLIENT privleges)",
- "-u test -ptest",
- );
-my $with_slave = getTestParameter(
- "NP_MYSQL_WITH_SLAVE",
- "MySQL server with slaves setup"
- );
-my $with_slave_login = getTestParameter(
- "NP_MYSQL_WITH_SLAVE_LOGIN",
- "Login details for server with slave (requires REPLICATION CLIENT " .
- "privleges)",
- $mysql_login_details || "-u test -ptest"
- );
+my $mysqlserver = getTestParameter("NP_MYSQL_SERVER", "A MySQL Server hostname or IP with no slaves setup");
+my $mysqlsocket = getTestParameter("NP_MYSQL_SOCKET", "Full path to a MySQL Server socket with no slaves setup");
+my $mysql_login_details = getTestParameter("NP_MYSQL_LOGIN_DETAILS", "Command line parameters to specify login access (requires REPLICATION CLIENT privleges)", "-u test -ptest");
+my $with_slave = getTestParameter("NP_MYSQL_WITH_SLAVE", "MySQL server with slaves setup");
+my $with_slave_login = getTestParameter("NP_MYSQL_WITH_SLAVE_LOGIN", "Login details for server with slave (requires REPLICATION CLIENT privleges)", $mysql_login_details || "-u test -ptest");
my $result;
diff --git a/plugins/t/check_mysql_query.t b/plugins/t/check_mysql_query.t
index 407af881..96899ac6 100644
--- a/plugins/t/check_mysql_query.t
+++ b/plugins/t/check_mysql_query.t
@@ -17,15 +17,8 @@ use vars qw($tests);
plan skip_all => "check_mysql_query not compiled" unless (-x "check_mysql_query");
-my $mysqlserver = getTestParameter(
- "NP_MYSQL_SERVER",
- "A MySQL Server with no slaves setup"
- );
-my $mysql_login_details = getTestParameter(
- "MYSQL_LOGIN_DETAILS",
- "Command line parameters to specify login access",
- "-u user -ppw -d db",
- );
+my $mysqlserver = getTestParameter("NP_MYSQL_SERVER", "A MySQL Server with no slaves setup");
+my $mysql_login_details = getTestParameter("NP_MYSQL_LOGIN_DETAILS", "Command line parameters to specify login access", "-u user -ppw -d db");
my $result;
if (! $mysqlserver) {
diff --git a/plugins/t/check_snmp.t b/plugins/t/check_snmp.t
index eff46ea1..f2f218fd 100644
--- a/plugins/t/check_snmp.t
+++ b/plugins/t/check_snmp.t
@@ -15,18 +15,12 @@ BEGIN {
my $res;
-my $host_snmp = getTestParameter( "host_snmp", "NP_HOST_SNMP", "localhost",
- "A host providing an SNMP Service");
+my $host_snmp = getTestParameter("NP_HOST_SNMP", "A host providing an SNMP Service", "localhost");
+my $snmp_community = getTestParameter("NP_SNMP_COMMUNITY", "The SNMP Community string for SNMP Testing (assumes snmp v1)", "public");
+my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1");
+my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost");
+my $user_snmp = getTestParameter("NP_SNMP_USER", "An SNMP user", "auth_md5");
-my $snmp_community = getTestParameter( "snmp_community", "NP_SNMP_COMMUNITY", "public",
- "The SNMP Community string for SNMP Testing (assumes snmp v1)" );
-
-my $host_nonresponsive = getTestParameter( "host_nonresponsive", "NP_HOST_NONRESPONSIVE", "10.0.0.1",
- "The hostname of system not responsive to network requests" );
-
-my $hostname_invalid = getTestParameter( "hostname_invalid", "NP_HOSTNAME_INVALID", "nosuchhost",
- "An invalid (not known to DNS) hostname" );
-my $user_snmp = getTestParameter( "user_snmp", "NP_SNMP_USER", "auth_md5", "An SNMP user");
$res = NPTest->testCmd( "./check_snmp -t 1" );
is( $res->return_code, 3, "No host name" );
@@ -154,9 +148,9 @@ SKIP: {
cmp_ok( $res->return_code, '==', 0, "Timetick used as a string");
like($res->output, '/^SNMP OK - Timeticks:\s\(\d+\)\s+(?:\d+ days?,\s+)?\d+:\d+:\d+\.\d+\s.*$/', "Timetick used as a string, result printed rather than parsed");
- $res = NPTest->testCmd( "./check_snmp -H $host_snmp -C $snmp_community -o HOST-RESOURCES-MIB::hrSWRunParameters.1");
- cmp_ok( $res->return_code, '==', 0, "Timetick used as a string");
- is( $res->output, 'SNMP OK - "" | ', "snmp response without datatype" );
+ $res = NPTest->testCmd( "./check_snmp -H $host_snmp -C $snmp_community -o HOST-RESOURCES-MIB::hrSWRunName.1");
+ cmp_ok( $res->return_code, '==', 0, "snmp response without datatype");
+ like( $res->output, '/^SNMP OK - "(systemd|init)" \| $/', "snmp response without datatype" );
}
SKIP: {
diff --git a/plugins/t/check_ssh.t b/plugins/t/check_ssh.t
index 80083492..a5cd23ce 100644
--- a/plugins/t/check_ssh.t
+++ b/plugins/t/check_ssh.t
@@ -9,17 +9,9 @@ use Test::More;
use NPTest;
# Required parameters
-my $ssh_host = getTestParameter("NP_SSH_HOST",
- "A host providing SSH service",
- "localhost");
-
-my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE",
- "The hostname of system not responsive to network requests",
- "10.0.0.1" );
-
-my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID",
- "An invalid (not known to DNS) hostname",
- "nosuchhost" );
+my $ssh_host = getTestParameter("NP_SSH_HOST", "A host providing SSH service", "localhost");
+my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1" );
+my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost" );
plan skip_all => "SSH_HOST must be defined" unless $ssh_host;
diff --git a/plugins/t/check_swap.t b/plugins/t/check_swap.t
index e44adc90..de9e0f05 100644
--- a/plugins/t/check_swap.t
+++ b/plugins/t/check_swap.t
@@ -8,9 +8,9 @@ use strict;
use Test::More tests => 8;
use NPTest;
-my $successOutput = '/^SWAP OK - [0-9]+\% free \([0-9]+ MB out of [0-9]+ MB\)/';
-my $failureOutput = '/^SWAP CRITICAL - [0-9]+\% free \([0-9]+ MB out of [0-9]+ MB\)/';
-my $warnOutput = '/^SWAP WARNING - [0-9]+\% free \([0-9]+ MB out of [0-9]+ MB\)/';
+my $successOutput = '/^SWAP OK - [0-9]+\% free \([0-9]+MB out of [0-9]+MB\)/';
+my $failureOutput = '/^SWAP CRITICAL - [0-9]+\% free \([0-9]+MB out of [0-9]+MB\)/';
+my $warnOutput = '/^SWAP WARNING - [0-9]+\% free \([0-9]+MB out of [0-9]+MB\)/';
my $result;
diff --git a/plugins/t/check_tcp.t b/plugins/t/check_tcp.t
index f996685d..cb4de53d 100644
--- a/plugins/t/check_tcp.t
+++ b/plugins/t/check_tcp.t
@@ -15,18 +15,11 @@ BEGIN {
}
-my $host_tcp_http = getTestParameter( "host_tcp_http", "NP_HOST_TCP_HTTP", "localhost",
- "A host providing the HTTP Service (a web server)" );
-
-my $host_nonresponsive = getTestParameter( "host_nonresponsive", "NP_HOST_NONRESPONSIVE", "10.0.0.1",
- "The hostname of system not responsive to network requests" );
-
-my $hostname_invalid = getTestParameter( "hostname_invalid", "NP_HOSTNAME_INVALID", "nosuchhost",
- "An invalid (not known to DNS) hostname" );
-
-my $internet_access = getTestParameter( "NP_INTERNET_ACCESS",
- "Is this system directly connected to the internet?",
- "yes");
+my $host_tcp_http = getTestParameter("NP_HOST_TCP_HTTP", "A host providing the HTTP Service (a web server)", "localhost");
+my $host_tls_http = getTestParameter("NP_HOST_TLS_HTTP", "A host providing the HTTPS Service (a tls web server)", "localhost");
+my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1");
+my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost");
+my $internet_access = getTestParameter("NP_INTERNET_ACCESS", "Is this system directly connected to the internet?", "yes");
my $successOutput = '/^TCP OK\s-\s+[0-9]?\.?[0-9]+ second response time on port [0-9]+/';
@@ -42,10 +35,10 @@ $t += checkCmd( "./check_tcp $host_tcp_http -p 81 -wt 0 -ct 0 -to 1", 2
$t += checkCmd( "./check_tcp $host_nonresponsive -p 80 -wt 0 -ct 0 -to 1", 2 );
$t += checkCmd( "./check_tcp $hostname_invalid -p 80 -wt 0 -ct 0 -to 1", 2 );
if($internet_access ne "no") {
- $t += checkCmd( "./check_tcp -S -D 1 -H www.verisign.com -p 443", 0 );
- $t += checkCmd( "./check_tcp -S -D 9000,1 -H www.verisign.com -p 443", 1 );
- $t += checkCmd( "./check_tcp -S -D 9000 -H www.verisign.com -p 443", 1 );
- $t += checkCmd( "./check_tcp -S -D 9000,8999 -H www.verisign.com -p 443", 2 );
+ $t += checkCmd( "./check_tcp -S -D 1 -H $host_tls_http -p 443", 0 );
+ $t += checkCmd( "./check_tcp -S -D 9000,1 -H $host_tls_http -p 443", 1 );
+ $t += checkCmd( "./check_tcp -S -D 9000 -H $host_tls_http -p 443", 1 );
+ $t += checkCmd( "./check_tcp -S -D 9000,8999 -H $host_tls_http -p 443", 2 );
}
# Need the \r\n to make it more standards compliant with web servers. Need the various quotes
diff --git a/plugins/t/check_time.t b/plugins/t/check_time.t
index 961f56e6..92c2f891 100644
--- a/plugins/t/check_time.t
+++ b/plugins/t/check_time.t
@@ -11,14 +11,9 @@ use NPTest;
use vars qw($tests);
BEGIN {$tests = 8; plan tests => $tests}
-my $host_udp_time = getTestParameter( "host_udp_time", "NP_HOST_UDP_TIME", "localhost",
- "A host providing the UDP Time Service" );
-
-my $host_nonresponsive = getTestParameter( "host_nonresponsive", "NP_HOST_NONRESPONSIVE", "10.0.0.1",
- "The hostname of system not responsive to network requests" );
-
-my $hostname_invalid = getTestParameter( "hostname_invalid", "NP_HOSTNAME_INVALID", "nosuchhost",
- "An invalid (not known to DNS) hostname" );
+my $host_udp_time = getTestParameter("NP_HOST_UDP_TIME", "A host providing the UDP Time Service", "localhost");
+my $host_nonresponsive = getTestParameter("NP_HOST_NONRESPONSIVE", "The hostname of system not responsive to network requests", "10.0.0.1");
+my $hostname_invalid = getTestParameter("NP_HOSTNAME_INVALID", "An invalid (not known to DNS) hostname", "nosuchhost");
my $successOutput = '/^TIME OK - [0-9]+ second time difference/';
diff --git a/plugins/t/check_udp.t b/plugins/t/check_udp.t
index 1f6fee70..6c47d095 100644
--- a/plugins/t/check_udp.t
+++ b/plugins/t/check_udp.t
@@ -34,12 +34,12 @@ my $nc;
if(system("which nc.traditional >/dev/null 2>&1") == 0) {
$nc = 'nc.traditional -w 3 -l -u -p 3333';
}
-elsif(system("which netcat >/dev/null 2>&1") == 0) {
- $nc = 'netcat -w 3 -l -u -p 3333';
-}
elsif(system("which nc >/dev/null 2>&1") == 0) {
$nc = 'nc -w 3 -l -u -4 localhost 3333';
}
+elsif(system("which netcat >/dev/null 2>&1") == 0) {
+ $nc = 'netcat -w 3 -l -u -p 3333';
+}
SKIP: {
skip "solaris netcat does not listen to udp", 6 if $^O eq 'solaris';
diff --git a/plugins/tests/certs/expired-cert.pem b/plugins/tests/certs/expired-cert.pem
index 40324cf8..77a9166e 100644
--- a/plugins/tests/certs/expired-cert.pem
+++ b/plugins/tests/certs/expired-cert.pem
@@ -1,21 +1,24 @@
-----BEGIN CERTIFICATE-----
-MIIDYzCCAsygAwIBAgIJAJISzcX71f5pMA0GCSqGSIb3DQEBBAUAMH8xCzAJBgNV
-BAYTAlVLMRMwEQYDVQQIEwpEZXJieXNoaXJlMQ8wDQYDVQQHEwZCZWxwZXIxFzAV
-BgNVBAoTDk5hZ2lvcyBQbHVnaW5zMREwDwYDVQQDEwhUb24gVm9vbjEeMBwGCSqG
-SIb3DQEJARYPdG9udm9vbkBtYWMuY29tMB4XDTA5MDMwNjAwMTMxNVoXDTA5MDMw
-NTAwMTMxNlowfzELMAkGA1UEBhMCVUsxEzARBgNVBAgTCkRlcmJ5c2hpcmUxDzAN
-BgNVBAcTBkJlbHBlcjEXMBUGA1UEChMOTmFnaW9zIFBsdWdpbnMxETAPBgNVBAMT
-CFRvbiBWb29uMR4wHAYJKoZIhvcNAQkBFg90b252b29uQG1hYy5jb20wgZ8wDQYJ
-KoZIhvcNAQEBBQADgY0AMIGJAoGBAOQHP4JnzACi4q6quXAiK+gTSffG6yyjEV+K
-iyutRgBF2MdF03X5ls0wENw/5fnMTrHynl4XoGoV/rD4CR2hGT0m7dv7Vu0MRLlP
-J1SCiFeMuQS30zzLMJr0A7IW869qRlKQmzxs1JT6XDbSoNQuF154zoxwNsKlMjoX
-tJSHN2YpAgMBAAGjgeYwgeMwHQYDVR0OBBYEFHWjM9OQldrDLMcAfPnUVfGxlzOp
-MIGzBgNVHSMEgaswgaiAFHWjM9OQldrDLMcAfPnUVfGxlzOpoYGEpIGBMH8xCzAJ
-BgNVBAYTAlVLMRMwEQYDVQQIEwpEZXJieXNoaXJlMQ8wDQYDVQQHEwZCZWxwZXIx
-FzAVBgNVBAoTDk5hZ2lvcyBQbHVnaW5zMREwDwYDVQQDEwhUb24gVm9vbjEeMBwG
-CSqGSIb3DQEJARYPdG9udm9vbkBtYWMuY29tggkAkhLNxfvV/mkwDAYDVR0TBAUw
-AwEB/zANBgkqhkiG9w0BAQQFAAOBgQDHjoXoGwBamCiNplTt93jH/TO08RATdZP5
-45hlxv2+PKCjjTiFa2mjAvopFiqmYsr40XYEmpeYMiaOzOW5rBjtqBAT/JJWyfda
-SCmj3swqyKus63rv/iuokIhZzBdhbB+eOJJrmwT2SEc5KdRaipH0QAGF1nZAAGzo
-6xW7hkzYog==
+MIIEETCCAvmgAwIBAgIUFDsP6WnV/uqeQMpD/DYSqouE13kwDQYJKoZIhvcNAQEL
+BQAwgZcxCzAJBgNVBAYTAkRFMRAwDgYDVQQIDAdCYXZhcmlhMQ8wDQYDVQQHDAZN
+dW5pY2gxGzAZBgNVBAoMEk1vbml0b3JpbmcgUGx1Z2luczEbMBkGA1UEAwwSTW9u
+aXRvcmluZyBQbHVnaW5zMSswKQYJKoZIhvcNAQkBFhxkZXZlbEBtb25pdG9yaW5n
+LXBsdWdpbnMub3JnMB4XDTA4MDEwMTExMDAyNloXDTA4MDEwMjExMDAyNlowgZcx
+CzAJBgNVBAYTAkRFMRAwDgYDVQQIDAdCYXZhcmlhMQ8wDQYDVQQHDAZNdW5pY2gx
+GzAZBgNVBAoMEk1vbml0b3JpbmcgUGx1Z2luczEbMBkGA1UEAwwSTW9uaXRvcmlu
+ZyBQbHVnaW5zMSswKQYJKoZIhvcNAQkBFhxkZXZlbEBtb25pdG9yaW5nLXBsdWdp
+bnMub3JnMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyeHKwKFjJWUX
+YHKsisypUf9dHlIPQAISyGP1BX6UL26ZLvE6kKbx3LFQ9W2POGoQWlzFiB1soGeV
+WDd0U0JtWdCKmOXWdcXpupQlTSUtRCMDQkfqLN8GR5TBTd73rezp5mz08nMfLwu0
+p5VQ191Ui8JHFgrAOalAn8Uw5De8vj4VmTXmU5NJ2UFoC0ddU/Th/lwRCayHc1cn
+MVq2F7c/uhMUUQYNBmJy0pxoHawp+j9NKl/xIYsjgQNgahQyNuswuGHjaEwhPu+7
+G03XsW4ehu+H1898M/MkSln6LQAU1syoJ8ypPM8tV+zgx4uwj7udnZ2hceN95uW7
+0PWg5DQyUwIDAQABo1MwUTAdBgNVHQ4EFgQUt9ps3KJ1XiMuy/ijFBjMzf6jgwkw
+HwYDVR0jBBgwFoAUt9ps3KJ1XiMuy/ijFBjMzf6jgwkwDwYDVR0TAQH/BAUwAwEB
+/zANBgkqhkiG9w0BAQsFAAOCAQEAVPBZwMHbrnHFbmhbcPuvYd5cxk0uSVNAUzsl
+2biCq5P+ZHo10VHGygXtdV4utqk/IrAt2u5qSxycWPStCtAgTd3Q8ncfjOkaHM4z
+2bxTkhLyQeU8NWPuDBqDszo2GOaFTv+lm36LEKiAfqB1tjQVePSkycdrWIhkamBV
+EgMe6uHLdU7QQk1ajQfrBdakN1beqki/dKieA6gm+XF/QS4SSYINmsHB/2X5cT9U
+b/KMB8xurCnuJQuk1P4VsSkJCOSeHjWZgK9pKNdsIJZr4wDVfhjQgU0XT6xakSf7
+eCaHtO0VKsbLZoiTmpxidjsdYiXyeKYIQNtUpTjyJ5V/cZsq9w==
-----END CERTIFICATE-----
diff --git a/plugins/tests/certs/expired-key.pem b/plugins/tests/certs/expired-key.pem
index af0e24da..c1510b2d 100644
--- a/plugins/tests/certs/expired-key.pem
+++ b/plugins/tests/certs/expired-key.pem
@@ -1,15 +1,28 @@
------BEGIN RSA PRIVATE KEY-----
-MIICXAIBAAKBgQDkBz+CZ8wAouKuqrlwIivoE0n3xussoxFfiosrrUYARdjHRdN1
-+ZbNMBDcP+X5zE6x8p5eF6BqFf6w+AkdoRk9Ju3b+1btDES5TydUgohXjLkEt9M8
-yzCa9AOyFvOvakZSkJs8bNSU+lw20qDULhdeeM6McDbCpTI6F7SUhzdmKQIDAQAB
-AoGARgI3rHjjuDpKMGg4IMZNBqaNaiZHY9/44IVvrww21rSbFqtIfgsQEpU0R/rS
-R7xDWPztRGQqmwd/t6OfYNpqHbjO1MWzasVBVnzue5P59Y1xy1h0LZF8+a9GY++0
-uAGUC24jsXSmypNVzoX+ZKyinA3oYV/etdPYx1W8Ms5XIzUCQQD7xwhMuLok6Kbq
-UEgiSfBTbx+haP3IiqqMF14z8QoEyD3jchydNaXEYdQxN8jEl2aPrMqTc6x8Jq4/
-ai0OkB+fAkEA59pAmN81HylV7+CsVjLOSbJqzau7NDxSs2uutxhHZRwz0e25wVer
-fA03l08u0ebC/TDHkmHV6ikCryM5HU2FNwJAVZJFzd2S1myEHmr+uTisB49jDrbi
-WkBWypo+mCS6JPnxntXvx7auClq9haTSBY73eqldiFPuMZvr6P2rJqHxPQJBAOTM
-quaxjti7kATy8N73sD9mBKQGju1TgkFxSK+DFCGhnTnToXY9MAtxd6SoDYoyccYu
-dyPrzJAR/IYc+mYCdC0CQDKlZuMPVXEgvGaQapzMQ++5yJRvMZF4tWvONBs0OCE9
-QYarsTi5M20cymMBXHOLZIjqwsni4G/C9kqJSvC75Vg=
------END RSA PRIVATE KEY-----
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDJ4crAoWMlZRdg
+cqyKzKlR/10eUg9AAhLIY/UFfpQvbpku8TqQpvHcsVD1bY84ahBaXMWIHWygZ5VY
+N3RTQm1Z0IqY5dZ1xem6lCVNJS1EIwNCR+os3wZHlMFN3vet7OnmbPTycx8vC7Sn
+lVDX3VSLwkcWCsA5qUCfxTDkN7y+PhWZNeZTk0nZQWgLR11T9OH+XBEJrIdzVycx
+WrYXtz+6ExRRBg0GYnLSnGgdrCn6P00qX/EhiyOBA2BqFDI26zC4YeNoTCE+77sb
+Tdexbh6G74fXz3wz8yRKWfotABTWzKgnzKk8zy1X7ODHi7CPu52dnaFx433m5bvQ
+9aDkNDJTAgMBAAECggEACrLFfNnQmD24NGs/S4e2/VpsA9xTZI/3kNkDNgxULANP
+aNZtxRajwI9A/BCXQ2UTgsZhzWnJxOJYXrlpl7PweY78mUesysb3MOUC6QisUm0M
+kimfdktHWOnAKLFFLNleN9DUVjjVkTeslijqhNX80f80py1grG2UuCLKCX4OqYIm
+qACE8TMmSZLz42AO96TndNtKplQ8LuGLEmByW95wEfhx3Gm4ckkL7qII/U3DnQXr
+0T+3xLaj+eNJzYDpIFZiw4sNzOuAyCz+4Cc4sPDuMnzquXF+enpkemoycC1RmEpG
+KIDTwmFsc8TrbGV0qifC6fsCrDivdYLqL7R/q3IBQQKBgQDmfvO3VYTEKY8NA+AT
+5s6+7NTxRsXxJUCEhCNBWimSH3EzmBAvrodLY6A0oYg8i81bgNX1I9GPVXJZ/QA7
+ukd84HUIQoGS5Usmo4rp+kz4P6KkLXDemZtWPU5GXxicfajHRQlkbW6St6SpV7IS
+ibJcDADeoiaPL1xvue1ToP/LoQKBgQDgOFHjYpep00gabvjXfYW7vhrg1vVwaKUM
+rf0+UW8Exk4nbBw0eEC2YjxIwzdktlkdbzGaXYULnhg8GnfxYesMOpCLPw1JdB8o
+ixETAFpW5bKrUsjEFRUGhzWnsCSFIQ4smpmtGLTxOQ8AkoDdORY5Z+Wv7JtFF6Do
+PSoblckZcwKBgB3TD3YJesRnHDty5OuuUdIikuslXTd2uoJrFqS+JeLibqNeabnB
+u3/lxDULMbWj4U6VvRmbKOKDC+jY887Gq7lc0cff0yROxwqY3sCnwo3crg7QUmp7
+Nb5S8G3qoCSfndcq96wm/Me/O28uCbycVJfUdchY8uRUHIHYbP0FOBQBAoGBAMgh
+fPX4imaKr1DovDObVkK87EDDnU84GBm5MtDs3qrkVd3aIVK0Aw7HoAdSN58tI12i
+YiPmVVqJQhhjh6tsOuAvZdTj8ngdrbICbrsHFZt6an+A5LIgHyQ0iy+hiPdLCdvG
+ImTeKKMmyr04Bs1upueWVO0xw2VoMbcY4Py+NUEBAoGASQqedfCSKGLT+5lLZrhP
+CbFVMmswEPjBcRb1trcuA09vfExn9FfUNFnnw3i9miprED5kufvAjb+6nduXizKg
+7HQYHCwVvakgtXgbiDMaNgYZcjWm+MdnfiwLJjJTO3DfI1JF2PJ8y9R95DPlAkDm
+xH3OV8KV4UiTEVxS7ksmGzY=
+-----END PRIVATE KEY-----
diff --git a/plugins/tests/certs/server-cert.pem b/plugins/tests/certs/server-cert.pem
index 549e4f7e..b84b91d2 100644
--- a/plugins/tests/certs/server-cert.pem
+++ b/plugins/tests/certs/server-cert.pem
@@ -1,21 +1,24 @@
-----BEGIN CERTIFICATE-----
-MIIDYzCCAsygAwIBAgIJAL8LkpNwzYdxMA0GCSqGSIb3DQEBBAUAMH8xCzAJBgNV
-BAYTAlVLMRMwEQYDVQQIEwpEZXJieXNoaXJlMQ8wDQYDVQQHEwZCZWxwZXIxFzAV
-BgNVBAoTDk5hZ2lvcyBQbHVnaW5zMREwDwYDVQQDEwhUb24gVm9vbjEeMBwGCSqG
-SIb3DQEJARYPdG9udm9vbkBtYWMuY29tMB4XDTA5MDMwNTIxNDEyOFoXDTE5MDMw
-MzIxNDEyOFowfzELMAkGA1UEBhMCVUsxEzARBgNVBAgTCkRlcmJ5c2hpcmUxDzAN
-BgNVBAcTBkJlbHBlcjEXMBUGA1UEChMOTmFnaW9zIFBsdWdpbnMxETAPBgNVBAMT
-CFRvbiBWb29uMR4wHAYJKoZIhvcNAQkBFg90b252b29uQG1hYy5jb20wgZ8wDQYJ
-KoZIhvcNAQEBBQADgY0AMIGJAoGBAKcWMBtNtfY8vZXk0SN6/EYTVN/LOvaOSegy
-oVdLoGwuwjagk+XmCzvCqHZRp8lnCLay7AO8AQI7TSN02ihCcSrgGA9OT+HciIJ1
-l5/kEYUAuA1PR6YKK/T713zUAlMzy2tsugx5+xSsSEwsXkmne52jJiG/wuE5CLT0
-9pF8HQqHAgMBAAGjgeYwgeMwHQYDVR0OBBYEFGioSPQ/rdE19+zaeY2YvHTXlUDI
-MIGzBgNVHSMEgaswgaiAFGioSPQ/rdE19+zaeY2YvHTXlUDIoYGEpIGBMH8xCzAJ
-BgNVBAYTAlVLMRMwEQYDVQQIEwpEZXJieXNoaXJlMQ8wDQYDVQQHEwZCZWxwZXIx
-FzAVBgNVBAoTDk5hZ2lvcyBQbHVnaW5zMREwDwYDVQQDEwhUb24gVm9vbjEeMBwG
-CSqGSIb3DQEJARYPdG9udm9vbkBtYWMuY29tggkAvwuSk3DNh3EwDAYDVR0TBAUw
-AwEB/zANBgkqhkiG9w0BAQQFAAOBgQCdqasaIO6JiV5ONFG6Tr1++85UfEdZKMUX
-N2NHiNNUunolIZEYR+dW99ezKmHlDiQ/tMgoLVYpl2Ubho2pAkLGQR+W0ZASgWQ1
-NjfV27Rv0y6lYQMTA0lVAU93L1x9reo3FMedmL5+H+lIEpLCxEPtAJNISrJOneZB
-W5jDadwkoQ==
+MIIEBjCCAu6gAwIBAgIJANbQ5QQrKhUGMA0GCSqGSIb3DQEBCwUAMIGXMQswCQYD
+VQQGEwJERTEQMA4GA1UECAwHQmF2YXJpYTEPMA0GA1UEBwwGTXVuaWNoMRswGQYD
+VQQKDBJNb25pdG9yaW5nIFBsdWdpbnMxGzAZBgNVBAMMEk1vbml0b3JpbmcgUGx1
+Z2luczErMCkGCSqGSIb3DQEJARYcZGV2ZWxAbW9uaXRvcmluZy1wbHVnaW5zLm9y
+ZzAeFw0xOTAyMTkxNTMxNDRaFw0yOTAyMTYxNTMxNDRaMIGXMQswCQYDVQQGEwJE
+RTEQMA4GA1UECAwHQmF2YXJpYTEPMA0GA1UEBwwGTXVuaWNoMRswGQYDVQQKDBJN
+b25pdG9yaW5nIFBsdWdpbnMxGzAZBgNVBAMMEk1vbml0b3JpbmcgUGx1Z2luczEr
+MCkGCSqGSIb3DQEJARYcZGV2ZWxAbW9uaXRvcmluZy1wbHVnaW5zLm9yZzCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKgV2yp8pQvJuN+aJGdAe6Hd0tja
+uteCPcNIcM92WLOF69TLTSYon1XDon4tHTh4Z5d4lD8bfsGzFVBmDSgWidhAUf+v
+EqEXwbp293ej/Frc0pXCvmrz6kI1tWrLtQhL/VdbxFYxhV7JjKb+PY3SxGFpSLPe
+PQ/5SwVndv7rZIwcjseL22K5Uy2TIrkgzzm2pRs/IvoxRybYr/+LGoHyrtJC6AO8
+ylp8A/etL0gwtUvRnrnZeTQ2pA1uZ5QN3anTL8JP/ZRZYNegIkaawqMtTKbhM6pi
+u3/4a3Uppvt0y7vmGfQlYejxCpICnMrvHMpw8L58zv/98AbCGjDU3UwCt6MCAwEA
+AaNTMFEwHQYDVR0OBBYEFG/UH6nGYPlVcM75UXzXBF5GZyrcMB8GA1UdIwQYMBaA
+FG/UH6nGYPlVcM75UXzXBF5GZyrcMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcN
+AQELBQADggEBAGwitJPOnlIKLndNf+iCLMIs0dxsl8kAaejFcjoT0n4ja7Y6Zrqz
+VSIidzz9vQWvy24xKJpAOdj/iLRHCUOG+Pf5fA6+/FiuqXr6gE2/lm0eC58BNONr
+E5OzjQ/VoQ8RX4hDntgu6FYbaVa/vhwn16igt9qmdNGGZXf2/+DM3JADwyaA4EK8
+vm7KdofX9zkxXecHPNvf3jiVLPiDDt6tkGpHPEsyP/yc+RUdltUeZvHfliV0cCuC
+jJX+Fm9ysjSpHIFFr+jUMuMHibWoOD8iy3eYxfCDoWsH488pCbj8MNuAq6vd6DBk
+bOZxDz43vjWuYMkwXJTxJQh7Pne6kK0vE1g=
-----END CERTIFICATE-----
diff --git a/plugins/tests/certs/server-key.pem b/plugins/tests/certs/server-key.pem
index eacaeaa3..11947555 100644
--- a/plugins/tests/certs/server-key.pem
+++ b/plugins/tests/certs/server-key.pem
@@ -1,15 +1,28 @@
------BEGIN RSA PRIVATE KEY-----
-MIICWwIBAAKBgQCnFjAbTbX2PL2V5NEjevxGE1Tfyzr2jknoMqFXS6BsLsI2oJPl
-5gs7wqh2UafJZwi2suwDvAECO00jdNooQnEq4BgPTk/h3IiCdZef5BGFALgNT0em
-Civ0+9d81AJTM8trbLoMefsUrEhMLF5Jp3udoyYhv8LhOQi09PaRfB0KhwIDAQAB
-AoGAfpxclcP8N3vteXErXURrd7pcXT0GECDgNjhvc9PV20RPXM+vYs1AA+fMeeQE
-TaRqwO6x016aMRO4rz5ztYArecTBznkds1k59pkN/Ne/nsueU4tvGK8MNyS2o986
-Voohqkaq4Lcy1bcHJb9su1ELjegEr1R76Mz452Hsy+uTbAECQQDcg/tZWKVeh5CQ
-dOEB3YWHwfn0NDgfPm/X2i2kAZ7n7URaUy/ffdlfsrr1mBtHCfedLoOxmmlNfEpM
-hXAAurSHAkEAwfk7fEb0iN0Sj9gTozO7c6Ky10KwePZyjVzqSQIiJq3NX8BEaIeb
-51TXxE5VxaLjjMLRkA0hWTYXClgERFZ6AQJAN7ChPqwzf08PRFwwIw911JY5cOHr
-NoDHMCUql5vNLNdwBruxgGjBB/kUXEfgw60RusFvgt/zLh1wiii844JDawJAGQBF
-sYP3urg7zzx7c3qUe5gJ0wLuefjR1PSX4ecbfb7DDMdcSdjIuG1QDiZGmd2f1KG7
-nwSCOtxk5dloW2KGAQJAQh/iBn0QhfKLFAP5eZBVk8E8XlZuw+S2DLy5SnBlIiYJ
-GB5I2OClgtudXMv1labFrcST8O9eFrtsrhU1iUGUOw==
------END RSA PRIVATE KEY-----
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCoFdsqfKULybjf
+miRnQHuh3dLY2rrXgj3DSHDPdlizhevUy00mKJ9Vw6J+LR04eGeXeJQ/G37BsxVQ
+Zg0oFonYQFH/rxKhF8G6dvd3o/xa3NKVwr5q8+pCNbVqy7UIS/1XW8RWMYVeyYym
+/j2N0sRhaUiz3j0P+UsFZ3b+62SMHI7Hi9tiuVMtkyK5IM85tqUbPyL6MUcm2K//
+ixqB8q7SQugDvMpafAP3rS9IMLVL0Z652Xk0NqQNbmeUDd2p0y/CT/2UWWDXoCJG
+msKjLUym4TOqYrt/+Gt1Kab7dMu75hn0JWHo8QqSApzK7xzKcPC+fM7//fAGwhow
+1N1MArejAgMBAAECggEANuvdTwanTzC8jaNqHaq+OuemS2E9B8nwsGxtH/zFgvNR
+WZiMPtmrJnTkFWJcV+VPw/iMSAqN4nDHmBugVOb4Z4asxGTKK4T9shXJSnh0rqPU
+00ZsvbmxY6z0+E5TesCJqQ+9GYTY1V357V7JchvaOxIRxWPqg9urHbru8OCtW/I5
+Fh5HPUZlgCvlMpjlhyjydIf/oXyVA3RNsXlwe8+2cKuGIrjEzm2j9o3VF0sctTX0
+ItP8A9qDmDQN7GIWX0MW6gncojpS1omC2wcFsdjj/xfPyiDal1X4aq/2YqG8351c
+YlM/+6Va0u9WWE/i64gASTAVqpMV4Yg8y0gGycuA0QKBgQDbgI2QeLd3FvMcURiU
+l3w9qJgw/Jp3jaNC/9LkVGGz4f4lKKB67lPZvI4noMK8GqO/LcXgqP/RY1oJojoA
+/6JKVvzYGASZ7VgMoG9bk1AneP1PGdibuTUEwimGlcObxnDFIC/yjwPFu3jIdqdS
+zZi1RZzyqAogN5y3SBEypSmn9wKBgQDECKsqqlcizmCl8v5aVk875AzGN+DOHZqx
+bkmztlnLO/2e2Fmk3G5Vvnui0FYisf8Eq19tUTQCF6lSfJlGQeFAT119wkFZhLu+
+FfLGqoEMH0ijJg/8PpdpFRK3I94YcISoTNN6yxMvE6xdDGfKCt5a+IX5bwQi9Zdc
+B242gEc6tQKBgA6tM8n7KFlAIZU9HuWgk2AUC8kKutFPmSD7tgAqXDYI4FNfugs+
+MEEYyHCB4UNujJBV4Ss6YZCAkh6eyD4U2aca1eElCfm40vBVMdzvpqZdAqLtWXxg
+D9l3mgszrFaYGCY2Fr6jLV9lP5g3xsxUjudf9jSLY9HvpfzjRrMaNATVAoGBALTl
+/vYfPMucwKlC5B7++J0e4/7iv6vUu9SyHocdZh1anb9AjPDKjXLIlZT4RhQ8R0XK
+0wOw5JpttU2uN08TKkbLNk3/vYhbKVjPLjrQSseh8sjDLgsqw1QwIxYnniLVakVY
+p+rvjSNrNyqicQCMKQavwgocvSd5lJRTMwxOMezlAoGBAKWj71BX+0CK00/2S6lC
+TcNcuUPG0d8y1czZ4q6tUlG4htwq1FMOpaghATXjkdsOGTLS+H1aA0Kt7Ai9zDhc
+/bzOJEJ+jvBXV4Gcs7jl1r/HTKv0tT9ZSI5Vzkida0rfqxDGzcMVlLuCdH0cb8Iu
+N0wdmCAqlQwHR13+F1zrAD7V
+-----END PRIVATE KEY-----
diff --git a/plugins/tests/check_curl.t b/plugins/tests/check_curl.t
new file mode 100755
index 00000000..29cb03f2
--- /dev/null
+++ b/plugins/tests/check_curl.t
@@ -0,0 +1,509 @@
+#! /usr/bin/perl -w -I ..
+#
+# Test check_http by having an actual HTTP server running
+#
+# To create the https server certificate:
+# openssl req -new -x509 -keyout server-key.pem -out server-cert.pem -days 3650 -nodes
+# to create a new expired certificate:
+# faketime '2008-01-01 12:00:00' openssl req -new -x509 -keyout expired-key.pem -out expired-cert.pem -days 1 -nodes
+# Country Name (2 letter code) [AU]:DE
+# State or Province Name (full name) [Some-State]:Bavaria
+# Locality Name (eg, city) []:Munich
+# Organization Name (eg, company) [Internet Widgits Pty Ltd]:Monitoring Plugins
+# Organizational Unit Name (eg, section) []:
+# Common Name (e.g. server FQDN or YOUR name) []:Monitoring Plugins
+# Email Address []:devel@monitoring-plugins.org
+
+use strict;
+use Test::More;
+use NPTest;
+use FindBin qw($Bin);
+
+$ENV{'LC_TIME'} = "C";
+
+my $common_tests = 72;
+my $ssl_only_tests = 8;
+# Check that all dependent modules are available
+eval "use HTTP::Daemon 6.01;";
+plan skip_all => 'HTTP::Daemon >= 6.01 required' if $@;
+eval {
+ require HTTP::Status;
+ require HTTP::Response;
+};
+
+my $plugin = 'check_http';
+$plugin = 'check_curl' if $0 =~ m/check_curl/mx;
+
+# look for libcurl version to see if some advanced checks are possible (>= 7.49.0)
+my $advanced_checks = 12;
+my $use_advanced_checks = 0;
+my $required_version = '7.49.0';
+my $virtual_host = 'www.somefunnyhost.com';
+my $virtual_port = 42;
+my $curl_version = '';
+open (my $fh, '-|', "./$plugin --version") or die;
+while (<$fh>) {
+ if (m{libcurl/([\d.]+)\s}) {
+ $curl_version = $1;
+ last;
+ }
+}
+close ($fh);
+if ($curl_version) {
+ my ($major, $minor, $release) = split (/\./, $curl_version);
+ my ($req_major, $req_minor, $req_release) = split (/\./, $required_version);
+ my $check = ($major <=> $req_major or $minor <=> $req_minor or $release <=> $req_release);
+ if ($check >= 0) {
+ $use_advanced_checks = 1;
+ print "Found libcurl $major.$minor.$release. Using advanced checks\n";
+ }
+}
+
+if ($@) {
+ plan skip_all => "Missing required module for test: $@";
+} else {
+ if (-x "./$plugin") {
+ plan tests => $common_tests * 2 + $ssl_only_tests + $advanced_checks;
+ } else {
+ plan skip_all => "No $plugin compiled";
+ }
+}
+
+my $servers = { http => 0 }; # HTTP::Daemon should always be available
+eval { require HTTP::Daemon::SSL };
+if ($@) {
+ diag "Cannot load HTTP::Daemon::SSL: $@";
+} else {
+ $servers->{https} = 0;
+}
+
+# set a fixed version, so the header size doesn't vary
+$HTTP::Daemon::VERSION = "1.00";
+
+my $port_http = 50000 + int(rand(1000));
+my $port_https = $port_http + 1;
+my $port_https_expired = $port_http + 2;
+
+# This array keeps sockets around for implementing timeouts
+my @persist;
+
+# Start up all servers
+my @pids;
+my $pid = fork();
+if ($pid) {
+ # Parent
+ push @pids, $pid;
+ if (exists $servers->{https}) {
+ # Fork a normal HTTPS server
+ $pid = fork();
+ if ($pid) {
+ # Parent
+ push @pids, $pid;
+ # Fork an expired cert server
+ $pid = fork();
+ if ($pid) {
+ push @pids, $pid;
+ } else {
+ my $d = HTTP::Daemon::SSL->new(
+ LocalPort => $port_https_expired,
+ LocalAddr => "127.0.0.1",
+ SSL_cert_file => "$Bin/certs/expired-cert.pem",
+ SSL_key_file => "$Bin/certs/expired-key.pem",
+ ) || die;
+ print "Please contact https expired at: <URL:", $d->url, ">\n";
+ run_server( $d );
+ exit;
+ }
+ } else {
+ my $d = HTTP::Daemon::SSL->new(
+ LocalPort => $port_https,
+ LocalAddr => "127.0.0.1",
+ SSL_cert_file => "$Bin/certs/server-cert.pem",
+ SSL_key_file => "$Bin/certs/server-key.pem",
+ ) || die;
+ print "Please contact https at: <URL:", $d->url, ">\n";
+ run_server( $d );
+ exit;
+ }
+ }
+} else {
+ # Child
+ #print "child\n";
+ my $d = HTTP::Daemon->new(
+ LocalPort => $port_http,
+ LocalAddr => "127.0.0.1",
+ ) || die;
+ print "Please contact http at: <URL:", $d->url, ">\n";
+ run_server( $d );
+ exit;
+}
+
+# give our webservers some time to startup
+sleep(3);
+
+# Run the same server on http and https
+sub run_server {
+ my $d = shift;
+ MAINLOOP: while (my $c = $d->accept ) {
+ while (my $r = $c->get_request) {
+ if ($r->method eq "GET" and $r->url->path =~ m^/statuscode/(\d+)^) {
+ $c->send_basic_header($1);
+ $c->send_crlf;
+ } elsif ($r->method eq "GET" and $r->url->path =~ m^/file/(.*)^) {
+ $c->send_basic_header;
+ $c->send_crlf;
+ $c->send_file_response("$Bin/var/$1");
+ } elsif ($r->method eq "GET" and $r->url->path eq "/slow") {
+ $c->send_basic_header;
+ $c->send_crlf;
+ sleep 1;
+ $c->send_response("slow");
+ } elsif ($r->url->path eq "/method") {
+ if ($r->method eq "DELETE") {
+ $c->send_error(HTTP::Status->RC_METHOD_NOT_ALLOWED);
+ } elsif ($r->method eq "foo") {
+ $c->send_error(HTTP::Status->RC_NOT_IMPLEMENTED);
+ } else {
+ $c->send_status_line(200, $r->method);
+ }
+ } elsif ($r->url->path eq "/postdata") {
+ $c->send_basic_header;
+ $c->send_crlf;
+ $c->send_response($r->method.":".$r->content);
+ } elsif ($r->url->path eq "/redirect") {
+ $c->send_redirect( "/redirect2" );
+ } elsif ($r->url->path eq "/redir_external") {
+ $c->send_redirect(($d->isa('HTTP::Daemon::SSL') ? "https" : "http") . "://169.254.169.254/redirect2" );
+ } elsif ($r->url->path eq "/redirect2") {
+ $c->send_basic_header;
+ $c->send_crlf;
+ $c->send_response(HTTP::Response->new( 200, 'OK', undef, 'redirected' ));
+ } elsif ($r->url->path eq "/redir_timeout") {
+ $c->send_redirect( "/timeout" );
+ } elsif ($r->url->path eq "/timeout") {
+ # Keep $c from being destroyed, but prevent severe leaks
+ unshift @persist, $c;
+ delete($persist[1000]);
+ next MAINLOOP;
+ } elsif ($r->url->path eq "/header_check") {
+ $c->send_basic_header;
+ $c->send_header('foo');
+ $c->send_crlf;
+ } elsif ($r->url->path eq "/header_broken_check") {
+ $c->send_basic_header;
+ $c->send_header('foo');
+ print $c "Test1:: broken\n";
+ print $c " Test2: leading whitespace\n";
+ $c->send_crlf;
+ } elsif ($r->url->path eq "/virtual_port") {
+ # return sent Host header
+ $c->send_basic_header;
+ $c->send_crlf;
+ $c->send_response(HTTP::Response->new( 200, 'OK', undef, $r->header ('Host')));
+ } else {
+ $c->send_error(HTTP::Status->RC_FORBIDDEN);
+ }
+ $c->close;
+ }
+ }
+}
+
+END {
+ foreach my $pid (@pids) {
+ if ($pid) { print "Killing $pid\n"; kill "INT", $pid }
+ }
+};
+
+if ($ARGV[0] && $ARGV[0] eq "-d") {
+ while (1) {
+ sleep 100;
+ }
+}
+
+my $result;
+my $command = "./$plugin -H 127.0.0.1";
+
+run_common_tests( { command => "$command -p $port_http" } );
+SKIP: {
+ skip "HTTP::Daemon::SSL not installed", $common_tests + $ssl_only_tests if ! exists $servers->{https};
+ run_common_tests( { command => "$command -p $port_https", ssl => 1 } );
+
+ $result = NPTest->testCmd( "$command -p $port_https -S -C 14" );
+ is( $result->return_code, 0, "$command -p $port_https -S -C 14" );
+ is( $result->output, "OK - Certificate 'Monitoring Plugins' will expire on Fri Feb 16 15:31:44 2029 +0000.", "output ok" );
+
+ $result = NPTest->testCmd( "$command -p $port_https -S -C 14000" );
+ is( $result->return_code, 1, "$command -p $port_https -S -C 14000" );
+ like( $result->output, '/WARNING - Certificate \'Monitoring Plugins\' expires in \d+ day\(s\) \(Fri Feb 16 15:31:44 2029 \+0000\)./', "output ok" );
+
+ # Expired cert tests
+ $result = NPTest->testCmd( "$command -p $port_https -S -C 13960,14000" );
+ is( $result->return_code, 2, "$command -p $port_https -S -C 13960,14000" );
+ like( $result->output, '/CRITICAL - Certificate \'Monitoring Plugins\' expires in \d+ day\(s\) \(Fri Feb 16 15:31:44 2029 \+0000\)./', "output ok" );
+
+ $result = NPTest->testCmd( "$command -p $port_https_expired -S -C 7" );
+ is( $result->return_code, 2, "$command -p $port_https_expired -S -C 7" );
+ is( $result->output,
+ 'CRITICAL - Certificate \'Monitoring Plugins\' expired on Wed Jan 2 11:00:26 2008 +0000.',
+ "output ok" );
+
+}
+
+my $cmd;
+
+# advanced checks with virtual hostname and virtual port
+SKIP: {
+ skip "libcurl version is smaller than $required_version", 6 unless $use_advanced_checks;
+
+ # http without virtual port
+ $cmd = "./$plugin -H $virtual_host -I 127.0.0.1 -p $port_http -u /virtual_port -r ^$virtual_host:$port_http\$";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ # http with virtual port (!= 80)
+ $cmd = "./$plugin -H $virtual_host:$virtual_port -I 127.0.0.1 -p $port_http -u /virtual_port -r ^$virtual_host:$virtual_port\$";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ # http with virtual port (80)
+ $cmd = "./$plugin -H $virtual_host:80 -I 127.0.0.1 -p $port_http -u /virtual_port -r ^$virtual_host\$";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+}
+
+# and the same for SSL
+SKIP: {
+ skip "libcurl version is smaller than $required_version and/or HTTP::Daemon::SSL not installed", 6 if ! exists $servers->{https} or not $use_advanced_checks;
+ # https without virtual port
+ $cmd = "./$plugin -H $virtual_host -I 127.0.0.1 -p $port_https --ssl -u /virtual_port -r ^$virtual_host:$port_https\$";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ # https with virtual port (!= 443)
+ $cmd = "./$plugin -H $virtual_host:$virtual_port -I 127.0.0.1 -p $port_https --ssl -u /virtual_port -r ^$virtual_host:$virtual_port\$";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ # https with virtual port (443)
+ $cmd = "./$plugin -H $virtual_host:443 -I 127.0.0.1 -p $port_https --ssl -u /virtual_port -r ^$virtual_host\$";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+}
+
+
+sub run_common_tests {
+ my ($opts) = @_;
+ my $command = $opts->{command};
+ if ($opts->{ssl}) {
+ $command .= " --ssl";
+ }
+
+ $result = NPTest->testCmd( "$command -u /file/root" );
+ is( $result->return_code, 0, "/file/root");
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - 274 bytes in [\d\.]+ second/', "Output correct" );
+
+ $result = NPTest->testCmd( "$command -u /file/root -s Root" );
+ is( $result->return_code, 0, "/file/root search for string");
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - 274 bytes in [\d\.]+ second/', "Output correct" );
+
+ $result = NPTest->testCmd( "$command -u /file/root -s NonRoot" );
+ is( $result->return_code, 2, "Missing string check");
+ like( $result->output, qr%^HTTP CRITICAL: HTTP/1\.1 200 OK - string 'NonRoot' not found on 'https?://127\.0\.0\.1:\d+/file/root'%, "Shows search string and location");
+
+ $result = NPTest->testCmd( "$command -u /file/root -s NonRootWithOver30charsAndMoreFunThanAWetFish" );
+ is( $result->return_code, 2, "Missing string check");
+ like( $result->output, qr%HTTP CRITICAL: HTTP/1\.1 200 OK - string 'NonRootWithOver30charsAndM...' not found on 'https?://127\.0\.0\.1:\d+/file/root'%, "Shows search string and location");
+
+ $result = NPTest->testCmd( "$command -u /header_check -d foo" );
+ is( $result->return_code, 0, "header_check search for string");
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - 96 bytes in [\d\.]+ second/', "Output correct" );
+
+ $result = NPTest->testCmd( "$command -u /header_check -d bar" );
+ is( $result->return_code, 2, "Missing header string check");
+ like( $result->output, qr%^HTTP CRITICAL: HTTP/1\.1 200 OK - header 'bar' not found on 'https?://127\.0\.0\.1:\d+/header_check'%, "Shows search string and location");
+
+ $result = NPTest->testCmd( "$command -u /header_broken_check" );
+ is( $result->return_code, 0, "header_check search for string");
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - 138 bytes in [\d\.]+ second/', "Output correct" );
+
+ my $cmd;
+ $cmd = "$command -u /slow";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, "$cmd");
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+ $result->output =~ /in ([\d\.]+) second/;
+ cmp_ok( $1, ">", 1, "Time is > 1 second" );
+
+ $cmd = "$command -u /statuscode/200";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ $cmd = "$command -u /statuscode/200 -e 200";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - Status line output matched "200" - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ $cmd = "$command -u /statuscode/201";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 201 Created - \d+ bytes in [\d\.]+ second /', "Output correct: ".$result->output );
+
+ $cmd = "$command -u /statuscode/201 -e 201";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 201 Created - Status line output matched "201" - \d+ bytes in [\d\.]+ second /', "Output correct: ".$result->output );
+
+ $cmd = "$command -u /statuscode/201 -e 200";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 2, $cmd);
+ like( $result->output, '/^HTTP CRITICAL - Invalid HTTP response received from host on port \d+: HTTP/1.1 201 Created/', "Output correct: ".$result->output );
+
+ $cmd = "$command -u /statuscode/200 -e 200,201,202";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - Status line output matched "200,201,202" - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ $cmd = "$command -u /statuscode/201 -e 200,201,202";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 201 Created - Status line output matched "200,201,202" - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ $cmd = "$command -u /statuscode/203 -e 200,201,202";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 2, $cmd);
+ like( $result->output, '/^HTTP CRITICAL - Invalid HTTP response received from host on port (\d+): HTTP/1.1 203 Non-Authoritative Information/', "Output correct: ".$result->output );
+
+ $cmd = "$command -j HEAD -u /method";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 HEAD - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ $cmd = "$command -j POST -u /method";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 POST - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ $cmd = "$command -j GET -u /method";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 GET - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ $cmd = "$command -u /method";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 GET - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ $cmd = "$command -P foo -u /method";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 POST - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ $cmd = "$command -j DELETE -u /method";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 1, $cmd);
+ like( $result->output, '/^HTTP WARNING: HTTP/1.1 405 Method Not Allowed/', "Output correct: ".$result->output );
+
+ $cmd = "$command -j foo -u /method";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 2, $cmd);
+ like( $result->output, '/^HTTP CRITICAL: HTTP/1.1 501 Not Implemented/', "Output correct: ".$result->output );
+
+ $cmd = "$command -P stufftoinclude -u /postdata -s POST:stufftoinclude";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ $cmd = "$command -j PUT -P stufftoinclude -u /postdata -s PUT:stufftoinclude";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ # To confirm that the free doesn't segfault
+ $cmd = "$command -P stufftoinclude -j PUT -u /postdata -s PUT:stufftoinclude";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ $cmd = "$command -u /redirect";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 301 Moved Permanently - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ $cmd = "$command -f follow -u /redirect";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ $cmd = "$command -u /redirect -k 'follow: me'";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 301 Moved Permanently - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ $cmd = "$command -f follow -u /redirect -k 'follow: me'";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ $cmd = "$command -f sticky -u /redirect -k 'follow: me'";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ $cmd = "$command -f stickyport -u /redirect -k 'follow: me'";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ # These tests may block
+ print "ALRM\n";
+
+ # stickyport - on full urlS port is set back to 80 otherwise
+ $cmd = "$command -f stickyport -u /redir_external -t 5 -s redirected";
+ eval {
+ local $SIG{ALRM} = sub { die "alarm\n" };
+ alarm(2);
+ $result = NPTest->testCmd( $cmd );
+ alarm(0); };
+ isnt( $@, "alarm\n", $cmd );
+ is( $result->return_code, 0, $cmd );
+
+ # Let's hope there won't be any web server on :80 returning "redirected"!
+ $cmd = "$command -f sticky -u /redir_external -t 5 -s redirected";
+ eval {
+ local $SIG{ALRM} = sub { die "alarm\n" };
+ alarm(2);
+ $result = NPTest->testCmd( $cmd );
+ alarm(0); };
+ isnt( $@, "alarm\n", $cmd );
+ isnt( $result->return_code, 0, $cmd );
+
+ # Test an external address - timeout
+ SKIP: {
+ skip "This doesn't seem to work all the time", 1 unless ($ENV{HTTP_EXTERNAL});
+ $cmd = "$command -f follow -u /redir_external -t 5";
+ eval {
+ $result = NPTest->testCmd( $cmd, 2 );
+ };
+ like( $@, "/timeout in command: $cmd/", $cmd );
+ }
+
+ $cmd = "$command -u /timeout -t 5";
+ eval {
+ $result = NPTest->testCmd( $cmd, 2 );
+ };
+ like( $@, "/timeout in command: $cmd/", $cmd );
+
+ $cmd = "$command -f follow -u /redir_timeout -t 2";
+ eval {
+ $result = NPTest->testCmd( $cmd, 5 );
+ };
+ is( $@, "", $cmd );
+
+}
diff --git a/plugins/tests/check_http.t b/plugins/tests/check_http.t
index 1bc0ecb7..188f5e75 100755
--- a/plugins/tests/check_http.t
+++ b/plugins/tests/check_http.t
@@ -4,13 +4,15 @@
#
# To create the https server certificate:
# openssl req -new -x509 -keyout server-key.pem -out server-cert.pem -days 3650 -nodes
-# Country Name (2 letter code) [AU]:UK
-# State or Province Name (full name) [Some-State]:Derbyshire
-# Locality Name (eg, city) []:Belper
+# to create a new expired certificate:
+# faketime '2008-01-01 12:00:00' openssl req -new -x509 -keyout expired-key.pem -out expired-cert.pem -days 1 -nodes
+# Country Name (2 letter code) [AU]:DE
+# State or Province Name (full name) [Some-State]:Bavaria
+# Locality Name (eg, city) []:Munich
# Organization Name (eg, company) [Internet Widgits Pty Ltd]:Monitoring Plugins
# Organizational Unit Name (eg, section) []:
-# Common Name (eg, YOUR name) []:Ton Voon
-# Email Address []:tonvoon@mac.com
+# Common Name (e.g. server FQDN or YOUR name) []:Monitoring Plugins
+# Email Address []:devel@monitoring-plugins.org
use strict;
use Test::More;
@@ -20,6 +22,7 @@ use FindBin qw($Bin);
$ENV{'LC_TIME'} = "C";
my $common_tests = 70;
+my $virtual_port_tests = 8;
my $ssl_only_tests = 8;
# Check that all dependent modules are available
eval "use HTTP::Daemon 6.01;";
@@ -29,13 +32,16 @@ eval {
require HTTP::Response;
};
+my $plugin = 'check_http';
+$plugin = 'check_curl' if $0 =~ m/check_curl/mx;
+
if ($@) {
plan skip_all => "Missing required module for test: $@";
} else {
- if (-x "./check_http") {
- plan tests => $common_tests * 2 + $ssl_only_tests;
+ if (-x "./$plugin") {
+ plan tests => $common_tests * 2 + $ssl_only_tests + $virtual_port_tests;
} else {
- plan skip_all => "No check_http compiled";
+ plan skip_all => "No $plugin compiled";
}
}
@@ -85,6 +91,8 @@ if ($pid) {
exit;
}
} else {
+ # closing the connection after -C cert checks make the daemon exit with a sigpipe otherwise
+ local $SIG{'PIPE'} = 'IGNORE';
my $d = HTTP::Daemon::SSL->new(
LocalPort => $port_https,
LocalAddr => "127.0.0.1",
@@ -96,8 +104,6 @@ if ($pid) {
exit;
}
}
- # give our webservers some time to startup
- sleep(1);
} else {
# Child
#print "child\n";
@@ -110,6 +116,9 @@ if ($pid) {
exit;
}
+# give our webservers some time to startup
+sleep(3);
+
# Run the same server on http and https
sub run_server {
my $d = shift;
@@ -158,6 +167,11 @@ sub run_server {
$c->send_basic_header;
$c->send_header('foo');
$c->send_crlf;
+ } elsif ($r->url->path eq "/virtual_port") {
+ # return sent Host header
+ $c->send_basic_header;
+ $c->send_crlf;
+ $c->send_response(HTTP::Response->new( 200, 'OK', undef, $r->header ('Host')));
} else {
$c->send_error(HTTP::Status->RC_FORBIDDEN);
}
@@ -179,7 +193,7 @@ if ($ARGV[0] && $ARGV[0] eq "-d") {
}
my $result;
-my $command = "./check_http -H 127.0.0.1";
+my $command = "./$plugin -H 127.0.0.1";
run_common_tests( { command => "$command -p $port_http" } );
SKIP: {
@@ -188,25 +202,56 @@ SKIP: {
$result = NPTest->testCmd( "$command -p $port_https -S -C 14" );
is( $result->return_code, 0, "$command -p $port_https -S -C 14" );
- is( $result->output, 'OK - Certificate \'Ton Voon\' will expire on Sun Mar 3 21:41:28 2019 +0000.', "output ok" );
+ is( $result->output, "OK - Certificate 'Monitoring Plugins' will expire on Fri Feb 16 15:31:44 2029 +0000.", "output ok" );
$result = NPTest->testCmd( "$command -p $port_https -S -C 14000" );
is( $result->return_code, 1, "$command -p $port_https -S -C 14000" );
- like( $result->output, '/WARNING - Certificate \'Ton Voon\' expires in \d+ day\(s\) \(Sun Mar 3 21:41:28 2019 \+0000\)./', "output ok" );
+ like( $result->output, '/WARNING - Certificate \'Monitoring Plugins\' expires in \d+ day\(s\) \(Fri Feb 16 15:31:44 2029 \+0000\)./', "output ok" );
# Expired cert tests
$result = NPTest->testCmd( "$command -p $port_https -S -C 13960,14000" );
is( $result->return_code, 2, "$command -p $port_https -S -C 13960,14000" );
- like( $result->output, '/CRITICAL - Certificate \'Ton Voon\' expires in \d+ day\(s\) \(Sun Mar 3 21:41:28 2019 \+0000\)./', "output ok" );
+ like( $result->output, '/CRITICAL - Certificate \'Monitoring Plugins\' expires in \d+ day\(s\) \(Fri Feb 16 15:31:44 2029 \+0000\)./', "output ok" );
$result = NPTest->testCmd( "$command -p $port_https_expired -S -C 7" );
is( $result->return_code, 2, "$command -p $port_https_expired -S -C 7" );
is( $result->output,
- 'CRITICAL - Certificate \'Ton Voon\' expired on Thu Mar 5 00:13:16 2009 +0000.',
+ 'CRITICAL - Certificate \'Monitoring Plugins\' expired on Wed Jan 2 11:00:26 2008 +0000.',
"output ok" );
}
+my $cmd;
+# check virtual port behaviour
+#
+# http without virtual port
+$cmd = "$command -p $port_http -u /virtual_port -r ^127.0.0.1:$port_http\$";
+$result = NPTest->testCmd( $cmd );
+is( $result->return_code, 0, $cmd);
+like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+# http with virtual port
+$cmd = "$command:80 -p $port_http -u /virtual_port -r ^127.0.0.1\$";
+$result = NPTest->testCmd( $cmd );
+is( $result->return_code, 0, $cmd);
+like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+SKIP: {
+ skip "HTTP::Daemon::SSL not installed", 4 if ! exists $servers->{https};
+ # https without virtual port
+ $cmd = "$command -p $port_https --ssl -u /virtual_port -r ^127.0.0.1:$port_https\$";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+
+ # https with virtual port
+ $cmd = "$command:443 -p $port_https --ssl -u /virtual_port -r ^127.0.0.1\$";
+ $result = NPTest->testCmd( $cmd );
+ is( $result->return_code, 0, $cmd);
+ like( $result->output, '/^HTTP OK: HTTP/1.1 200 OK - \d+ bytes in [\d\.]+ second/', "Output correct: ".$result->output );
+}
+
+
sub run_common_tests {
my ($opts) = @_;
my $command = $opts->{command};
@@ -372,27 +417,29 @@ sub run_common_tests {
# stickyport - on full urlS port is set back to 80 otherwise
$cmd = "$command -f stickyport -u /redir_external -t 5 -s redirected";
+ alarm(2);
eval {
local $SIG{ALRM} = sub { die "alarm\n" };
- alarm(2);
$result = NPTest->testCmd( $cmd );
- alarm(0); };
+ };
isnt( $@, "alarm\n", $cmd );
+ alarm(0);
is( $result->return_code, 0, $cmd );
# Let's hope there won't be any web server on :80 returning "redirected"!
$cmd = "$command -f sticky -u /redir_external -t 5 -s redirected";
+ alarm(2);
eval {
local $SIG{ALRM} = sub { die "alarm\n" };
- alarm(2);
$result = NPTest->testCmd( $cmd );
- alarm(0); };
+ };
isnt( $@, "alarm\n", $cmd );
+ alarm(0);
isnt( $result->return_code, 0, $cmd );
# Test an external address - timeout
SKIP: {
- skip "This doesn't seems to work all the time", 1 unless ($ENV{HTTP_EXTERNAL});
+ skip "This doesn't seem to work all the time", 1 unless ($ENV{HTTP_EXTERNAL});
$cmd = "$command -f follow -u /redir_external -t 5";
eval {
$result = NPTest->testCmd( $cmd, 2 );
diff --git a/plugins/tests/check_procs.t b/plugins/tests/check_procs.t
index 54d43d9b..3af218f5 100755
--- a/plugins/tests/check_procs.t
+++ b/plugins/tests/check_procs.t
@@ -8,13 +8,14 @@ use Test::More;
use NPTest;
if (-x "./check_procs") {
- plan tests => 50;
+ plan tests => 52;
} else {
plan skip_all => "No check_procs compiled";
}
my $result;
-my $command = "./check_procs --input-file=tests/var/ps-axwo.darwin";
+my $command = "./check_procs --input-file=tests/var/ps-axwo.darwin";
+my $cmd_etime = "./check_procs --input-file=tests/var/ps-axwo.debian";
$result = NPTest->testCmd( "$command" );
is( $result->return_code, 0, "Run with no options" );
@@ -69,9 +70,21 @@ SKIP: {
like( $result->output, '/^PROCS OK: 0 processes with UID = -2 \(nobody\), args \'UsB\'/', "Output correct" );
};
-$result = NPTest->testCmd( "$command --ereg-argument-array='mdworker.*501'" );
-is( $result->return_code, 0, "Checking regexp search of arguments" );
-is( $result->output, "PROCS OK: 1 process with regex args 'mdworker.*501' | procs=1;;;0;", "Output correct" );
+SKIP: {
+ skip 'check_procs is compiled with etime format support', 2 if `$command -vvv` =~ m/etime/mx;
+
+ $result = NPTest->testCmd( "$command --ereg-argument-array='mdworker.*501'" );
+ is( $result->return_code, 0, "Checking regexp search of arguments" );
+ is( $result->output, "PROCS OK: 1 process with regex args 'mdworker.*501' | procs=1;;;0;", "Output correct" );
+}
+
+SKIP: {
+ skip 'check_procs is compiled without etime format support', 2 if `$cmd_etime -vvv` !~ m/etime/mx;
+
+ $result = NPTest->testCmd( "$cmd_etime -m ELAPSED -C apache2 -w 1000 -c 2000" );
+ is( $result->return_code, 2, "Checking elapsed time threshold" );
+ is( $result->output, "ELAPSED CRITICAL: 10 crit, 0 warn out of 10 processes with command name 'apache2' | procs=10;;;0; procs_warn=0;;;0; procs_crit=10;;;0;", "Output correct" );
+}
$result = NPTest->testCmd( "$command --vsz 1000000" );
is( $result->return_code, 0, "Checking filter by VSZ" );
@@ -83,7 +96,7 @@ is( $result->output, 'PROCS OK: 3 processes with RSS >= 100000 | procs=3;;;0;',
$result = NPTest->testCmd( "$command -s S" );
is( $result->return_code, 0, "Checking filter for sleeping processes" );
-like( $result->output, '/^PROCS OK: 44 processes with STATE = S/', "Output correct" );
+like( $result->output, '/^PROCS OK: 88 processes with STATE = S/', "Output correct" );
$result = NPTest->testCmd( "$command -s Z" );
is( $result->return_code, 0, "Checking filter for zombies" );
@@ -129,4 +142,3 @@ is( $result->output, 'RSS CRITICAL: 5 crit, 0 warn out of 95 processes [WindowSe
$result = NPTest->testCmd( "$command --ereg-argument-array='(nosuchname|nosuch2name)'" );
is( $result->return_code, 0, "Checking no pipe symbol in output" );
is( $result->output, "PROCS OK: 0 processes with regex args '(nosuchname,nosuch2name)' | procs=0;;;0;", "Output correct" );
-
diff --git a/plugins/tests/check_snmp.t b/plugins/tests/check_snmp.t
index 73a68b20..0a77fa8a 100755
--- a/plugins/tests/check_snmp.t
+++ b/plugins/tests/check_snmp.t
@@ -7,8 +7,9 @@ use strict;
use Test::More;
use NPTest;
use FindBin qw($Bin);
+use POSIX qw/strftime/;
-my $tests = 67;
+my $tests = 73;
# Check that all dependent modules are available
eval {
require NetSNMP::OID;
@@ -37,6 +38,7 @@ if ($@) {
my $port_snmp = 16100 + int(rand(100));
+my $faketime = -x '/usr/bin/faketime' ? 1 : 0;
# Start up server
my @pids;
@@ -118,77 +120,81 @@ like($res->output, '/'.quotemeta('SNMP OK - And now have fun with with this: \"C
"And now have fun with with this: \"C:\\\\\"
because we\'re not done yet!"').'/m', "Attempt to confuse parser No.3");
-system("rm -f ".$ENV{'MP_STATE_PATH'}."/check_snmp/*");
-$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -w 600" );
-is($res->return_code, 0, "Returns OK");
-is($res->output, "No previous data to calculate rate - assume okay");
+system("rm -f ".$ENV{'MP_STATE_PATH'}."/*/check_snmp/*");
-# Need to sleep, otherwise duration=0
-sleep 1;
+# run rate checks with faketime. rate checks depend on the exact amount of time spend between the
+# plugin runs which may fail on busy machines.
+# using faketime removes this race condition and also saves all the sleeps in between.
+SKIP: {
+ skip "No faketime binary found", 28 if !$faketime;
-$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -w 600" );
-is($res->return_code, 1, "WARNING - due to going above rate calculation" );
-is($res->output, "SNMP RATE WARNING - *666* | iso.3.6.1.4.1.8072.3.2.67.10=666;600 ");
+ my $ts = time();
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts))."' ./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -w 600" );
+ is($res->return_code, 0, "Returns OK");
+ is($res->output, "No previous data to calculate rate - assume okay");
-$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -w 600" );
-is($res->return_code, 3, "UNKNOWN - basically the divide by zero error" );
-is($res->output, "Time duration between plugin calls is invalid");
+ # test rate 1 second later
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts+1))."' ./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -w 600" );
+ is($res->return_code, 1, "WARNING - due to going above rate calculation" );
+ is($res->output, "SNMP RATE WARNING - *666* | iso.3.6.1.4.1.8072.3.2.67.10=666;600 ");
+ # test rate with same time
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts+1))."' ./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -w 600" );
+ is($res->return_code, 3, "UNKNOWN - basically the divide by zero error" );
+ is($res->output, "Time duration between plugin calls is invalid");
-$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets" );
-is($res->return_code, 0, "OK for first call" );
-is($res->output, "No previous data to calculate rate - assume okay" );
-# Need to sleep, otherwise duration=0
-sleep 1;
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts))."' ./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets" );
+ is($res->return_code, 0, "OK for first call" );
+ is($res->output, "No previous data to calculate rate - assume okay" );
-$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets" );
-is($res->return_code, 0, "OK as no thresholds" );
-is($res->output, "SNMP RATE OK - inoctets 666 | inoctets=666 ", "Check label");
+ # test rate 1 second later
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts+1))."' ./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets" );
+ is($res->return_code, 0, "OK as no thresholds" );
+ is($res->output, "SNMP RATE OK - inoctets 666 | inoctets=666 ", "Check label");
-sleep 2;
+ # test rate 3 seconds later
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts+3))."' ./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets" );
+ is($res->return_code, 0, "OK as no thresholds" );
+ is($res->output, "SNMP RATE OK - inoctets 333 | inoctets=333 ", "Check rate decreases due to longer interval");
-$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets" );
-is($res->return_code, 0, "OK as no thresholds" );
-is($res->output, "SNMP RATE OK - inoctets 333 | inoctets=333 ", "Check rate decreases due to longer interval");
+ # label performance data check
+ $res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l test" );
+ is($res->return_code, 0, "OK as no thresholds" );
+ is($res->output, "SNMP OK - test 67996 | test=67996c ", "Check label");
-# label performance data check
-$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l test" );
-is($res->return_code, 0, "OK as no thresholds" );
-is($res->output, "SNMP OK - test 67996 | test=67996c ", "Check label");
+ $res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l \"test'test\"" );
+ is($res->return_code, 0, "OK as no thresholds" );
+ is($res->output, "SNMP OK - test'test 68662 | \"test'test\"=68662c ", "Check label");
-$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l \"test'test\"" );
-is($res->return_code, 0, "OK as no thresholds" );
-is($res->output, "SNMP OK - test'test 68662 | \"test'test\"=68662c ", "Check label");
+ $res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l 'test\"test'" );
+ is($res->return_code, 0, "OK as no thresholds" );
+ is($res->output, "SNMP OK - test\"test 69328 | 'test\"test'=69328c ", "Check label");
-$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l 'test\"test'" );
-is($res->return_code, 0, "OK as no thresholds" );
-is($res->output, "SNMP OK - test\"test 69328 | 'test\"test'=69328c ", "Check label");
+ $res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l test -O" );
+ is($res->return_code, 0, "OK as no thresholds" );
+ is($res->output, "SNMP OK - test 69994 | iso.3.6.1.4.1.8072.3.2.67.10=69994c ", "Check label");
-$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l test -O" );
-is($res->return_code, 0, "OK as no thresholds" );
-is($res->output, "SNMP OK - test 69994 | iso.3.6.1.4.1.8072.3.2.67.10=69994c ", "Check label");
+ $res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10" );
+ is($res->return_code, 0, "OK as no thresholds" );
+ is($res->output, "SNMP OK - 70660 | iso.3.6.1.4.1.8072.3.2.67.10=70660c ", "Check label");
-$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10" );
-is($res->return_code, 0, "OK as no thresholds" );
-is($res->output, "SNMP OK - 70660 | iso.3.6.1.4.1.8072.3.2.67.10=70660c ", "Check label");
+ $res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l 'test test'" );
+ is($res->return_code, 0, "OK as no thresholds" );
+ is($res->output, "SNMP OK - test test 71326 | 'test test'=71326c ", "Check label");
-$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 -l 'test test'" );
-is($res->return_code, 0, "OK as no thresholds" );
-is($res->output, "SNMP OK - test test 71326 | 'test test'=71326c ", "Check label");
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts))."' ./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets_per_minute --rate-multiplier=60" );
+ is($res->return_code, 0, "OK for first call" );
+ is($res->output, "No previous data to calculate rate - assume okay" );
-$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets_per_minute --rate-multiplier=60" );
-is($res->return_code, 0, "OK for first call" );
-is($res->output, "No previous data to calculate rate - assume okay" );
-
-# Need to sleep, otherwise duration=0
-sleep 1;
+ # test 1 second later
+ $res = NPTest->testCmd("LC_TIME=C TZ=UTC faketime -f '".strftime("%Y-%m-%d %H:%M:%S", localtime($ts+1))."' ./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets_per_minute --rate-multiplier=60" );
+ is($res->return_code, 0, "OK as no thresholds" );
+ is($res->output, "SNMP RATE OK - inoctets_per_minute 39960 | inoctets_per_minute=39960 ", "Checking multiplier");
+};
-$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10 --rate -l inoctets_per_minute --rate-multiplier=60" );
-is($res->return_code, 0, "OK as no thresholds" );
-is($res->output, "SNMP RATE OK - inoctets_per_minute 39960 | inoctets_per_minute=39960 ", "Checking multiplier");
$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.11 -s '\"stringtests\"'" );
@@ -245,9 +251,20 @@ is($res->output, 'SNMP CRITICAL - *-4* | iso.3.6.1.4.1.8072.3.2.67.17=-4;-2:;-3:
$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.18 -c '~:-6.5'" );
is($res->return_code, 0, "Negative float OK" );
-is($res->output, 'SNMP OK - -6.6 | iso.3.6.1.4.1.8072.3.2.67.18=-6.6;;~:-6.5 ', "Negative float OK output" );
+is($res->output, 'SNMP OK - -6.6 | iso.3.6.1.4.1.8072.3.2.67.18=-6.6;;@-6.5:~ ', "Negative float OK output" );
$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.18 -w '~:-6.65' -c '~:-6.55'" );
is($res->return_code, 1, "Negative float WARNING" );
-is($res->output, 'SNMP WARNING - *-6.6* | iso.3.6.1.4.1.8072.3.2.67.18=-6.6;~:-6.65;~:-6.55 ', "Negative float WARNING output" );
+is($res->output, 'SNMP WARNING - *-6.6* | iso.3.6.1.4.1.8072.3.2.67.18=-6.6;@-6.65:~;@-6.55:~ ', "Negative float WARNING output" );
+
+$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10,.1.3.6.1.4.1.8072.3.2.67.17 -w '1:100000,-10:20' -c '2:200000,-20:30'" );
+is($res->return_code, 0, "Multiple OIDs with thresholds" );
+like($res->output, '/SNMP OK - \d+ -4 | iso.3.6.1.4.1.8072.3.2.67.10=\d+c;1:100000;2:200000 iso.3.6.1.4.1.8072.3.2.67.17=-4;-10:20;-20:30/', "Multiple OIDs with thresholds output" );
+
+$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10,.1.3.6.1.4.1.8072.3.2.67.17 -w '1:100000,-1:2' -c '2:200000,-20:30'" );
+is($res->return_code, 1, "Multiple OIDs with thresholds" );
+like($res->output, '/SNMP WARNING - \d+ \*-4\* | iso.3.6.1.4.1.8072.3.2.67.10=\d+c;1:100000;2:200000 iso.3.6.1.4.1.8072.3.2.67.17=-4;-10:20;-20:30/', "Multiple OIDs with thresholds output" );
+$res = NPTest->testCmd( "./check_snmp -H 127.0.0.1 -C public -p $port_snmp -o .1.3.6.1.4.1.8072.3.2.67.10,.1.3.6.1.4.1.8072.3.2.67.17 -w 1,2 -c 1" );
+is($res->return_code, 2, "Multiple OIDs with some thresholds" );
+like($res->output, '/SNMP CRITICAL - \*\d+\* \*-4\* | iso.3.6.1.4.1.8072.3.2.67.10=\d+c;1;2 iso.3.6.1.4.1.8072.3.2.67.17=-4;;/', "Multiple OIDs with thresholds output" );
diff --git a/plugins/tests/var/ps-axwo.debian b/plugins/tests/var/ps-axwo.debian
new file mode 100644
index 00000000..5889e9a4
--- /dev/null
+++ b/plugins/tests/var/ps-axwo.debian
@@ -0,0 +1,219 @@
+STAT UID PID PPID VSZ RSS %CPU ELAPSED COMMAND COMMAND
+Ss 0 1 0 167244 7144 0.1 26-03:07:26 systemd /lib/systemd/systemd --system --deserialize 17
+S 0 2 0 0 0 0.0 26-03:07:26 kthreadd [kthreadd]
+I< 0 3 2 0 0 0.0 26-03:07:26 rcu_gp [rcu_gp]
+I< 0 4 2 0 0 0.0 26-03:07:26 rcu_par_gp [rcu_par_gp]
+I< 0 6 2 0 0 0.0 26-03:07:26 kworker/0:0H-ev [kworker/0:0H-events_highpri]
+I< 0 9 2 0 0 0.0 26-03:07:26 mm_percpu_wq [mm_percpu_wq]
+S 0 10 2 0 0 0.0 26-03:07:26 rcu_tasks_rude_ [rcu_tasks_rude_]
+S 0 11 2 0 0 0.0 26-03:07:26 rcu_tasks_trace [rcu_tasks_trace]
+S 0 12 2 0 0 0.0 26-03:07:26 ksoftirqd/0 [ksoftirqd/0]
+I 0 13 2 0 0 0.0 26-03:07:26 rcu_sched [rcu_sched]
+S 0 14 2 0 0 0.0 26-03:07:26 migration/0 [migration/0]
+S 0 15 2 0 0 0.0 26-03:07:26 cpuhp/0 [cpuhp/0]
+S 0 16 2 0 0 0.0 26-03:07:26 cpuhp/1 [cpuhp/1]
+S 0 17 2 0 0 0.0 26-03:07:26 migration/1 [migration/1]
+S 0 18 2 0 0 0.0 26-03:07:26 ksoftirqd/1 [ksoftirqd/1]
+I< 0 20 2 0 0 0.0 26-03:07:26 kworker/1:0H-ev [kworker/1:0H-events_highpri]
+S 0 21 2 0 0 0.0 26-03:07:26 cpuhp/2 [cpuhp/2]
+S 0 22 2 0 0 0.0 26-03:07:26 migration/2 [migration/2]
+S 0 23 2 0 0 0.0 26-03:07:26 ksoftirqd/2 [ksoftirqd/2]
+I< 0 25 2 0 0 0.0 26-03:07:26 kworker/2:0H-ev [kworker/2:0H-events_highpri]
+S 0 26 2 0 0 0.0 26-03:07:26 cpuhp/3 [cpuhp/3]
+S 0 27 2 0 0 0.0 26-03:07:26 migration/3 [migration/3]
+S 0 28 2 0 0 0.0 26-03:07:26 ksoftirqd/3 [ksoftirqd/3]
+I< 0 30 2 0 0 0.0 26-03:07:26 kworker/3:0H-ev [kworker/3:0H-events_highpri]
+S 0 35 2 0 0 0.0 26-03:07:26 kdevtmpfs [kdevtmpfs]
+I< 0 36 2 0 0 0.0 26-03:07:26 netns [netns]
+S 0 37 2 0 0 0.0 26-03:07:26 kauditd [kauditd]
+S 0 38 2 0 0 0.0 26-03:07:26 khungtaskd [khungtaskd]
+S 0 39 2 0 0 0.0 26-03:07:26 oom_reaper [oom_reaper]
+I< 0 40 2 0 0 0.0 26-03:07:26 writeback [writeback]
+S 0 41 2 0 0 0.0 26-03:07:26 kcompactd0 [kcompactd0]
+SN 0 42 2 0 0 0.0 26-03:07:26 ksmd [ksmd]
+SN 0 43 2 0 0 0.0 26-03:07:26 khugepaged [khugepaged]
+I< 0 62 2 0 0 0.0 26-03:07:26 kintegrityd [kintegrityd]
+I< 0 63 2 0 0 0.0 26-03:07:26 kblockd [kblockd]
+I< 0 64 2 0 0 0.0 26-03:07:26 blkcg_punt_bio [blkcg_punt_bio]
+I< 0 65 2 0 0 0.0 26-03:07:26 edac-poller [edac-poller]
+I< 0 66 2 0 0 0.0 26-03:07:26 devfreq_wq [devfreq_wq]
+I< 0 67 2 0 0 0.0 26-03:07:26 kworker/2:1H-ev [kworker/2:1H-events_highpri]
+S 0 70 2 0 0 0.3 26-03:07:25 kswapd0 [kswapd0]
+I< 0 71 2 0 0 0.0 26-03:07:25 kthrotld [kthrotld]
+I< 0 72 2 0 0 0.0 26-03:07:25 acpi_thermal_pm [acpi_thermal_pm]
+I< 0 74 2 0 0 0.0 26-03:07:25 ipv6_addrconf [ipv6_addrconf]
+I< 0 80 2 0 0 0.0 26-03:07:25 kworker/3:1H-ev [kworker/3:1H-events_highpri]
+I< 0 84 2 0 0 0.0 26-03:07:25 kstrp [kstrp]
+I< 0 87 2 0 0 0.0 26-03:07:25 zswap-shrink [zswap-shrink]
+I< 0 110 2 0 0 0.0 26-03:07:25 kworker/0:1H-ev [kworker/0:1H-events_highpri]
+I< 0 141 2 0 0 0.0 26-03:07:25 ata_sff [ata_sff]
+S 0 143 2 0 0 0.0 26-03:07:25 scsi_eh_0 [scsi_eh_0]
+I< 0 144 2 0 0 0.0 26-03:07:25 scsi_tmf_0 [scsi_tmf_0]
+S 0 145 2 0 0 0.0 26-03:07:25 scsi_eh_1 [scsi_eh_1]
+I< 0 146 2 0 0 0.0 26-03:07:25 scsi_tmf_1 [scsi_tmf_1]
+S 0 147 2 0 0 0.0 26-03:07:25 scsi_eh_2 [scsi_eh_2]
+I< 0 148 2 0 0 0.0 26-03:07:25 scsi_tmf_2 [scsi_tmf_2]
+S 0 149 2 0 0 0.0 26-03:07:25 scsi_eh_3 [scsi_eh_3]
+I< 0 150 2 0 0 0.0 26-03:07:25 scsi_tmf_3 [scsi_tmf_3]
+S 0 151 2 0 0 0.0 26-03:07:25 scsi_eh_4 [scsi_eh_4]
+I< 0 152 2 0 0 0.0 26-03:07:25 scsi_tmf_4 [scsi_tmf_4]
+S 0 153 2 0 0 0.0 26-03:07:25 scsi_eh_5 [scsi_eh_5]
+I< 0 154 2 0 0 0.0 26-03:07:25 scsi_tmf_5 [scsi_tmf_5]
+S 0 158 2 0 0 0.0 26-03:07:25 card0-crtc0 [card0-crtc0]
+S 0 159 2 0 0 0.0 26-03:07:25 card0-crtc1 [card0-crtc1]
+S 0 160 2 0 0 0.0 26-03:07:25 card0-crtc2 [card0-crtc2]
+I< 0 162 2 0 0 0.0 26-03:07:25 kworker/1:1H-ev [kworker/1:1H-events_highpri]
+S 0 163 2 0 0 0.0 26-03:07:25 scsi_eh_6 [scsi_eh_6]
+I< 0 164 2 0 0 0.0 26-03:07:25 scsi_tmf_6 [scsi_tmf_6]
+S 0 165 2 0 0 0.0 26-03:07:25 usb-storage [usb-storage]
+I< 0 167 2 0 0 0.0 26-03:07:25 uas [uas]
+I< 0 176 2 0 0 0.0 26-03:07:25 kdmflush [kdmflush]
+I< 0 177 2 0 0 0.0 26-03:07:25 kdmflush [kdmflush]
+S 0 202 2 0 0 0.0 26-03:07:24 scsi_eh_7 [scsi_eh_7]
+I< 0 203 2 0 0 0.0 26-03:07:24 scsi_tmf_7 [scsi_tmf_7]
+S 0 204 2 0 0 0.0 26-03:07:24 usb-storage [usb-storage]
+I< 0 232 2 0 0 0.0 26-03:07:23 btrfs-worker [btrfs-worker]
+I< 0 233 2 0 0 0.0 26-03:07:23 btrfs-worker-hi [btrfs-worker-hi]
+I< 0 234 2 0 0 0.0 26-03:07:23 btrfs-delalloc [btrfs-delalloc]
+I< 0 235 2 0 0 0.0 26-03:07:23 btrfs-flush_del [btrfs-flush_del]
+I< 0 236 2 0 0 0.0 26-03:07:23 btrfs-cache [btrfs-cache]
+I< 0 237 2 0 0 0.0 26-03:07:23 btrfs-fixup [btrfs-fixup]
+I< 0 238 2 0 0 0.0 26-03:07:23 btrfs-endio [btrfs-endio]
+I< 0 239 2 0 0 0.0 26-03:07:23 btrfs-endio-met [btrfs-endio-met]
+I< 0 240 2 0 0 0.0 26-03:07:23 btrfs-endio-met [btrfs-endio-met]
+I< 0 241 2 0 0 0.0 26-03:07:23 btrfs-endio-rai [btrfs-endio-rai]
+I< 0 242 2 0 0 0.0 26-03:07:23 btrfs-rmw [btrfs-rmw]
+I< 0 243 2 0 0 0.0 26-03:07:23 btrfs-endio-wri [btrfs-endio-wri]
+I< 0 244 2 0 0 0.0 26-03:07:23 btrfs-freespace [btrfs-freespace]
+I< 0 245 2 0 0 0.0 26-03:07:23 btrfs-delayed-m [btrfs-delayed-m]
+I< 0 246 2 0 0 0.0 26-03:07:23 btrfs-readahead [btrfs-readahead]
+I< 0 247 2 0 0 0.0 26-03:07:23 btrfs-qgroup-re [btrfs-qgroup-re]
+S 0 248 2 0 0 0.0 26-03:07:23 btrfs-cleaner [btrfs-cleaner]
+S 0 249 2 0 0 0.2 26-03:07:23 btrfs-transacti [btrfs-transacti]
+I< 0 317 2 0 0 0.0 26-03:07:22 rpciod [rpciod]
+I< 0 322 2 0 0 0.0 26-03:07:22 xprtiod [xprtiod]
+S 0 381 2 0 0 0.0 26-03:07:22 irq/133-mei_me [irq/133-mei_me]
+S 0 422 2 0 0 0.0 26-03:07:22 watchdogd [watchdogd]
+I< 0 523 2 0 0 0.0 26-03:07:22 led_workqueue [led_workqueue]
+I< 0 583 2 0 0 0.0 26-03:07:22 cryptd [cryptd]
+I< 0 590 2 0 0 0.0 26-03:07:22 ext4-rsv-conver [ext4-rsv-conver]
+Ss 104 693 1 12324 4292 0.5 26-03:07:21 dbus-daemon /usr/bin/dbus-daemon --system --address=systemd: --nofork --nopidfile --systemd-activation --syslog-only
+Ss 0 731 1 575120 1368 0.0 26-03:07:21 systemd-logind /lib/systemd/systemd-logind
+Ssl 0 1111 1 121248 732 0.0 26-03:07:18 unattended-upgr /usr/bin/python3 /usr/share/unattended-upgrades/unattended-upgrade-shutdown --wait-for-signal
+S 0 1141 2 0 0 0.0 26-03:07:18 lockd [lockd]
+I< 0 1459 2 0 0 0.0 26-03:07:16 nfsiod [nfsiod]
+S 0 1621 2 0 0 0.0 26-03:07:15 NFSv4 callback [NFSv4 callback]
+Ssl 0 1771 1 1548340 676 0.0 26-03:07:13 libvirtd /usr/sbin/libvirtd
+I< 0 24315 2 0 0 0.0 26-02:49:02 cifsiod [cifsiod]
+I< 0 24316 2 0 0 0.0 26-02:49:02 smb3decryptd [smb3decryptd]
+I< 0 24317 2 0 0 0.0 26-02:49:02 cifsfileinfoput [cifsfileinfoput]
+I< 0 24318 2 0 0 0.0 26-02:49:02 cifsoplockd [cifsoplockd]
+I< 0 24319 2 0 0 0.0 26-02:49:02 cifs-dfscache [cifs-dfscache]
+S 0 24322 2 0 0 0.0 26-02:49:02 cifsd [cifsd]
+I< 0 24413 2 0 0 0.0 26-02:48:57 btrfs-worker [btrfs-worker]
+I< 0 24414 2 0 0 0.0 26-02:48:57 btrfs-worker-hi [btrfs-worker-hi]
+I< 0 24415 2 0 0 0.0 26-02:48:57 btrfs-delalloc [btrfs-delalloc]
+I< 0 24416 2 0 0 0.0 26-02:48:57 btrfs-flush_del [btrfs-flush_del]
+I< 0 24418 2 0 0 0.0 26-02:48:57 btrfs-cache [btrfs-cache]
+I< 0 24419 2 0 0 0.0 26-02:48:57 btrfs-fixup [btrfs-fixup]
+I< 0 24420 2 0 0 0.0 26-02:48:57 btrfs-endio [btrfs-endio]
+I< 0 24421 2 0 0 0.0 26-02:48:57 btrfs-endio-met [btrfs-endio-met]
+I< 0 24422 2 0 0 0.0 26-02:48:57 btrfs-endio-met [btrfs-endio-met]
+I< 0 24423 2 0 0 0.0 26-02:48:57 btrfs-endio-rai [btrfs-endio-rai]
+I< 0 24424 2 0 0 0.0 26-02:48:57 btrfs-rmw [btrfs-rmw]
+I< 0 24425 2 0 0 0.0 26-02:48:57 btrfs-endio-wri [btrfs-endio-wri]
+I< 0 24426 2 0 0 0.0 26-02:48:57 btrfs-freespace [btrfs-freespace]
+I< 0 24427 2 0 0 0.0 26-02:48:57 btrfs-delayed-m [btrfs-delayed-m]
+I< 0 24428 2 0 0 0.0 26-02:48:57 btrfs-readahead [btrfs-readahead]
+I< 0 24429 2 0 0 0.0 26-02:48:57 btrfs-qgroup-re [btrfs-qgroup-re]
+S 0 24450 2 0 0 0.0 26-02:48:53 btrfs-cleaner [btrfs-cleaner]
+S 0 24451 2 0 0 0.0 26-02:48:53 btrfs-transacti [btrfs-transacti]
+I< 0 747708 2 0 0 0.0 16-21:06:20 xfsalloc [xfsalloc]
+I< 0 747709 2 0 0 0.0 16-21:06:20 xfs_mru_cache [xfs_mru_cache]
+S 0 747713 2 0 0 0.0 16-21:06:20 jfsIO [jfsIO]
+S 0 747714 2 0 0 0.0 16-21:06:20 jfsCommit [jfsCommit]
+S 0 747715 2 0 0 0.0 16-21:06:20 jfsCommit [jfsCommit]
+S 0 747716 2 0 0 0.0 16-21:06:20 jfsCommit [jfsCommit]
+S 0 747717 2 0 0 0.0 16-21:06:20 jfsCommit [jfsCommit]
+S 0 747718 2 0 0 0.0 16-21:06:20 jfsSync [jfsSync]
+Ss 0 1071687 1 105976 28304 0.0 3-03:12:31 systemd-journal /lib/systemd/systemd-journald
+Ss 0 1934146 1 25672 4704 0.0 11:19:31 cupsd /usr/sbin/cupsd -l
+Ssl 0 1934148 1 182868 8540 0.0 11:19:31 cups-browsed /usr/sbin/cups-browsed
+S 13 1934155 3392655 5752 88 0.0 11:19:31 pinger (pinger)
+S< 33 1934166 3393034 57996 5460 0.0 11:19:31 apache2 /usr/sbin/apache2 -k start
+S< 33 1934167 3393034 216944 13892 0.0 11:19:30 apache2 /usr/sbin/apache2 -k start
+S< 33 1934168 3393034 216944 13756 0.0 11:19:30 apache2 /usr/sbin/apache2 -k start
+S< 33 1934169 3393034 216936 13732 0.0 11:19:30 apache2 /usr/sbin/apache2 -k start
+S< 33 1934170 3393034 216944 13888 0.0 11:19:30 apache2 /usr/sbin/apache2 -k start
+S< 33 1934172 3393034 216944 15388 0.0 11:19:30 apache2 /usr/sbin/apache2 -k start
+S< 33 1934701 3393034 216936 13736 0.0 11:19:29 apache2 /usr/sbin/apache2 -k start
+S< 33 1935056 3393034 216920 13724 0.0 11:19:28 apache2 /usr/sbin/apache2 -k start
+S 7 1936834 1934146 16652 832 0.0 11:18:12 dbus /usr/lib/cups/notifier/dbus dbus://
+S< 33 1955909 3393034 216928 13792 0.0 11:00:25 apache2 /usr/sbin/apache2 -k start
+I< 0 2531464 2 0 0 0.0 06:35:47 kworker/u9:0-i9 [kworker/u9:0-i915_flip]
+I 0 2570506 2 0 0 0.0 06:27:41 kworker/1:0-cgr [kworker/1:0-cgroup_destroy]
+I 0 2596195 2 0 0 0.0 06:21:52 kworker/1:1-eve [kworker/1:1-events]
+I 0 2785341 2 0 0 0.0 03:34:16 kworker/u8:8-bt [kworker/u8:8-btrfs-endio-write]
+I 0 2785520 2 0 0 0.0 03:33:50 kworker/3:0-eve [kworker/3:0-events]
+I 0 2798669 2 0 0 0.0 03:21:09 kworker/u8:5-bt [kworker/u8:5-btrfs-endio-write]
+Ss 0 2803015 1 5616 3108 0.0 03:17:54 cron /usr/sbin/cron -f
+I 0 2845483 2 0 0 0.0 02:38:11 kworker/0:3-eve [kworker/0:3-events]
+I 0 2939490 2 0 0 0.1 01:10:32 kworker/0:0-eve [kworker/0:0-events]
+I 0 2939754 2 0 0 0.0 01:10:26 kworker/u8:1-i9 [kworker/u8:1-i915]
+I 0 2942040 2 0 0 0.0 01:08:02 kworker/u8:7-bt [kworker/u8:7-btrfs-endio-meta]
+S 117 2954268 3392551 40044 5772 0.0 56:37 pickup pickup -l -t unix -u -c
+I 0 2965195 2 0 0 0.0 46:00 kworker/u8:0-bt [kworker/u8:0-btrfs-worker]
+I 0 2977972 2 0 0 0.0 33:54 kworker/u8:2-bt [kworker/u8:2-btrfs-endio-write]
+I 0 2985488 2 0 0 0.0 27:02 kworker/u8:3-bl [kworker/u8:3-blkcg_punt_bio]
+I 0 2987519 2 0 0 1.0 25:15 kworker/2:1-eve [kworker/2:1-events]
+I 0 2987601 2 0 0 0.0 25:03 kworker/u8:9-i9 [kworker/u8:9-i915]
+I< 0 2995218 2 0 0 0.0 18:41 kworker/u9:2-xp [kworker/u9:2-xprtiod]
+I 0 2997170 2 0 0 0.0 16:41 kworker/3:1-rcu [kworker/3:1-rcu_gp]
+I 0 3001264 2 0 0 0.0 13:01 kworker/u8:4-bt [kworker/u8:4-btrfs-endio-write]
+I 0 3004697 2 0 0 0.7 09:41 kworker/2:0-eve [kworker/2:0-events]
+I 0 3010619 2 0 0 1.0 04:29 kworker/2:2-eve [kworker/2:2-events]
+I 0 3014612 2 0 0 0.0 00:41 kworker/3:2-eve [kworker/3:2-events]
+S 0 3015082 2803015 6716 3028 0.0 00:30 cron /usr/sbin/CRON -f
+I 0 3015382 2 0 0 0.0 00:00 kworker/u8:6-bt [kworker/u8:6-btrfs-endio-meta]
+Ss 1 3392068 1 5592 504 0.0 15-02:34:39 atd /usr/sbin/atd -f
+Ssl 0 3392072 1 235796 1740 0.0 15-02:34:39 accounts-daemon /usr/libexec/accounts-daemon
+Ssl 106 3392076 1 315708 6128 0.0 15-02:34:39 colord /usr/libexec/colord
+Ss 0 3392083 1 8120 720 0.0 15-02:34:39 haveged /usr/sbin/haveged --Foreground --verbose=1
+Ss 0 3392090 1 5168 132 0.0 15-02:34:39 blkmapd /usr/sbin/blkmapd
+SNsl 111 3392094 1 155648 440 0.0 15-02:34:39 rtkit-daemon /usr/libexec/rtkit-daemon
+Ssl 0 3392097 1 290168 1352 0.0 15-02:34:39 packagekitd /usr/libexec/packagekitd
+Ss 128 3392100 1 7960 448 0.0 15-02:34:39 rpcbind /sbin/rpcbind -f -w
+Ss 0 3392114 1 13432 616 0.0 15-02:34:39 systemd-machine /lib/systemd/systemd-machined
+Ss 0 3392118 1 13316 848 0.0 15-02:34:39 sshd sshd: /usr/sbin/sshd -D [listener] 0 of 10-100 startups
+Ssl 0 3392124 1 244072 2456 0.0 15-02:34:39 upowerd /usr/libexec/upowerd
+Ssl 0 3392138 1 1634748 10684 0.0 15-02:34:39 containerd /usr/bin/containerd
+Ssl 0 3392139 1 222768 1784 0.0 15-02:34:39 rsyslogd /usr/sbin/rsyslogd -n -iNONE
+Ss 13 3392140 1 3344 152 0.0 15-02:34:39 polipo /usr/bin/polipo -c /etc/polipo/config pidFile=/var/run/polipo/polipo.pid daemonise=true
+Ssl 119 3392156 1 76472 1688 0.0 15-02:34:39 ntpd /usr/sbin/ntpd -p /var/run/ntpd.pid -g -u 119:126
+Ss 120 3392168 1 4656 276 0.0 15-02:34:39 rpc.statd /sbin/rpc.statd --no-notify
+Ss 0 3392171 1 5072 432 0.0 15-02:34:39 rpc.mountd /usr/sbin/rpc.mountd --manage-gids
+Ss 0 3392176 1 5008 288 0.0 15-02:34:39 rpc.idmapd /usr/sbin/rpc.idmapd
+Ss 105 3392184 1 15544 6816 3.5 15-02:34:39 avahi-daemon avahi-daemon: running [tsui.local]
+Ss 0 3392186 1 25288 3860 0.0 15-02:34:39 systemd-udevd /lib/systemd/systemd-udevd
+S 105 3392190 3392184 8788 52 0.0 15-02:34:39 avahi-daemon avahi-daemon: chroot helper
+Ssl 0 3392197 1 396120 4188 0.0 15-02:34:39 udisksd /usr/libexec/udisks2/udisksd
+Ssl 0 3392214 1 237504 6632 0.0 15-02:34:39 polkitd /usr/libexec/polkitd --no-debug
+Ss 0 3392284 1 9684 560 0.0 15-02:34:38 xinetd /usr/sbin/xinetd -pidfile /run/xinetd.pid -stayalive -inetd_compat -inetd_ipv6
+Ssl 0 3392285 1 314840 1352 0.0 15-02:34:38 ModemManager /usr/sbin/ModemManager
+Ss 0 3392317 1 2352 140 0.0 15-02:34:38 acpid /usr/sbin/acpid
+S 0 3392400 2 0 0 0.0 15-02:34:38 nfsd [nfsd]
+S 0 3392401 2 0 0 0.0 15-02:34:38 nfsd [nfsd]
+S 0 3392402 2 0 0 0.0 15-02:34:38 nfsd [nfsd]
+S 0 3392403 2 0 0 0.0 15-02:34:38 nfsd [nfsd]
+S 0 3392404 2 0 0 0.0 15-02:34:38 nfsd [nfsd]
+S 0 3392405 2 0 0 0.0 15-02:34:38 nfsd [nfsd]
+S 0 3392407 2 0 0 0.0 15-02:34:38 nfsd [nfsd]
+S 0 3392410 2 0 0 0.0 15-02:34:38 nfsd [nfsd]
+Ss 0 3392551 1 40092 1304 0.0 15-02:34:37 master /usr/lib/postfix/sbin/master -w
+S 117 3392553 3392551 40156 568 0.0 15-02:34:37 qmgr qmgr -l -t unix -u
+Ss 0 3392650 1 63652 4 0.0 15-02:34:36 squid /usr/sbin/squid --foreground -sYC
+Ssl 116 3392652 1 1675196 93848 0.0 15-02:34:36 mariadbd /usr/sbin/mariadbd
+S 13 3392655 3392650 81776 21232 0.0 15-02:34:36 squid (squid-1) --kid squid-1 --foreground -sYC
+S 13 3392657 3392655 5572 68 0.0 15-02:34:36 log_file_daemon (logfile-daemon) /var/log/squid/access.log
+S<s 0 3393034 1 216648 7560 0.0 15-02:34:34 apache2 /usr/sbin/apache2 -k start
+Ss 33 3393037 1 3432 180 0.0 15-02:34:34 htcacheclean /usr/bin/htcacheclean -d 120 -p /var/cache/apache2/mod_cache_disk -l 300M -n
diff --git a/plugins/tests/var/ps_axwo.debian b/plugins/tests/var/ps_axwo.debian
deleted file mode 100644
index 37a2d35e..00000000
--- a/plugins/tests/var/ps_axwo.debian
+++ /dev/null
@@ -1,84 +0,0 @@
-STAT UID PID PPID VSZ RSS %CPU COMMAND COMMAND
-S 0 1 0 1504 428 0.0 init init [2]
-SN 0 2 1 0 0 0.0 ksoftirqd/0 [ksoftirqd/0]
-S< 0 3 1 0 0 0.0 events/0 [events/0]
-S< 0 4 3 0 0 0.0 khelper [khelper]
-S< 0 5 3 0 0 0.0 kacpid [kacpid]
-S< 0 38 3 0 0 0.0 kblockd/0 [kblockd/0]
-S 0 48 3 0 0 0.0 pdflush [pdflush]
-S< 0 51 3 0 0 0.0 aio/0 [aio/0]
-S 0 50 1 0 0 0.0 kswapd0 [kswapd0]
-S 0 193 1 0 0 0.0 kseriod [kseriod]
-S 0 214 1 0 0 0.0 scsi_eh_0 [scsi_eh_0]
-S 0 221 1 0 0 0.0 khubd [khubd]
-S 0 299 1 0 0 0.3 kjournald [kjournald]
-S 0 1148 1 0 0 0.0 pciehpd_event [pciehpd_event]
-S 0 1168 1 0 0 0.0 shpchpd_event [shpchpd_event]
-Ss 1 1795 1 1612 276 0.0 portmap /sbin/portmap
-Ss 0 2200 1 1652 568 0.0 vmware-guestd /usr/sbin/vmware-guestd --background /var/run/vmware-guestd.pid
-Ss 0 2209 1 2240 532 0.0 inetd /usr/sbin/inetd
-Ss 0 2319 1 3468 792 0.0 sshd /usr/sbin/sshd
-Ss 0 2323 1 2468 676 0.0 rpc.statd /sbin/rpc.statd
-Ss 1 2332 1 1684 488 0.0 atd /usr/sbin/atd
-Ss 0 2335 1 1764 636 0.0 cron /usr/sbin/cron
-Ss+ 0 2350 1 1500 348 0.0 getty /sbin/getty 38400 tty1
-Ss+ 0 2351 1 1500 348 0.0 getty /sbin/getty 38400 tty2
-Ss+ 0 2352 1 1500 348 0.0 getty /sbin/getty 38400 tty3
-Ss+ 0 2353 1 1500 348 0.0 getty /sbin/getty 38400 tty4
-Ss+ 0 2354 1 1500 348 0.0 getty /sbin/getty 38400 tty5
-Ss+ 0 2355 1 1500 348 0.0 getty /sbin/getty 38400 tty6
-S 0 6907 1 2308 892 0.0 mysqld_safe /bin/sh /usr/bin/mysqld_safe
-S 103 6944 6907 123220 27724 0.0 mysqld /usr/sbin/mysqld --basedir=/usr --datadir=/var/lib/mysql --user=mysql --pid-file=/var/run/mysqld/mysqld.pid --skip-locking --port=3306 --socket=/var/run/mysqld/mysqld.sock
-S 0 6945 6907 1488 420 0.0 logger logger -p daemon.err -t mysqld_safe -i -t mysqld
-S 1001 17778 1 6436 1588 0.0 snmpd /usr/sbin/snmpd -u nagios -Lsd -Lf /dev/null -p/var/run/snmpd.pid
-Ss 0 17789 1 9496 5556 0.0 snmptrapd /usr/sbin/snmptrapd -t -m ALL -M /usr/share/snmp/mibs:/usr/local/monitoring/snmp/load -p /var/run/snmptrapd.pid
-Ss 0 847 2319 14452 1752 0.0 sshd sshd: tonvoon [priv]
-S 1000 857 847 14616 1832 0.0 sshd sshd: tonvoon@pts/3
-Ss 1000 860 857 2984 1620 0.0 bash -bash
-S 0 868 860 2588 1428 0.0 bash -su
-S+ 1001 877 868 2652 1568 0.0 bash -su
-S 0 6086 3 0 0 0.0 pdflush [pdflush]
-Ss 0 17832 2319 14452 1752 0.0 sshd sshd: tonvoon [priv]
-S 1000 18155 17832 14620 1840 0.0 sshd sshd: tonvoon@pts/0
-Ss 1000 18156 18155 2984 1620 0.0 bash -bash
-S 0 18518 18156 2588 1428 0.0 bash -su
-S 1001 18955 18518 2672 1600 0.0 bash -su
-Ss 0 21683 2319 14452 1756 0.0 sshd sshd: tonvoon [priv]
-S 1000 21742 21683 14620 1896 0.0 sshd sshd: tonvoon@pts/1
-Ss 1000 21743 21742 2984 1620 0.0 bash -bash
-S 0 21748 21743 2592 1432 0.0 bash -su
-S 1001 21757 21748 2620 1540 0.0 bash -su
-Ss 0 2334 2319 14452 1756 0.0 sshd sshd: tonvoon [priv]
-S 1000 2343 2334 14620 1840 0.0 sshd sshd: tonvoon@pts/2
-Ss 1000 2344 2343 2984 1620 0.0 bash -bash
-S 0 2349 2344 2592 1432 0.0 bash -su
-S+ 1001 2364 2349 2620 1520 0.0 bash -su
-T 1001 2454 2364 2096 1032 0.0 vi vi configure.in.rej
-S+ 1001 8500 21757 69604 52576 0.0 opsview_web_ser /usr/bin/perl -w ./script/opsview_web_server.pl -f -d
-Ss 0 7609 2319 14452 1756 0.0 sshd sshd: tonvoon [priv]
-S 1000 7617 7609 14460 1828 0.0 sshd sshd: tonvoon@pts/4
-Ss 1000 7618 7617 2984 1620 0.0 bash -bash
-S 0 7623 7618 2592 1432 0.0 bash -su
-S+ 1001 7632 7623 2620 1528 0.0 bash -su
-Ss 1001 12678 1 20784 17728 0.0 opsviewd opsviewd
-Ss 0 832 1 14512 6360 0.0 apache2 /usr/sbin/apache2 -k start -DSSL
-S 33 842 832 14648 6596 0.0 apache2 /usr/sbin/apache2 -k start -DSSL
-S 33 843 832 14512 6504 0.0 apache2 /usr/sbin/apache2 -k start -DSSL
-S 33 844 832 14512 6476 0.0 apache2 /usr/sbin/apache2 -k start -DSSL
-S 33 845 832 14512 6476 0.0 apache2 /usr/sbin/apache2 -k start -DSSL
-S 33 846 832 14512 6476 0.0 apache2 /usr/sbin/apache2 -k start -DSSL
-Ss 7 4081 1 2464 884 0.0 lpd /usr/sbin/lpd -s
-S 33 26484 832 14512 6476 0.0 apache2 /usr/sbin/apache2 -k start -DSSL
-Ss 1001 22324 1 20252 1612 0.1 nagios ../../bin/nagios -d /usr/local/nagios/etc/nagios.cfg
-Ss 0 23336 2319 14452 1756 0.0 sshd sshd: tonvoon [priv]
-S 1000 23339 23336 14620 1840 0.0 sshd sshd: tonvoon@pts/5
-Ss 1000 23340 23339 2996 1636 0.0 bash -bash
-S 0 23367 23340 3020 1628 0.0 bash bash
-S 1001 23370 23367 3064 1748 0.0 bash bash
-Ss 1001 23783 1 3220 764 0.0 ndo2db /usr/local/nagios/bin/ndo2db -c /usr/local/nagios/etc/ndo2db.cfg
-Ss 1001 23784 1 6428 4948 0.0 import_ndologsd import_ndologsd
-S+ 1001 9803 18955 4132 1936 0.0 ssh ssh altinity@cube02.lei.altinity
-S 1001 22505 22324 20256 1616 0.0 nagios ../../bin/nagios -d /usr/local/nagios/etc/nagios.cfg
-S 1001 22506 22505 1676 608 0.0 check_ping /usr/local/libexec/check_ping -H 192.168.10.23 -w 3000.0,80% -c 5000.0,100% -p 1
-S 1001 22507 22506 1660 492 0.0 ping /bin/ping -n -U -w 10 -c 1 192.168.10.23
-R+ 1001 22508 23370 2308 680 0.0 ps ps axwo stat uid pid ppid vsz rss pcpu comm args
diff --git a/plugins/utils.c b/plugins/utils.c
index 231af92b..17dd5814 100644
--- a/plugins/utils.c
+++ b/plugins/utils.c
@@ -27,6 +27,8 @@
#include "utils_base.h"
#include <stdarg.h>
#include <limits.h>
+#include <string.h>
+#include <errno.h>
#include <arpa/inet.h>
@@ -36,9 +38,6 @@ extern const char *progname;
#define STRLEN 64
#define TXTBLK 128
-unsigned int timeout_state = STATE_CRITICAL;
-unsigned int timeout_interval = DEFAULT_SOCKET_TIMEOUT;
-
time_t start_time, end_time;
/* **************************************************************************
@@ -148,33 +147,6 @@ print_revision (const char *command_name, const char *revision)
command_name, revision, PACKAGE, VERSION);
}
-const char *
-state_text (int result)
-{
- switch (result) {
- case STATE_OK:
- return "OK";
- case STATE_WARNING:
- return "WARNING";
- case STATE_CRITICAL:
- return "CRITICAL";
- case STATE_DEPENDENT:
- return "DEPENDENT";
- default:
- return "UNKNOWN";
- }
-}
-
-void
-timeout_alarm_handler (int signo)
-{
- if (signo == SIGALRM) {
- printf (_("%s - Plugin timed out after %d seconds\n"),
- state_text(timeout_state), timeout_interval);
- exit (timeout_state);
- }
-}
-
int
is_numeric (char *number)
{
@@ -269,6 +241,46 @@ is_intnonneg (char *number)
return FALSE;
}
+/*
+ * Checks whether the number in the string _number_ can be put inside a int64_t
+ * On success the number will be written to the _target_ address, if _target_ is not set
+ * to NULL.
+ */
+int is_int64(char *number, int64_t *target) {
+ errno = 0;
+ uint64_t tmp = strtoll(number, NULL, 10);
+ if (errno != 0) {
+ return 0;
+ }
+ if (tmp < INT64_MIN || tmp > INT64_MAX) {
+ return 0;
+ }
+ if (target != NULL) {
+ *target = tmp;
+ }
+ return 1;
+}
+
+/*
+ * Checks whether the number in the string _number_ can be put inside a uint64_t
+ * On success the number will be written to the _target_ address, if _target_ is not set
+ * to NULL.
+ */
+int is_uint64(char *number, uint64_t *target) {
+ errno = 0;
+ uint64_t tmp = strtoll(number, NULL, 10);
+ if (errno != 0) {
+ return 0;
+ }
+ if (tmp < 0 || tmp > UINT64_MAX) {
+ return 0;
+ }
+ if (target != NULL) {
+ *target = tmp;
+ }
+ return 1;
+}
+
int
is_intpercent (char *number)
{
@@ -577,10 +589,94 @@ char *perfdata (const char *label,
xasprintf (&data, "%s;", data);
if (minp)
- xasprintf (&data, "%s%ld", data, minv);
+ xasprintf (&data, "%s%ld;", data, minv);
+ else
+ xasprintf (&data, "%s;", data);
if (maxp)
- xasprintf (&data, "%s;%ld", data, maxv);
+ xasprintf (&data, "%s%ld", data, maxv);
+
+ return data;
+}
+
+
+char *perfdata_uint64 (const char *label,
+ uint64_t val,
+ const char *uom,
+ int warnp, /* Warning present */
+ uint64_t warn,
+ int critp, /* Critical present */
+ uint64_t crit,
+ int minp, /* Minimum present */
+ uint64_t minv,
+ int maxp, /* Maximum present */
+ uint64_t maxv)
+{
+ char *data = NULL;
+
+ if (strpbrk (label, "'= "))
+ xasprintf (&data, "'%s'=%ld%s;", label, val, uom);
+ else
+ xasprintf (&data, "%s=%ld%s;", label, val, uom);
+
+ if (warnp)
+ xasprintf (&data, "%s%lu;", data, warn);
+ else
+ xasprintf (&data, "%s;", data);
+
+ if (critp)
+ xasprintf (&data, "%s%lu;", data, crit);
+ else
+ xasprintf (&data, "%s;", data);
+
+ if (minp)
+ xasprintf (&data, "%s%lu;", data, minv);
+ else
+ xasprintf (&data, "%s;", data);
+
+ if (maxp)
+ xasprintf (&data, "%s%lu", data, maxv);
+
+ return data;
+}
+
+
+char *perfdata_int64 (const char *label,
+ int64_t val,
+ const char *uom,
+ int warnp, /* Warning present */
+ int64_t warn,
+ int critp, /* Critical present */
+ int64_t crit,
+ int minp, /* Minimum present */
+ int64_t minv,
+ int maxp, /* Maximum present */
+ int64_t maxv)
+{
+ char *data = NULL;
+
+ if (strpbrk (label, "'= "))
+ xasprintf (&data, "'%s'=%ld%s;", label, val, uom);
+ else
+ xasprintf (&data, "%s=%ld%s;", label, val, uom);
+
+ if (warnp)
+ xasprintf (&data, "%s%ld;", data, warn);
+ else
+ xasprintf (&data, "%s;", data);
+
+ if (critp)
+ xasprintf (&data, "%s%ld;", data, crit);
+ else
+ xasprintf (&data, "%s;", data);
+
+ if (minp)
+ xasprintf (&data, "%s%ld;", data, minv);
+ else
+ xasprintf (&data, "%s;", data);
+
+ if (maxp)
+ xasprintf (&data, "%s%ld", data, maxv);
return data;
}
@@ -709,3 +805,18 @@ char *sperfdata_int (const char *label,
return data;
}
+int
+open_max (void)
+{
+ errno = 0;
+ if (maxfd > 0)
+ return(maxfd);
+
+ if ((maxfd = sysconf (_SC_OPEN_MAX)) < 0) {
+ if (errno == 0)
+ maxfd = DEFAULT_MAXFD; /* it's indeterminate */
+ else
+ die (STATE_UNKNOWN, _("sysconf error for _SC_OPEN_MAX\n"));
+ }
+ return(maxfd);
+}
diff --git a/plugins/utils.h b/plugins/utils.h
index a436e1ca..5b54da3c 100644
--- a/plugins/utils.h
+++ b/plugins/utils.h
@@ -16,6 +16,7 @@ suite of plugins. */
/* now some functions etc are being defined in ../lib/utils_base.c */
#include "utils_base.h"
+
#ifdef NP_EXTRA_OPTS
/* Include extra-opts functions if compiled in */
#include "extra_opts.h"
@@ -29,13 +30,6 @@ suite of plugins. */
void support (void);
void print_revision (const char *, const char *);
-/* Handle timeouts */
-
-extern unsigned int timeout_state;
-extern unsigned int timeout_interval;
-
-RETSIGTYPE timeout_alarm_handler (int);
-
extern time_t start_time, end_time;
/* Test input types */
@@ -45,6 +39,8 @@ int is_intpos (char *);
int is_intneg (char *);
int is_intnonneg (char *);
int is_intpercent (char *);
+int is_uint64(char *number, uint64_t *target);
+int is_int64(char *number, int64_t *target);
int is_numeric (char *);
int is_positive (char *);
@@ -89,14 +85,18 @@ void usage4(const char *) __attribute__((noreturn));
void usage5(void) __attribute__((noreturn));
void usage_va(const char *fmt, ...) __attribute__((noreturn));
-const char *state_text (int);
-
#define max(a,b) (((a)>(b))?(a):(b))
#define min(a,b) (((a)<(b))?(a):(b))
char *perfdata (const char *, long int, const char *, int, long int,
int, long int, int, long int, int, long int);
+char *perfdata_uint64 (const char *, uint64_t , const char *, int, uint64_t,
+ int, uint64_t, int, uint64_t, int, uint64_t);
+
+char *perfdata_int64 (const char *, int64_t, const char *, int, int64_t,
+ int, int64_t, int, int64_t, int, int64_t);
+
char *fperfdata (const char *, double, const char *, int, double,
int, double, int, double, int, double);
@@ -106,6 +106,8 @@ char *sperfdata (const char *, double, const char *, char *, char *,
char *sperfdata_int (const char *, int, const char *, char *, char *,
int, int, int, int);
+int open_max (void);
+
/* The idea here is that, although not every plugin will use all of these,
most will or should. Therefore, for consistency, these very common
options should have only these meanings throughout the overall suite */
diff --git a/po/de.po b/po/de.po
index 51551aef..919fae32 100644
--- a/po/de.po
+++ b/po/de.po
@@ -13,10 +13,10 @@ msgstr ""
"PO-Revision-Date: 2004-12-23 17:46+0100\n"
"Last-Translator: <>\n"
"Language-Team: English <en@li.org>\n"
-"Language: en\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=iso-8859-1\n"
"Content-Transfer-Encoding: 8bit\n"
+"Language: en\n"
"Plural-Forms: nplurals=2; plural=(n > 1);X-Generator: KBabel 1.3.1\n"
#: plugins/check_by_ssh.c:86 plugins/check_cluster.c:76 plugins/check_dig.c:88
@@ -704,7 +704,7 @@ msgstr "DNS CRITICAL - %s\n"
#: plugins/check_dns.c:254
#, fuzzy, c-format
msgid "DNS UNKNOWN - %s\n"
-msgstr "DNS UNKNOW - %s\n"
+msgstr "DNS UNKNOWN - %s\n"
#: plugins/check_dns.c:267
msgid "Note: nslookup is deprecated and may be removed from future releases."
@@ -852,7 +852,7 @@ msgstr "Konnte stderr nicht öffnen für: %s\n"
#: plugins/check_fping.c:157
#, fuzzy
msgid "FPING UNKNOWN - IP address not found\n"
-msgstr "FPING UNKNOW - %s nicht gefunden\n"
+msgstr "FPING UNKNOWN - %s nicht gefunden\n"
#: plugins/check_fping.c:160
msgid "FPING UNKNOWN - invalid commandline argument\n"
@@ -861,12 +861,12 @@ msgstr ""
#: plugins/check_fping.c:163
#, fuzzy
msgid "FPING UNKNOWN - failed system call\n"
-msgstr "FPING UNKNOW - %s nicht gefunden\n"
+msgstr "FPING UNKNOWN - %s nicht gefunden\n"
#: plugins/check_fping.c:187
#, c-format
-msgid "FPING UNKNOW - %s not found\n"
-msgstr "FPING UNKNOW - %s nicht gefunden\n"
+msgid "FPING UNKNOWN - %s not found\n"
+msgstr "FPING UNKNOWN - %s nicht gefunden\n"
#: plugins/check_fping.c:191
#, c-format
@@ -876,7 +876,7 @@ msgstr "FPING CRITICAL - %s ist nicht erreichbar\n"
#: plugins/check_fping.c:196
#, fuzzy, c-format
msgid "FPING UNKNOWN - %s parameter error\n"
-msgstr "FPING UNKNOW - %s nicht gefunden\n"
+msgstr "FPING UNKNOWN - %s nicht gefunden\n"
#: plugins/check_fping.c:200 plugins/check_fping.c:240
#, c-format
@@ -1577,7 +1577,7 @@ msgstr ""
#: plugins/check_http.c:1545
msgid ""
"other errors return STATE_UNKNOWN. Successful connects, but incorrect "
-"reponse"
+"response"
msgstr ""
#: plugins/check_http.c:1546
@@ -3636,7 +3636,7 @@ msgid ""
msgstr ""
#: plugins/check_pgsql.c:568
-msgid "a password, but no effort is made to obsure or encrypt the password."
+msgid "a password, but no effort is made to obscure or encrypt the password."
msgstr ""
#: plugins/check_pgsql.c:601
@@ -4169,7 +4169,7 @@ msgid "The user to authenticate"
msgstr ""
#: plugins/check_radius.c:352
-msgid "Password for autentication (SECURITY RISK)"
+msgid "Password for authentication (SECURITY RISK)"
msgstr ""
#: plugins/check_radius.c:354
@@ -4309,7 +4309,7 @@ msgstr ""
#: plugins/check_real.c:440
msgid ""
-"but incorrect reponse messages from the host result in STATE_WARNING return"
+"but incorrect response messages from the host result in STATE_WARNING return"
msgstr ""
#: plugins/check_real.c:441
@@ -4530,7 +4530,7 @@ msgid "STATE_CRITICAL, other errors return STATE_UNKNOWN. Successful"
msgstr ""
#: plugins/check_smtp.c:832
-msgid "connects, but incorrect reponse messages from the host result in"
+msgid "connects, but incorrect response messages from the host result in"
msgstr ""
#: plugins/check_smtp.c:833
@@ -5438,8 +5438,8 @@ msgstr ""
#: plugins/negate.c:174
msgid ""
-"Ok must be a valid state name (OK, WARNING, CRITICAL, UNKNOWN) or integer "
-"(0-3)."
+"Ok must be a valid state name (OK, WARNING, CRITICAL, UNKNOWN) or integer (0-"
+"3)."
msgstr ""
#: plugins/negate.c:180
diff --git a/po/fr.po b/po/fr.po
index d88dfe2e..e44cf88c 100644
--- a/po/fr.po
+++ b/po/fr.po
@@ -902,7 +902,7 @@ msgstr "PING INCONNU - Hôte non trouvé (%s)\n"
#: plugins/check_fping.c:187
#, c-format
-msgid "FPING UNKNOW - %s not found\n"
+msgid "FPING UNKNOWN - %s not found\n"
msgstr "PING INCONNU - Hôte non trouvé (%s)\n"
#: plugins/check_fping.c:191
@@ -1623,7 +1623,7 @@ msgstr ""
#: plugins/check_http.c:1545
msgid ""
"other errors return STATE_UNKNOWN. Successful connects, but incorrect "
-"reponse"
+"response"
msgstr ""
#: plugins/check_http.c:1546
@@ -3700,7 +3700,7 @@ msgid ""
msgstr ""
#: plugins/check_pgsql.c:568
-msgid "a password, but no effort is made to obsure or encrypt the password."
+msgid "a password, but no effort is made to obscure or encrypt the password."
msgstr ""
#: plugins/check_pgsql.c:601
@@ -4246,7 +4246,7 @@ msgid "The user to authenticate"
msgstr ""
#: plugins/check_radius.c:352
-msgid "Password for autentication (SECURITY RISK)"
+msgid "Password for authentication (SECURITY RISK)"
msgstr ""
#: plugins/check_radius.c:354
@@ -4382,7 +4382,7 @@ msgstr ""
#: plugins/check_real.c:440
msgid ""
-"but incorrect reponse messages from the host result in STATE_WARNING return"
+"but incorrect response messages from the host result in STATE_WARNING return"
msgstr ""
#: plugins/check_real.c:441
@@ -4600,7 +4600,7 @@ msgid "STATE_CRITICAL, other errors return STATE_UNKNOWN. Successful"
msgstr ""
#: plugins/check_smtp.c:832
-msgid "connects, but incorrect reponse messages from the host result in"
+msgid "connects, but incorrect response messages from the host result in"
msgstr ""
#: plugins/check_smtp.c:833
diff --git a/po/monitoring-plugins.pot b/po/monitoring-plugins.pot
index 8f220e98..5bc23637 100644
--- a/po/monitoring-plugins.pot
+++ b/po/monitoring-plugins.pot
@@ -841,7 +841,7 @@ msgstr ""
#: plugins/check_fping.c:187
#, c-format
-msgid "FPING UNKNOW - %s not found\n"
+msgid "FPING UNKNOWN - %s not found\n"
msgstr ""
#: plugins/check_fping.c:191
@@ -1528,7 +1528,7 @@ msgstr ""
#: plugins/check_http.c:1545
msgid ""
"other errors return STATE_UNKNOWN. Successful connects, but incorrect "
-"reponse"
+"response"
msgstr ""
#: plugins/check_http.c:1546
@@ -3546,7 +3546,7 @@ msgid ""
msgstr ""
#: plugins/check_pgsql.c:568
-msgid "a password, but no effort is made to obsure or encrypt the password."
+msgid "a password, but no effort is made to obscure or encrypt the password."
msgstr ""
#: plugins/check_pgsql.c:601
@@ -4070,7 +4070,7 @@ msgid "The user to authenticate"
msgstr ""
#: plugins/check_radius.c:352
-msgid "Password for autentication (SECURITY RISK)"
+msgid "Password for authentication (SECURITY RISK)"
msgstr ""
#: plugins/check_radius.c:354
@@ -4203,7 +4203,7 @@ msgstr ""
#: plugins/check_real.c:440
msgid ""
-"but incorrect reponse messages from the host result in STATE_WARNING return"
+"but incorrect response messages from the host result in STATE_WARNING return"
msgstr ""
#: plugins/check_real.c:441
@@ -4417,7 +4417,7 @@ msgid "STATE_CRITICAL, other errors return STATE_UNKNOWN. Successful"
msgstr ""
#: plugins/check_smtp.c:832
-msgid "connects, but incorrect reponse messages from the host result in"
+msgid "connects, but incorrect response messages from the host result in"
msgstr ""
#: plugins/check_smtp.c:833
diff --git a/tools/squid.conf b/tools/squid.conf
new file mode 100644
index 00000000..bed7a583
--- /dev/null
+++ b/tools/squid.conf
@@ -0,0 +1,7979 @@
+# WELCOME TO SQUID 3.5.27
+# ----------------------------
+#
+# This is the documentation for the Squid configuration file.
+# This documentation can also be found online at:
+# http://www.squid-cache.org/Doc/config/
+#
+# You may wish to look at the Squid home page and wiki for the
+# FAQ and other documentation:
+# http://www.squid-cache.org/
+# http://wiki.squid-cache.org/SquidFaq
+# http://wiki.squid-cache.org/ConfigExamples
+#
+# This documentation shows what the defaults for various directives
+# happen to be. If you don't need to change the default, you should
+# leave the line out of your squid.conf in most cases.
+#
+# In some cases "none" refers to no default setting at all,
+# while in other cases it refers to the value of the option
+# - the comments for that keyword indicate if this is the case.
+#
+
+# Configuration options can be included using the "include" directive.
+# Include takes a list of files to include. Quoting and wildcards are
+# supported.
+#
+# For example,
+#
+# include /path/to/included/file/squid.acl.config
+#
+# Includes can be nested up to a hard-coded depth of 16 levels.
+# This arbitrary restriction is to prevent recursive include references
+# from causing Squid entering an infinite loop whilst trying to load
+# configuration files.
+#
+# Values with byte units
+#
+# Squid accepts size units on some size related directives. All
+# such directives are documented with a default value displaying
+# a unit.
+#
+# Units accepted by Squid are:
+# bytes - byte
+# KB - Kilobyte (1024 bytes)
+# MB - Megabyte
+# GB - Gigabyte
+#
+# Values with spaces, quotes, and other special characters
+#
+# Squid supports directive parameters with spaces, quotes, and other
+# special characters. Surround such parameters with "double quotes". Use
+# the configuration_includes_quoted_values directive to enable or
+# disable that support.
+#
+# Squid supports reading configuration option parameters from external
+# files using the syntax:
+# parameters("/path/filename")
+# For example:
+# acl whitelist dstdomain parameters("/etc/squid/whitelist.txt")
+#
+# Conditional configuration
+#
+# If-statements can be used to make configuration directives
+# depend on conditions:
+#
+# if <CONDITION>
+# ... regular configuration directives ...
+# [else
+# ... regular configuration directives ...]
+# endif
+#
+# The else part is optional. The keywords "if", "else", and "endif"
+# must be typed on their own lines, as if they were regular
+# configuration directives.
+#
+# NOTE: An else-if condition is not supported.
+#
+# These individual conditions types are supported:
+#
+# true
+# Always evaluates to true.
+# false
+# Always evaluates to false.
+# <integer> = <integer>
+# Equality comparison of two integer numbers.
+#
+#
+# SMP-Related Macros
+#
+# The following SMP-related preprocessor macros can be used.
+#
+# ${process_name} expands to the current Squid process "name"
+# (e.g., squid1, squid2, or cache1).
+#
+# ${process_number} expands to the current Squid process
+# identifier, which is an integer number (e.g., 1, 2, 3) unique
+# across all Squid processes of the current service instance.
+#
+# ${service_name} expands into the current Squid service instance
+# name identifier which is provided by -n on the command line.
+#
+
+# TAG: broken_vary_encoding
+# This option is not yet supported by Squid-3.
+#Default:
+# none
+
+# TAG: cache_vary
+# This option is not yet supported by Squid-3.
+#Default:
+# none
+
+# TAG: error_map
+# This option is not yet supported by Squid-3.
+#Default:
+# none
+
+# TAG: external_refresh_check
+# This option is not yet supported by Squid-3.
+#Default:
+# none
+
+# TAG: location_rewrite_program
+# This option is not yet supported by Squid-3.
+#Default:
+# none
+
+# TAG: refresh_stale_hit
+# This option is not yet supported by Squid-3.
+#Default:
+# none
+
+# TAG: hierarchy_stoplist
+# Remove this line. Use always_direct or cache_peer_access ACLs instead if you need to prevent cache_peer use.
+#Default:
+# none
+
+# TAG: log_access
+# Remove this line. Use acls with access_log directives to control access logging
+#Default:
+# none
+
+# TAG: log_icap
+# Remove this line. Use acls with icap_log directives to control icap logging
+#Default:
+# none
+
+# TAG: ignore_ims_on_miss
+# Remove this line. The HTTP/1.1 feature is now configured by 'cache_miss_revalidate'.
+#Default:
+# none
+
+# TAG: chunked_request_body_max_size
+# Remove this line. Squid is now HTTP/1.1 compliant.
+#Default:
+# none
+
+# TAG: dns_v4_fallback
+# Remove this line. Squid performs a 'Happy Eyeballs' algorithm, the 'fallback' algorithm is no longer relevant.
+#Default:
+# none
+
+# TAG: emulate_httpd_log
+# Replace this with an access_log directive using the format 'common' or 'combined'.
+#Default:
+# none
+
+# TAG: forward_log
+# Use a regular access.log with ACL limiting it to MISS events.
+#Default:
+# none
+
+# TAG: ftp_list_width
+# Remove this line. Configure FTP page display using the CSS controls in errorpages.css instead.
+#Default:
+# none
+
+# TAG: ignore_expect_100
+# Remove this line. The HTTP/1.1 feature is now fully supported by default.
+#Default:
+# none
+
+# TAG: log_fqdn
+# Remove this option from your config. To log FQDN use %>A in the log format.
+#Default:
+# none
+
+# TAG: log_ip_on_direct
+# Remove this option from your config. To log server or peer names use %<A in the log format.
+#Default:
+# none
+
+# TAG: maximum_single_addr_tries
+# Replaced by connect_retries. The behaviour has changed, please read the documentation before altering.
+#Default:
+# none
+
+# TAG: referer_log
+# Replace this with an access_log directive using the format 'referrer'.
+#Default:
+# none
+
+# TAG: update_headers
+# Remove this line. The feature is supported by default in storage types where update is implemented.
+#Default:
+# none
+
+# TAG: url_rewrite_concurrency
+# Remove this line. Set the 'concurrency=' option of url_rewrite_children instead.
+#Default:
+# none
+
+# TAG: useragent_log
+# Replace this with an access_log directive using the format 'useragent'.
+#Default:
+# none
+
+# TAG: dns_testnames
+# Remove this line. DNS is no longer tested on startup.
+#Default:
+# none
+
+# TAG: extension_methods
+# Remove this line. All valid methods for HTTP are accepted by default.
+#Default:
+# none
+
+# TAG: zero_buffers
+#Default:
+# none
+
+# TAG: incoming_rate
+#Default:
+# none
+
+# TAG: server_http11
+# Remove this line. HTTP/1.1 is supported by default.
+#Default:
+# none
+
+# TAG: upgrade_http0.9
+# Remove this line. ICY/1.0 streaming protocol is supported by default.
+#Default:
+# none
+
+# TAG: zph_local
+# Alter these entries. Use the qos_flows directive instead.
+#Default:
+# none
+
+# TAG: header_access
+# Since squid-3.0 replace with request_header_access or reply_header_access
+# depending on whether you wish to match client requests or server replies.
+#Default:
+# none
+
+# TAG: httpd_accel_no_pmtu_disc
+# Since squid-3.0 use the 'disable-pmtu-discovery' flag on http_port instead.
+#Default:
+# none
+
+# TAG: wais_relay_host
+# Replace this line with 'cache_peer' configuration.
+#Default:
+# none
+
+# TAG: wais_relay_port
+# Replace this line with 'cache_peer' configuration.
+#Default:
+# none
+
+# OPTIONS FOR SMP
+# -----------------------------------------------------------------------------
+
+# TAG: workers
+# Number of main Squid processes or "workers" to fork and maintain.
+# 0: "no daemon" mode, like running "squid -N ..."
+# 1: "no SMP" mode, start one main Squid process daemon (default)
+# N: start N main Squid process daemons (i.e., SMP mode)
+#
+# In SMP mode, each worker does nearly all what a single Squid daemon
+# does (e.g., listen on http_port and forward HTTP requests).
+#Default:
+# SMP support disabled.
+
+# TAG: cpu_affinity_map
+# Usage: cpu_affinity_map process_numbers=P1,P2,... cores=C1,C2,...
+#
+# Sets 1:1 mapping between Squid processes and CPU cores. For example,
+#
+# cpu_affinity_map process_numbers=1,2,3,4 cores=1,3,5,7
+#
+# affects processes 1 through 4 only and places them on the first
+# four even cores, starting with core #1.
+#
+# CPU cores are numbered starting from 1. Requires support for
+# sched_getaffinity(2) and sched_setaffinity(2) system calls.
+#
+# Multiple cpu_affinity_map options are merged.
+#
+# See also: workers
+#Default:
+# Let operating system decide.
+
+# OPTIONS FOR AUTHENTICATION
+# -----------------------------------------------------------------------------
+
+# TAG: auth_param
+# This is used to define parameters for the various authentication
+# schemes supported by Squid.
+#
+# format: auth_param scheme parameter [setting]
+#
+# The order in which authentication schemes are presented to the client is
+# dependent on the order the scheme first appears in config file. IE
+# has a bug (it's not RFC 2617 compliant) in that it will use the basic
+# scheme if basic is the first entry presented, even if more secure
+# schemes are presented. For now use the order in the recommended
+# settings section below. If other browsers have difficulties (don't
+# recognize the schemes offered even if you are using basic) either
+# put basic first, or disable the other schemes (by commenting out their
+# program entry).
+#
+# Once an authentication scheme is fully configured, it can only be
+# shutdown by shutting squid down and restarting. Changes can be made on
+# the fly and activated with a reconfigure. I.E. You can change to a
+# different helper, but not unconfigure the helper completely.
+#
+# Please note that while this directive defines how Squid processes
+# authentication it does not automatically activate authentication.
+# To use authentication you must in addition make use of ACLs based
+# on login name in http_access (proxy_auth, proxy_auth_regex or
+# external with %LOGIN used in the format tag). The browser will be
+# challenged for authentication on the first such acl encountered
+# in http_access processing and will also be re-challenged for new
+# login credentials if the request is being denied by a proxy_auth
+# type acl.
+#
+# WARNING: authentication can't be used in a transparently intercepting
+# proxy as the client then thinks it is talking to an origin server and
+# not the proxy. This is a limitation of bending the TCP/IP protocol to
+# transparently intercepting port 80, not a limitation in Squid.
+# Ports flagged 'transparent', 'intercept', or 'tproxy' have
+# authentication disabled.
+#
+# === Parameters common to all schemes. ===
+#
+# "program" cmdline
+# Specifies the command for the external authenticator.
+#
+# By default, each authentication scheme is not used unless a
+# program is specified.
+#
+# See http://wiki.squid-cache.org/Features/AddonHelpers for
+# more details on helper operations and creating your own.
+#
+# "key_extras" format
+#	Specifies a string to be appended to the request line format for
+# the authentication helper. "Quoted" format values may contain
+# spaces and logformat %macros. In theory, any logformat %macro
+# can be used. In practice, a %macro expands as a dash (-) if
+# the helper request is sent before the required macro
+# information is available to Squid.
+#
+# By default, Squid uses request formats provided in
+# scheme-specific examples below (search for %credentials).
+#
+# The expanded key_extras value is added to the Squid credentials
+# cache and, hence, will affect authentication. It can be used to
+#	authenticate different users with identical user names (e.g.,
+# when user authentication depends on http_port).
+#
+# Avoid adding frequently changing information to key_extras. For
+# example, if you add user source IP, and it changes frequently
+# in your environment, then max_user_ip ACL is going to treat
+# every user+IP combination as a unique "user", breaking the ACL
+# and wasting a lot of memory on those user records. It will also
+# force users to authenticate from scratch whenever their IP
+# changes.
+#
+# "realm" string
+# Specifies the protection scope (aka realm name) which is to be
+# reported to the client for the authentication scheme. It is
+# commonly part of the text the user will see when prompted for
+# their username and password.
+#
+# For Basic the default is "Squid proxy-caching web server".
+# For Digest there is no default, this parameter is mandatory.
+# For NTLM and Negotiate this parameter is ignored.
+#
+# "children" numberofchildren [startup=N] [idle=N] [concurrency=N]
+#
+# The maximum number of authenticator processes to spawn. If
+# you start too few Squid will have to wait for them to process
+# a backlog of credential verifications, slowing it down. When
+# password verifications are done via a (slow) network you are
+# likely to need lots of authenticator processes.
+#
+# The startup= and idle= options permit some skew in the exact
+# amount run. A minimum of startup=N will begin during startup
+# and reconfigure. Squid will start more in groups of up to
+# idle=N in an attempt to meet traffic needs and to keep idle=N
+# free above those traffic needs up to the maximum.
+#
+# The concurrency= option sets the number of concurrent requests
+# the helper can process. The default of 0 is used for helpers
+#	that only support one request at a time. Setting this to a
+# number greater than 0 changes the protocol used to include a
+# channel ID field first on the request/response line, allowing
+# multiple requests to be sent to the same helper in parallel
+# without waiting for the response.
+#
+# Concurrency must not be set unless it's known the helper
+# supports the input format with channel-ID fields.
+#
+# NOTE: NTLM and Negotiate schemes do not support concurrency
+# in the Squid code module even though some helpers can.
+#
+#
+#
+# === Example Configuration ===
+#
+# This configuration displays the recommended authentication scheme
+# order from most to least secure with recommended minimum configuration
+# settings for each scheme:
+#
+##auth_param negotiate program <uncomment and complete this line to activate>
+##auth_param negotiate children 20 startup=0 idle=1
+##auth_param negotiate keep_alive on
+##
+##auth_param digest program <uncomment and complete this line to activate>
+##auth_param digest children 20 startup=0 idle=1
+##auth_param digest realm Squid proxy-caching web server
+##auth_param digest nonce_garbage_interval 5 minutes
+##auth_param digest nonce_max_duration 30 minutes
+##auth_param digest nonce_max_count 50
+##
+##auth_param ntlm program <uncomment and complete this line to activate>
+##auth_param ntlm children 20 startup=0 idle=1
+##auth_param ntlm keep_alive on
+##
+##auth_param basic program <uncomment and complete this line>
+##auth_param basic children 5 startup=5 idle=1
+##auth_param basic realm Squid proxy-caching web server
+##auth_param basic credentialsttl 2 hours
+#Default:
+# none
+
+# TAG: authenticate_cache_garbage_interval
+# The time period between garbage collection across the username cache.
+# This is a trade-off between memory utilization (long intervals - say
+# 2 days) and CPU (short intervals - say 1 minute). Only change if you
+# have good reason to.
+#Default:
+# authenticate_cache_garbage_interval 1 hour
+
+# TAG: authenticate_ttl
+# The time a user & their credentials stay in the logged in
+# user cache since their last request. When the garbage
+# interval passes, all user credentials that have passed their
+# TTL are removed from memory.
+#Default:
+# authenticate_ttl 1 hour
+
+# TAG: authenticate_ip_ttl
+# If you use proxy authentication and the 'max_user_ip' ACL,
+# this directive controls how long Squid remembers the IP
+# addresses associated with each user. Use a small value
+# (e.g., 60 seconds) if your users might change addresses
+# quickly, as is the case with dialup. You might be safe
+# using a larger value (e.g., 2 hours) in a corporate LAN
+# environment with relatively static address assignments.
+#Default:
+# authenticate_ip_ttl 1 second
+
+# ACCESS CONTROLS
+# -----------------------------------------------------------------------------
+
+# TAG: external_acl_type
+# This option defines external acl classes using a helper program
+# to look up the status
+#
+# external_acl_type name [options] FORMAT.. /path/to/helper [helper arguments..]
+#
+# Options:
+#
+# ttl=n TTL in seconds for cached results (defaults to 3600
+# for 1 hour)
+#
+# negative_ttl=n
+# TTL for cached negative lookups (default same
+# as ttl)
+#
+# grace=n Percentage remaining of TTL where a refresh of a
+# cached entry should be initiated without needing to
+# wait for a new reply. (default is for no grace period)
+#
+# cache=n The maximum number of entries in the result cache. The
+# default limit is 262144 entries. Each cache entry usually
+# consumes at least 256 bytes. Squid currently does not remove
+# expired cache entries until the limit is reached, so a proxy
+# will sooner or later reach the limit. The expanded FORMAT
+# value is used as the cache key, so if the details in FORMAT
+# are highly variable, a larger cache may be needed to produce
+# reduction in helper load.
+#
+# children-max=n
+# Maximum number of acl helper processes spawned to service
+# external acl lookups of this type. (default 5)
+#
+# children-startup=n
+# Minimum number of acl helper processes to spawn during
+# startup and reconfigure to service external acl lookups
+# of this type. (default 0)
+#
+# children-idle=n
+# Number of acl helper processes to keep ahead of traffic
+# loads. Squid will spawn this many at once whenever load
+# rises above the capabilities of existing processes.
+# Up to the value of children-max. (default 1)
+#
+# concurrency=n concurrency level per process. Only used with helpers
+# capable of processing more than one query at a time.
+#
+# protocol=2.5 Compatibility mode for Squid-2.5 external acl helpers.
+#
+# ipv4 / ipv6 IP protocol used to communicate with this helper.
+# The default is to auto-detect IPv6 and use it when available.
+#
+#
+# FORMAT specifications
+#
+# %LOGIN Authenticated user login name
+# %un A user name. Expands to the first available name
+# from the following list of information sources:
+# - authenticated user name, like %ul or %LOGIN
+# - user name sent by an external ACL, like %EXT_USER
+# - SSL client name, like %us in logformat
+# - ident user name, like %ui in logformat
+# %EXT_USER Username from previous external acl
+# %EXT_LOG Log details from previous external acl
+# %EXT_TAG Tag from previous external acl
+# %IDENT Ident user name
+# %SRC Client IP
+# %SRCPORT Client source port
+# %URI Requested URI
+# %DST Requested host
+# %PROTO Requested URL scheme
+# %PORT Requested port
+# %PATH Requested URL path
+# %METHOD Request method
+# %MYADDR Squid interface address
+# %MYPORT Squid http_port number
+# %PATH Requested URL-path (including query-string if any)
+# %USER_CERT SSL User certificate in PEM format
+# %USER_CERTCHAIN SSL User certificate chain in PEM format
+# %USER_CERT_xx SSL User certificate subject attribute xx
+# %USER_CA_CERT_xx SSL User certificate issuer attribute xx
+# %ssl::>sni SSL client SNI sent to Squid
+# %ssl::<cert_subject SSL server certificate DN
+# %ssl::<cert_issuer SSL server certificate issuer DN
+#
+# %>{Header} HTTP request header "Header"
+# %>{Hdr:member}
+# HTTP request header "Hdr" list member "member"
+# %>{Hdr:;member}
+# HTTP request header list member using ; as
+# list separator. ; can be any non-alphanumeric
+# character.
+#
+# %<{Header} HTTP reply header "Header"
+# %<{Hdr:member}
+# HTTP reply header "Hdr" list member "member"
+# %<{Hdr:;member}
+# HTTP reply header list member using ; as
+# list separator. ; can be any non-alphanumeric
+# character.
+#
+# %ACL The name of the ACL being tested.
+# %DATA The ACL arguments. If not used then any arguments
+# is automatically added at the end of the line
+# sent to the helper.
+# NOTE: this will encode the arguments as one token,
+# whereas the default will pass each separately.
+#
+# %% The percent sign. Useful for helpers which need
+# an unchanging input format.
+#
+#
+# General request syntax:
+#
+# [channel-ID] FORMAT-values [acl-values ...]
+#
+#
+# FORMAT-values consists of transaction details expanded with
+# whitespace separation per the config file FORMAT specification
+# using the FORMAT macros listed above.
+#
+# acl-values consists of any string specified in the referencing
+# config 'acl ... external' line. see the "acl external" directive.
+#
+# Request values sent to the helper are URL escaped to protect
+# each value in requests against whitespaces.
+#
+# If using protocol=2.5 then the request sent to the helper is not
+# URL escaped to protect against whitespace.
+#
+# NOTE: protocol=3.0 is deprecated as no longer necessary.
+#
+# When using the concurrency= option the protocol is changed by
+# introducing a query channel tag in front of the request/response.
+# The query channel tag is a number between 0 and concurrency-1.
+# This value must be echoed back unchanged to Squid as the first part
+# of the response relating to its request.
+#
+#
+# The helper receives lines expanded per the above format specification
+# and for each input line returns 1 line starting with OK/ERR/BH result
+# code and optionally followed by additional keywords with more details.
+#
+#
+# General result syntax:
+#
+# [channel-ID] result keyword=value ...
+#
+# Result consists of one of the codes:
+#
+# OK
+# the ACL test produced a match.
+#
+# ERR
+# the ACL test does not produce a match.
+#
+# BH
+# An internal error occurred in the helper, preventing
+# a result being identified.
+#
+# The meaning of 'a match' is determined by your squid.conf
+# access control configuration. See the Squid wiki for details.
+#
+# Defined keywords:
+#
+# user= The users name (login)
+#
+# password= The users password (for login= cache_peer option)
+#
+# message= Message describing the reason for this response.
+# Available as %o in error pages.
+# Useful on (ERR and BH results).
+#
+# tag= Apply a tag to a request. Only sets a tag once,
+# does not alter existing tags.
+#
+# log= String to be logged in access.log. Available as
+# %ea in logformat specifications.
+#
+# clt_conn_tag= Associates a TAG with the client TCP connection.
+# Please see url_rewrite_program related documentation
+# for this kv-pair.
+#
+# Any keywords may be sent on any response whether OK, ERR or BH.
+#
+# All response keyword values need to be a single token with URL
+# escaping, or enclosed in double quotes (") and escaped using \ on
+# any double quotes or \ characters within the value. The wrapping
+# double quotes are removed before the value is interpreted by Squid.
+# \r and \n are also replace by CR and LF.
+#
+# Some example key values:
+#
+# user=John%20Smith
+# user="John Smith"
+# user="J. \"Bob\" Smith"
+#Default:
+# none
+
+# TAG: acl
+# Defining an Access List
+#
+# Every access list definition must begin with an aclname and acltype,
+# followed by either type-specific arguments or a quoted filename that
+# they are read from.
+#
+# acl aclname acltype argument ...
+# acl aclname acltype "file" ...
+#
+# When using "file", the file should contain one item per line.
+#
+#	Some acl types support options which change their default behaviour.
+# The available options are:
+#
+# -i,+i By default, regular expressions are CASE-SENSITIVE. To make them
+# case-insensitive, use the -i option. To return case-sensitive
+# use the +i option between patterns, or make a new ACL line
+# without -i.
+#
+# -n Disable lookups and address type conversions. If lookup or
+# conversion is required because the parameter type (IP or
+# domain name) does not match the message address type (domain
+# name or IP), then the ACL would immediately declare a mismatch
+# without any warnings or lookups.
+#
+# -- Used to stop processing all options, in the case the first acl
+# value has '-' character as first character (for example the '-'
+# is a valid domain name)
+#
+# Some acl types require suspending the current request in order
+# to access some external data source.
+# Those which do are marked with the tag [slow], those which
+# don't are marked as [fast].
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl
+# for further information
+#
+# ***** ACL TYPES AVAILABLE *****
+#
+# acl aclname src ip-address/mask ... # clients IP address [fast]
+# acl aclname src addr1-addr2/mask ... # range of addresses [fast]
+# acl aclname dst [-n] ip-address/mask ... # URL host's IP address [slow]
+# acl aclname localip ip-address/mask ... # IP address the client connected to [fast]
+#
+# acl aclname arp mac-address ... (xx:xx:xx:xx:xx:xx notation)
+# # [fast]
+# # The 'arp' ACL code is not portable to all operating systems.
+# # It works on Linux, Solaris, Windows, FreeBSD, and some other
+# # BSD variants.
+# #
+# # NOTE: Squid can only determine the MAC/EUI address for IPv4
+# # clients that are on the same subnet. If the client is on a
+# # different subnet, then Squid cannot find out its address.
+# #
+# # NOTE 2: IPv6 protocol does not contain ARP. MAC/EUI is either
+# # encoded directly in the IPv6 address or not available.
+#
+# acl aclname srcdomain .foo.com ...
+# # reverse lookup, from client IP [slow]
+# acl aclname dstdomain [-n] .foo.com ...
+# # Destination server from URL [fast]
+# acl aclname srcdom_regex [-i] \.foo\.com ...
+# # regex matching client name [slow]
+# acl aclname dstdom_regex [-n] [-i] \.foo\.com ...
+# # regex matching server [fast]
+# #
+# # For dstdomain and dstdom_regex a reverse lookup is tried if a IP
+# # based URL is used and no match is found. The name "none" is used
+# # if the reverse lookup fails.
+#
+# acl aclname src_as number ...
+# acl aclname dst_as number ...
+# # [fast]
+# # Except for access control, AS numbers can be used for
+# # routing of requests to specific caches. Here's an
+# # example for routing all requests for AS#1241 and only
+# # those to mycache.mydomain.net:
+# # acl asexample dst_as 1241
+# # cache_peer_access mycache.mydomain.net allow asexample
+# # cache_peer_access mycache_mydomain.net deny all
+#
+# acl aclname peername myPeer ...
+# # [fast]
+# # match against a named cache_peer entry
+# # set unique name= on cache_peer lines for reliable use.
+#
+# acl aclname time [day-abbrevs] [h1:m1-h2:m2]
+# # [fast]
+# # day-abbrevs:
+# # S - Sunday
+# # M - Monday
+# # T - Tuesday
+# # W - Wednesday
+# # H - Thursday
+# # F - Friday
+# # A - Saturday
+# # h1:m1 must be less than h2:m2
+#
+# acl aclname url_regex [-i] ^http:// ...
+# # regex matching on whole URL [fast]
+# acl aclname urllogin [-i] [^a-zA-Z0-9] ...
+# # regex matching on URL login field
+# acl aclname urlpath_regex [-i] \.gif$ ...
+# # regex matching on URL path [fast]
+#
+# acl aclname port 80 70 21 0-1024... # destination TCP port [fast]
+#	  # ranges are allowed
+# acl aclname localport 3128 ... # TCP port the client connected to [fast]
+# # NP: for interception mode this is usually '80'
+#
+# acl aclname myportname 3128 ... # *_port name [fast]
+#
+# acl aclname proto HTTP FTP ... # request protocol [fast]
+#
+# acl aclname method GET POST ... # HTTP request method [fast]
+#
+# acl aclname http_status 200 301 500- 400-403 ...
+# # status code in reply [fast]
+#
+# acl aclname browser [-i] regexp ...
+# # pattern match on User-Agent header (see also req_header below) [fast]
+#
+# acl aclname referer_regex [-i] regexp ...
+# # pattern match on Referer header [fast]
+# # Referer is highly unreliable, so use with care
+#
+# acl aclname ident username ...
+# acl aclname ident_regex [-i] pattern ...
+# # string match on ident output [slow]
+# # use REQUIRED to accept any non-null ident.
+#
+# acl aclname proxy_auth [-i] username ...
+# acl aclname proxy_auth_regex [-i] pattern ...
+# # perform http authentication challenge to the client and match against
+# # supplied credentials [slow]
+# #
+# # takes a list of allowed usernames.
+# # use REQUIRED to accept any valid username.
+# #
+# # Will use proxy authentication in forward-proxy scenarios, and plain
+#	  # http authentication in reverse-proxy scenarios
+# #
+# # NOTE: when a Proxy-Authentication header is sent but it is not
+# # needed during ACL checking the username is NOT logged
+# # in access.log.
+# #
+# # NOTE: proxy_auth requires a EXTERNAL authentication program
+# # to check username/password combinations (see
+# # auth_param directive).
+# #
+# # NOTE: proxy_auth can't be used in a transparent/intercepting proxy
+# # as the browser needs to be configured for using a proxy in order
+# # to respond to proxy authentication.
+#
+# acl aclname snmp_community string ...
+# # A community string to limit access to your SNMP Agent [fast]
+# # Example:
+# #
+# # acl snmppublic snmp_community public
+#
+# acl aclname maxconn number
+# # This will be matched when the client's IP address has
+# # more than <number> TCP connections established. [fast]
+# # NOTE: This only measures direct TCP links so X-Forwarded-For
+# # indirect clients are not counted.
+#
+# acl aclname max_user_ip [-s] number
+# # This will be matched when the user attempts to log in from more
+# # than <number> different ip addresses. The authenticate_ip_ttl
+# # parameter controls the timeout on the ip entries. [fast]
+# # If -s is specified the limit is strict, denying browsing
+# # from any further IP addresses until the ttl has expired. Without
+# # -s Squid will just annoy the user by "randomly" denying requests.
+# # (the counter is reset each time the limit is reached and a
+# # request is denied)
+# # NOTE: in acceleration mode or where there is mesh of child proxies,
+# # clients may appear to come from multiple addresses if they are
+# # going through proxy farms, so a limit of 1 may cause user problems.
+#
+# acl aclname random probability
+# # Pseudo-randomly match requests. Based on the probability given.
+# # Probability may be written as a decimal (0.333), fraction (1/3)
+# # or ratio of matches:non-matches (3:5).
+#
+# acl aclname req_mime_type [-i] mime-type ...
+# # regex match against the mime type of the request generated
+# # by the client. Can be used to detect file upload or some
+# # types HTTP tunneling requests [fast]
+# # NOTE: This does NOT match the reply. You cannot use this
+# # to match the returned file type.
+#
+# acl aclname req_header header-name [-i] any\.regex\.here
+# # regex match against any of the known request headers. May be
+# # thought of as a superset of "browser", "referer" and "mime-type"
+# # ACL [fast]
+#
+# acl aclname rep_mime_type [-i] mime-type ...
+# # regex match against the mime type of the reply received by
+# #			squid. Can be used to detect file download or some
+# #			types of HTTP tunneling requests. [fast]
+# # NOTE: This has no effect in http_access rules. It only has
+# # effect in rules that affect the reply data stream such as
+# # http_reply_access.
+#
+# acl aclname rep_header header-name [-i] any\.regex\.here
+# # regex match against any of the known reply headers. May be
+# # thought of as a superset of "browser", "referer" and "mime-type"
+# # ACLs [fast]
+#
+# acl aclname external class_name [arguments...]
+# # external ACL lookup via a helper class defined by the
+# # external_acl_type directive [slow]
+#
+# acl aclname user_cert attribute values...
+# # match against attributes in a user SSL certificate
+# # attribute is one of DN/C/O/CN/L/ST or a numerical OID [fast]
+#
+# acl aclname ca_cert attribute values...
+# #	  # match against attributes in a user's issuing CA SSL certificate
+# # attribute is one of DN/C/O/CN/L/ST or a numerical OID [fast]
+#
+# acl aclname ext_user username ...
+# acl aclname ext_user_regex [-i] pattern ...
+# # string match on username returned by external acl helper [slow]
+# # use REQUIRED to accept any non-null user name.
+#
+# acl aclname tag tagvalue ...
+# # string match on tag returned by external acl helper [fast]
+# # DEPRECATED. Only the first tag will match with this ACL.
+# # Use the 'note' ACL instead for handling multiple tag values.
+#
+# acl aclname hier_code codename ...
+# # string match against squid hierarchy code(s); [fast]
+# # e.g., DIRECT, PARENT_HIT, NONE, etc.
+# #
+# # NOTE: This has no effect in http_access rules. It only has
+# # effect in rules that affect the reply data stream such as
+# # http_reply_access.
+#
+# acl aclname note name [value ...]
+# # match transaction annotation [fast]
+# # Without values, matches any annotation with a given name.
+# # With value(s), matches any annotation with a given name that
+# # also has one of the given values.
+# # Names and values are compared using a string equality test.
+# # Annotation sources include note and adaptation_meta directives
+# # as well as helper and eCAP responses.
+#
+# acl aclname adaptation_service service ...
+# # Matches the name of any icap_service, ecap_service,
+# # adaptation_service_set, or adaptation_service_chain that Squid
+# # has used (or attempted to use) for the master transaction.
+# # This ACL must be defined after the corresponding adaptation
+# # service is named in squid.conf. This ACL is usable with
+# # adaptation_meta because it starts matching immediately after
+# # the service has been selected for adaptation.
+#
+# acl aclname any-of acl1 acl2 ...
+# # match any one of the acls [fast or slow]
+# # The first matching ACL stops further ACL evaluation.
+# #
+# # ACLs from multiple any-of lines with the same name are ORed.
+# # For example, A = (a1 or a2) or (a3 or a4) can be written as
+# # acl A any-of a1 a2
+# # acl A any-of a3 a4
+# #
+# # This group ACL is fast if all evaluated ACLs in the group are fast
+# # and slow otherwise.
+#
+# acl aclname all-of acl1 acl2 ...
+# # match all of the acls [fast or slow]
+# # The first mismatching ACL stops further ACL evaluation.
+# #
+# # ACLs from multiple all-of lines with the same name are ORed.
+# # For example, B = (b1 and b2) or (b3 and b4) can be written as
+# # acl B all-of b1 b2
+# # acl B all-of b3 b4
+# #
+# # This group ACL is fast if all evaluated ACLs in the group are fast
+# # and slow otherwise.
+#
+# Examples:
+# acl macaddress arp 09:00:2b:23:45:67
+# acl myexample dst_as 1241
+# acl password proxy_auth REQUIRED
+# acl fileupload req_mime_type -i ^multipart/form-data$
+# acl javascript rep_mime_type -i ^application/x-javascript$
+#
+#Default:
+# ACLs all, manager, localhost, and to_localhost are predefined.
+#
+#
+# Recommended minimum configuration:
+#
+
+# Example rule allowing access from your local networks.
+# Adapt to list your (internal) IP networks from where browsing
+# should be allowed
+acl localnet src 10.0.0.0/8 # RFC1918 possible internal network
+acl localnet src 172.16.0.0/12 # RFC1918 possible internal network
+acl localnet src 192.168.0.0/16 # RFC1918 possible internal network
+acl localnet src fc00::/7 # RFC 4193 local private network range
+acl localnet src fe80::/10 # RFC 4291 link-local (directly plugged) machines
+
+acl SSL_ports port 443
+acl Safe_ports port 80 # http
+acl Safe_ports port 21 # ftp
+acl Safe_ports port 443 # https
+acl Safe_ports port 70 # gopher
+acl Safe_ports port 210 # wais
+acl Safe_ports port 1025-65535 # unregistered ports
+acl Safe_ports port 280 # http-mgmt
+acl Safe_ports port 488 # gss-http
+acl Safe_ports port 591 # filemaker
+acl Safe_ports port 777 # multiling http
+acl CONNECT method CONNECT
+
+# TAG: proxy_protocol_access
+# Determine which client proxies can be trusted to provide correct
+# information regarding real client IP address using PROXY protocol.
+#
+# Requests may pass through a chain of several other proxies
+#	before reaching us. The original source details may be sent in:
+# * HTTP message Forwarded header, or
+# * HTTP message X-Forwarded-For header, or
+# * PROXY protocol connection header.
+#
+# This directive is solely for validating new PROXY protocol
+# connections received from a port flagged with require-proxy-header.
+# It is checked only once after TCP connection setup.
+#
+# A deny match results in TCP connection closure.
+#
+# An allow match is required for Squid to permit the corresponding
+# TCP connection, before Squid even looks for HTTP request headers.
+# If there is an allow match, Squid starts using PROXY header information
+# to determine the source address of the connection for all future ACL
+# checks, logging, etc.
+#
+# SECURITY CONSIDERATIONS:
+#
+# Any host from which we accept client IP details can place
+# incorrect information in the relevant header, and Squid
+# will use the incorrect information as if it were the
+# source address of the request. This may enable remote
+# hosts to bypass any access control restrictions that are
+# based on the client's source addresses.
+#
+# This clause only supports fast acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#Default:
+# all TCP connections to ports with require-proxy-header will be denied
+
+# TAG: follow_x_forwarded_for
+# Determine which client proxies can be trusted to provide correct
+# information regarding real client IP address.
+#
+# Requests may pass through a chain of several other proxies
+#	before reaching us. The original source details may be sent in:
+# * HTTP message Forwarded header, or
+# * HTTP message X-Forwarded-For header, or
+# * PROXY protocol connection header.
+#
+# PROXY protocol connections are controlled by the proxy_protocol_access
+# directive which is checked before this.
+#
+# If a request reaches us from a source that is allowed by this
+# directive, then we trust the information it provides regarding
+# the IP of the client it received from (if any).
+#
+# For the purpose of ACLs used in this directive the src ACL type always
+# matches the address we are testing and srcdomain matches its rDNS.
+#
+# On each HTTP request Squid checks for X-Forwarded-For header fields.
+# If found the header values are iterated in reverse order and an allow
+# match is required for Squid to continue on to the next value.
+# The verification ends when a value receives a deny match, cannot be
+# tested, or there are no more values to test.
+# NOTE: Squid does not yet follow the Forwarded HTTP header.
+#
+# The end result of this process is an IP address that we will
+# refer to as the indirect client address. This address may
+# be treated as the client address for access control, ICAP, delay
+# pools and logging, depending on the acl_uses_indirect_client,
+# icap_uses_indirect_client, delay_pool_uses_indirect_client,
+# log_uses_indirect_client and tproxy_uses_indirect_client options.
+#
+# This clause only supports fast acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#
+# SECURITY CONSIDERATIONS:
+#
+# Any host from which we accept client IP details can place
+# incorrect information in the relevant header, and Squid
+# will use the incorrect information as if it were the
+# source address of the request. This may enable remote
+# hosts to bypass any access control restrictions that are
+# based on the client's source addresses.
+#
+# For example:
+#
+# acl localhost src 127.0.0.1
+# acl my_other_proxy srcdomain .proxy.example.com
+# follow_x_forwarded_for allow localhost
+# follow_x_forwarded_for allow my_other_proxy
+#Default:
+# X-Forwarded-For header will be ignored.
+
+# TAG: acl_uses_indirect_client on|off
+# Controls whether the indirect client address
+# (see follow_x_forwarded_for) is used instead of the
+# direct client address in acl matching.
+#
+# NOTE: maxconn ACL considers direct TCP links and indirect
+# clients will always have zero. So no match.
+#Default:
+# acl_uses_indirect_client on
+
+# TAG: delay_pool_uses_indirect_client on|off
+# Controls whether the indirect client address
+# (see follow_x_forwarded_for) is used instead of the
+# direct client address in delay pools.
+#Default:
+# delay_pool_uses_indirect_client on
+
+# TAG: log_uses_indirect_client on|off
+# Controls whether the indirect client address
+# (see follow_x_forwarded_for) is used instead of the
+# direct client address in the access log.
+#Default:
+# log_uses_indirect_client on
+
+# TAG: tproxy_uses_indirect_client on|off
+# Controls whether the indirect client address
+# (see follow_x_forwarded_for) is used instead of the
+# direct client address when spoofing the outgoing client.
+#
+# This has no effect on requests arriving in non-tproxy
+# mode ports.
+#
+# SECURITY WARNING: Usage of this option is dangerous
+# and should not be used trivially. Correct configuration
+# of follow_x_forwarded_for with a limited set of trusted
+# sources is required to prevent abuse of your proxy.
+#Default:
+# tproxy_uses_indirect_client off
+
+# TAG: spoof_client_ip
+# Control client IP address spoofing of TPROXY traffic based on
+# defined access lists.
+#
+# spoof_client_ip allow|deny [!]aclname ...
+#
+# If there are no "spoof_client_ip" lines present, the default
+# is to "allow" spoofing of any suitable request.
+#
+# Note that the cache_peer "no-tproxy" option overrides this ACL.
+#
+# This clause supports fast acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#Default:
+# Allow spoofing on all TPROXY traffic.
+
+# TAG: http_access
+# Allowing or Denying access based on defined access lists
+#
+# To allow or deny a message received on an HTTP, HTTPS, or FTP port:
+# http_access allow|deny [!]aclname ...
+#
+# NOTE on default values:
+#
+# If there are no "access" lines present, the default is to deny
+# the request.
+#
+# If none of the "access" lines cause a match, the default is the
+# opposite of the last line in the list. If the last line was
+# deny, the default is allow. Conversely, if the last line
+# is allow, the default will be deny. For these reasons, it is a
+#	good idea to have a "deny all" entry at the end of your access
+# lists to avoid potential confusion.
+#
+# This clause supports both fast and slow acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#
+#Default:
+# Deny, unless rules exist in squid.conf.
+#
+
+#
+# Recommended minimum Access Permission configuration:
+#
+# Deny requests to certain unsafe ports
+http_access deny !Safe_ports
+
+# Deny CONNECT to other than secure SSL ports
+http_access deny CONNECT !SSL_ports
+
+# Only allow cachemgr access from localhost
+http_access allow localhost manager
+http_access deny manager
+
+# We strongly recommend the following be uncommented to protect innocent
+# web applications running on the proxy server who think the only
+# one who can access services on "localhost" is a local user
+#http_access deny to_localhost
+
+#
+# INSERT YOUR OWN RULE(S) HERE TO ALLOW ACCESS FROM YOUR CLIENTS
+#
+
+# Example rule allowing access from your local networks.
+# Adapt localnet in the ACL section to list your (internal) IP networks
+# from where browsing should be allowed
+http_access allow localnet
+http_access allow localhost
+
+# And finally deny all other access to this proxy
+http_access deny all
+
+# TAG: adapted_http_access
+# Allowing or Denying access based on defined access lists
+#
+# Essentially identical to http_access, but runs after redirectors
+# and ICAP/eCAP adaptation. Allowing access control based on their
+# output.
+#
+# If not set then only http_access is used.
+#Default:
+# Allow, unless rules exist in squid.conf.
+
+# TAG: http_reply_access
+# Allow replies to client requests. This is complementary to http_access.
+#
+# http_reply_access allow|deny [!] aclname ...
+#
+# NOTE: if there are no access lines present, the default is to allow
+# all replies.
+#
+# If none of the access lines cause a match the opposite of the
+# last line will apply. Thus it is good practice to end the rules
+# with an "allow all" or "deny all" entry.
+#
+# This clause supports both fast and slow acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#Default:
+# Allow, unless rules exist in squid.conf.
+
+# TAG: icp_access
+# Allowing or Denying access to the ICP port based on defined
+# access lists
+#
+# icp_access allow|deny [!]aclname ...
+#
+# NOTE: The default if no icp_access lines are present is to
+# deny all traffic. This default may cause problems with peers
+# using ICP.
+#
+# This clause only supports fast acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#
+## Allow ICP queries from local networks only
+##icp_access allow localnet
+##icp_access deny all
+#Default:
+# Deny, unless rules exist in squid.conf.
+
+# TAG: htcp_access
+# Allowing or Denying access to the HTCP port based on defined
+# access lists
+#
+# htcp_access allow|deny [!]aclname ...
+#
+# See also htcp_clr_access for details on access control for
+# cache purge (CLR) HTCP messages.
+#
+# NOTE: The default if no htcp_access lines are present is to
+# deny all traffic. This default may cause problems with peers
+# using the htcp option.
+#
+# This clause only supports fast acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#
+## Allow HTCP queries from local networks only
+##htcp_access allow localnet
+##htcp_access deny all
+#Default:
+# Deny, unless rules exist in squid.conf.
+
+# TAG: htcp_clr_access
+# Allowing or Denying access to purge content using HTCP based
+# on defined access lists.
+# See htcp_access for details on general HTCP access control.
+#
+# htcp_clr_access allow|deny [!]aclname ...
+#
+# This clause only supports fast acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#
+## Allow HTCP CLR requests from trusted peers
+#acl htcp_clr_peer src 192.0.2.2 2001:DB8::2
+#htcp_clr_access allow htcp_clr_peer
+#htcp_clr_access deny all
+#Default:
+# Deny, unless rules exist in squid.conf.
+
+# TAG: miss_access
+# Determines whether network access is permitted when satisfying a request.
+#
+# For example;
+# to force your neighbors to use you as a sibling instead of
+# a parent.
+#
+# acl localclients src 192.0.2.0/24 2001:DB8::a:0/64
+# miss_access deny !localclients
+# miss_access allow all
+#
+# This means only your local clients are allowed to fetch relayed/MISS
+# replies from the network and all other clients can only fetch cached
+# objects (HITs).
+#
+# The default for this setting allows all clients who passed the
+# http_access rules to relay via this proxy.
+#
+# This clause only supports fast acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#Default:
+# Allow, unless rules exist in squid.conf.
+
+# TAG: ident_lookup_access
+# A list of ACL elements which, if matched, cause an ident
+# (RFC 931) lookup to be performed for this request. For
+# example, you might choose to always perform ident lookups
+# for your main multi-user Unix boxes, but not for your Macs
+# and PCs. By default, ident lookups are not performed for
+# any requests.
+#
+# To enable ident lookups for specific client addresses, you
+# can follow this example:
+#
+# acl ident_aware_hosts src 198.168.1.0/24
+# ident_lookup_access allow ident_aware_hosts
+# ident_lookup_access deny all
+#
+# Only src type ACL checks are fully supported. A srcdomain
+# ACL might work at times, but it will not always provide
+# the correct result.
+#
+# This clause only supports fast acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#Default:
+# Unless rules exist in squid.conf, IDENT is not fetched.
+
+# TAG: reply_body_max_size size [acl acl...]
+# This option specifies the maximum size of a reply body. It can be
+# used to prevent users from downloading very large files, such as
+# MP3's and movies. When the reply headers are received, the
+# reply_body_max_size lines are processed, and the first line where
+# all (if any) listed ACLs are true is used as the maximum body size
+# for this reply.
+#
+# This size is checked twice. First when we get the reply headers,
+# we check the content-length value. If the content length value exists
+# and is larger than the allowed size, the request is denied and the
+# user receives an error message that says "the request or reply
+# is too large." If there is no content-length, and the reply
+# size exceeds this limit, the client's connection is just closed
+# and they will receive a partial reply.
+#
+# WARNING: downstream caches probably can not detect a partial reply
+# if there is no content-length header, so they will cache
+# partial responses and give them out as hits. You should NOT
+# use this option if you have downstream caches.
+#
+# WARNING: A maximum size smaller than the size of squid's error messages
+# will cause an infinite loop and crash squid. Ensure that the smallest
+#	non-zero value you use is greater than the maximum header size plus
+# the size of your largest error page.
+#
+# If you set this parameter none (the default), there will be
+# no limit imposed.
+#
+# Configuration Format is:
+# reply_body_max_size SIZE UNITS [acl ...]
+# ie.
+# reply_body_max_size 10 MB
+#
+#Default:
+# No limit is applied.
+
+# NETWORK OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: http_port
+# Usage: port [mode] [options]
+# hostname:port [mode] [options]
+# 1.2.3.4:port [mode] [options]
+#
+# The socket addresses where Squid will listen for HTTP client
+# requests. You may specify multiple socket addresses.
+# There are three forms: port alone, hostname with port, and
+# IP address with port. If you specify a hostname or IP
+# address, Squid binds the socket to that specific
+# address. Most likely, you do not need to bind to a specific
+# address, so you can use the port number alone.
+#
+# If you are running Squid in accelerator mode, you
+# probably want to listen on port 80 also, or instead.
+#
+# The -a command line option may be used to specify additional
+# port(s) where Squid listens for proxy request. Such ports will
+# be plain proxy ports with no options.
+#
+# You may specify multiple socket addresses on multiple lines.
+#
+# Modes:
+#
+# intercept Support for IP-Layer NAT interception delivering
+# traffic to this Squid port.
+# NP: disables authentication on the port.
+#
+# tproxy Support Linux TPROXY (or BSD divert-to) with spoofing
+# of outgoing connections using the client IP address.
+# NP: disables authentication on the port.
+#
+# accel Accelerator / reverse proxy mode
+#
+# ssl-bump For each CONNECT request allowed by ssl_bump ACLs,
+# establish secure connection with the client and with
+# the server, decrypt HTTPS messages as they pass through
+# Squid, and treat them as unencrypted HTTP messages,
+# becoming the man-in-the-middle.
+#
+# The ssl_bump option is required to fully enable
+# bumping of CONNECT requests.
+#
+# Omitting the mode flag causes default forward proxy mode to be used.
+#
+#
+# Accelerator Mode Options:
+#
+# defaultsite=domainname
+# What to use for the Host: header if it is not present
+# in a request. Determines what site (not origin server)
+# accelerators should consider the default.
+#
+# no-vhost Disable using HTTP/1.1 Host header for virtual domain support.
+#
+# protocol= Protocol to reconstruct accelerated and intercepted
+# requests with. Defaults to HTTP/1.1 for http_port and
+# HTTPS/1.1 for https_port.
+# When an unsupported value is configured Squid will
+# produce a FATAL error.
+# Values: HTTP or HTTP/1.1, HTTPS or HTTPS/1.1
+#
+# vport Virtual host port support. Using the http_port number
+# instead of the port passed on Host: headers.
+#
+# vport=NN Virtual host port support. Using the specified port
+# number instead of the port passed on Host: headers.
+#
+# act-as-origin
+# Act as if this Squid is the origin server.
+# This currently means generate new Date: and Expires:
+# headers on HIT instead of adding Age:.
+#
+# ignore-cc Ignore request Cache-Control headers.
+#
+# WARNING: This option violates HTTP specifications if
+# used in non-accelerator setups.
+#
+# allow-direct Allow direct forwarding in accelerator mode. Normally
+# accelerated requests are denied direct forwarding as if
+# never_direct was used.
+#
+# WARNING: this option opens accelerator mode to security
+# vulnerabilities usually only affecting in interception
+# mode. Make sure to protect forwarding with suitable
+# http_access rules when using this.
+#
+#
+# SSL Bump Mode Options:
+# In addition to these options ssl-bump requires TLS/SSL options.
+#
+# generate-host-certificates[=<on|off>]
+# Dynamically create SSL server certificates for the
+#			destination hosts of bumped CONNECT requests. When
+# enabled, the cert and key options are used to sign
+# generated certificates. Otherwise generated
+# certificate will be selfsigned.
+#			If a CA certificate is present, the lifetime of the
+#			generated certificate equals the lifetime of the CA
+#			certificate. If the generated certificate is
+#			selfsigned, its lifetime is three years.
+# This option is disabled by default. See the ssl-bump
+# option above for more information.
+#
+# dynamic_cert_mem_cache_size=SIZE
+# Approximate total RAM size spent on cached generated
+# certificates. If set to zero, caching is disabled.
+#
+# TLS / SSL Options:
+#
+# cert= Path to SSL certificate (PEM format).
+#
+# key= Path to SSL private key file (PEM format)
+# if not specified, the certificate file is
+# assumed to be a combined certificate and
+# key file.
+#
+# version= The version of SSL/TLS supported
+# 1 automatic (default)
+# 2 SSLv2 only
+# 3 SSLv3 only
+# 4 TLSv1.0 only
+# 5 TLSv1.1 only
+# 6 TLSv1.2 only
+#
+# cipher= Colon separated list of supported ciphers.
+# NOTE: some ciphers such as EDH ciphers depend on
+# additional settings. If those settings are
+# omitted the ciphers may be silently ignored
+# by the OpenSSL library.
+#
+# options= Various SSL implementation options. The most important
+# being:
+# NO_SSLv2 Disallow the use of SSLv2
+# NO_SSLv3 Disallow the use of SSLv3
+# NO_TLSv1 Disallow the use of TLSv1.0
+# NO_TLSv1_1 Disallow the use of TLSv1.1
+# NO_TLSv1_2 Disallow the use of TLSv1.2
+# SINGLE_DH_USE Always create a new key when using
+# temporary/ephemeral DH key exchanges
+# NO_TICKET Disables TLS tickets extension
+#
+# SINGLE_ECDH_USE
+# Enable ephemeral ECDH key exchange.
+# The adopted curve should be specified
+# using the tls-dh option.
+#
+# ALL Enable various bug workarounds
+# suggested as "harmless" by OpenSSL
+# Be warned that this reduces SSL/TLS
+# strength to some attacks.
+# See OpenSSL SSL_CTX_set_options documentation for a
+# complete list of options.
+#
+# clientca= File containing the list of CAs to use when
+# requesting a client certificate.
+#
+# cafile= File containing additional CA certificates to
+# use when verifying client certificates. If unset
+# clientca will be used.
+#
+# capath= Directory containing additional CA certificates
+# and CRL lists to use when verifying client certificates.
+#
+# crlfile= File of additional CRL lists to use when verifying
+# the client certificate, in addition to CRLs stored in
+# the capath. Implies VERIFY_CRL flag below.
+#
+# tls-dh=[curve:]file
+# File containing DH parameters for temporary/ephemeral DH key
+# exchanges, optionally prefixed by a curve for ephemeral ECDH
+# key exchanges.
+# See OpenSSL documentation for details on how to create the
+# DH parameter file. Supported curves for ECDH can be listed
+# using the "openssl ecparam -list_curves" command.
+# WARNING: EDH and EECDH ciphers will be silently disabled if
+# this option is not set.
+#
+# sslflags= Various flags modifying the use of SSL:
+# DELAYED_AUTH
+# Don't request client certificates
+# immediately, but wait until acl processing
+# requires a certificate (not yet implemented).
+# NO_DEFAULT_CA
+# Don't use the default CA lists built in
+# to OpenSSL.
+# NO_SESSION_REUSE
+# Don't allow for session reuse. Each connection
+# will result in a new SSL session.
+# VERIFY_CRL
+# Verify CRL lists when accepting client
+# certificates.
+# VERIFY_CRL_ALL
+# Verify CRL lists for all certificates in the
+# client certificate chain.
+#
+# sslcontext= SSL session ID context identifier.
+#
+# Other Options:
+#
+# connection-auth[=on|off]
+# use connection-auth=off to tell Squid to prevent
+# forwarding Microsoft connection oriented authentication
+# (NTLM, Negotiate and Kerberos)
+#
+# disable-pmtu-discovery=
+# Control Path-MTU discovery usage:
+# off lets OS decide on what to do (default).
+# transparent disable PMTU discovery when transparent
+# support is enabled.
+#		always		disable PMTU discovery unconditionally.
+#
+# In many setups of transparently intercepting proxies
+# Path-MTU discovery can not work on traffic towards the
+# clients. This is the case when the intercepting device
+# does not fully track connections and fails to forward
+# ICMP must fragment messages to the cache server. If you
+# have such setup and experience that certain clients
+# sporadically hang or never complete requests set
+# disable-pmtu-discovery option to 'transparent'.
+#
+#	name=	Specifies an internal name for the port. Defaults to
+# the port specification (port or addr:port)
+#
+# tcpkeepalive[=idle,interval,timeout]
+# Enable TCP keepalive probes of idle connections.
+# In seconds; idle is the initial time before TCP starts
+# probing the connection, interval how often to probe, and
+# timeout the time before giving up.
+#
+# require-proxy-header
+# Require PROXY protocol version 1 or 2 connections.
+# The proxy_protocol_access is required to whitelist
+# downstream proxies which can be trusted.
+#
+# If you run Squid on a dual-homed machine with an internal
+# and an external interface we recommend you to specify the
+# internal address:port in http_port. This way Squid will only be
+# visible on the internal address.
+#
+#
+
+# Squid normally listens to port 3128
+http_port 3128
+
+# TAG: https_port
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# Usage: [ip:]port cert=certificate.pem [key=key.pem] [mode] [options...]
+#
+# The socket address where Squid will listen for client requests made
+# over TLS or SSL connections. Commonly referred to as HTTPS.
+#
+# This is most useful for situations where you are running squid in
+# accelerator mode and you want to do the SSL work at the accelerator level.
+#
+# You may specify multiple socket addresses on multiple lines,
+# each with their own SSL certificate and/or options.
+#
+# Modes:
+#
+# accel Accelerator / reverse proxy mode
+#
+# intercept Support for IP-Layer interception of
+# outgoing requests without browser settings.
+# NP: disables authentication and IPv6 on the port.
+#
+# tproxy Support Linux TPROXY for spoofing outgoing
+# connections using the client IP address.
+# NP: disables authentication and maybe IPv6 on the port.
+#
+# ssl-bump For each intercepted connection allowed by ssl_bump
+# ACLs, establish a secure connection with the client and with
+# the server, decrypt HTTPS messages as they pass through
+# Squid, and treat them as unencrypted HTTP messages,
+# becoming the man-in-the-middle.
+#
+# An "ssl_bump server-first" match is required to
+# fully enable bumping of intercepted SSL connections.
+#
+# Requires tproxy or intercept.
+#
+# Omitting the mode flag causes default forward proxy mode to be used.
+#
+#
+# See http_port for a list of generic options
+#
+#
+# SSL Options:
+#
+# cert= Path to SSL certificate (PEM format).
+#
+# key= Path to SSL private key file (PEM format)
+# if not specified, the certificate file is
+# assumed to be a combined certificate and
+# key file.
+#
+# version= The version of SSL/TLS supported
+# 1 automatic (default)
+# 2 SSLv2 only
+# 3 SSLv3 only
+# 4 TLSv1 only
+#
+# cipher= Colon separated list of supported ciphers.
+#
+# options= Various SSL engine options. The most important
+# being:
+# NO_SSLv2 Disallow the use of SSLv2
+# NO_SSLv3 Disallow the use of SSLv3
+# NO_TLSv1 Disallow the use of TLSv1
+#
+# SINGLE_DH_USE Always create a new key when using
+# temporary/ephemeral DH key exchanges
+#
+# SINGLE_ECDH_USE
+# Enable ephemeral ECDH key exchange.
+# The adopted curve should be specified
+# using the tls-dh option.
+#
+# See src/ssl_support.c or OpenSSL SSL_CTX_set_options
+# documentation for a complete list of options.
+#
+# clientca= File containing the list of CAs to use when
+# requesting a client certificate.
+#
+# cafile= File containing additional CA certificates to
+# use when verifying client certificates. If unset
+# clientca will be used.
+#
+# capath= Directory containing additional CA certificates
+# and CRL lists to use when verifying client certificates.
+#
+# crlfile= File of additional CRL lists to use when verifying
+# the client certificate, in addition to CRLs stored in
+# the capath. Implies VERIFY_CRL flag below.
+#
+# tls-dh=[curve:]file
+# File containing DH parameters for temporary/ephemeral DH key
+# exchanges, optionally prefixed by a curve for ephemeral ECDH
+# key exchanges.
+#
+# sslflags= Various flags modifying the use of SSL:
+# DELAYED_AUTH
+# Don't request client certificates
+# immediately, but wait until acl processing
+# requires a certificate (not yet implemented).
+# NO_DEFAULT_CA
+# Don't use the default CA lists built in
+# to OpenSSL.
+# NO_SESSION_REUSE
+# Don't allow for session reuse. Each connection
+# will result in a new SSL session.
+# VERIFY_CRL
+# Verify CRL lists when accepting client
+# certificates.
+# VERIFY_CRL_ALL
+# Verify CRL lists for all certificates in the
+# client certificate chain.
+#
+# sslcontext= SSL session ID context identifier.
+#
+# generate-host-certificates[=<on|off>]
+# Dynamically create SSL server certificates for the
+#			destination hosts of bumped SSL requests. When
+# enabled, the cert and key options are used to sign
+# generated certificates. Otherwise generated
+# certificate will be selfsigned.
+#			If a CA certificate is present, the lifetime of the
+#			generated certificate equals the lifetime of the CA
+#			certificate. If the generated certificate is
+#			selfsigned, its lifetime is three years.
+# This option is disabled by default. See the ssl-bump
+# option above for more information.
+#
+# dynamic_cert_mem_cache_size=SIZE
+# Approximate total RAM size spent on cached generated
+# certificates. If set to zero, caching is disabled.
+#
+# See http_port for a list of available options.
+#Default:
+# none
+
+# TAG: ftp_port
+# Enables Native FTP proxy by specifying the socket address where Squid
+# listens for FTP client requests. See http_port directive for various
+# ways to specify the listening address and mode.
+#
+# Usage: ftp_port address [mode] [options]
+#
+# WARNING: This is a new, experimental, complex feature that has seen
+# limited production exposure. Some Squid modules (e.g., caching) do not
+# currently work with native FTP proxying, and many features have not
+# even been tested for compatibility. Test well before deploying!
+#
+# Native FTP proxying differs substantially from proxying HTTP requests
+# with ftp:// URIs because Squid works as an FTP server and receives
+# actual FTP commands (rather than HTTP requests with FTP URLs).
+#
+# Native FTP commands accepted at ftp_port are internally converted or
+# wrapped into HTTP-like messages. The same happens to Native FTP
+# responses received from FTP origin servers. Those HTTP-like messages
+# are shoveled through regular access control and adaptation layers
+# between the FTP client and the FTP origin server. This allows Squid to
+# examine, adapt, block, and log FTP exchanges. Squid reuses most HTTP
+# mechanisms when shoveling wrapped FTP messages. For example,
+# http_access and adaptation_access directives are used.
+#
+# Modes:
+#
+# intercept Same as http_port intercept. The FTP origin address is
+# determined based on the intended destination of the
+# intercepted connection.
+#
+# tproxy Support Linux TPROXY for spoofing outgoing
+# connections using the client IP address.
+# NP: disables authentication and maybe IPv6 on the port.
+#
+# By default (i.e., without an explicit mode option), Squid extracts the
+# FTP origin address from the login@origin parameter of the FTP USER
+# command. Many popular FTP clients support such native FTP proxying.
+#
+# Options:
+#
+# name=token Specifies an internal name for the port. Defaults to
+# the port address. Usable with myportname ACL.
+#
+# ftp-track-dirs
+# Enables tracking of FTP directories by injecting extra
+# PWD commands and adjusting Request-URI (in wrapping
+# HTTP requests) to reflect the current FTP server
+# directory. Tracking is disabled by default.
+#
+# protocol=FTP Protocol to reconstruct accelerated and intercepted
+# requests with. Defaults to FTP. No other accepted
+# values have been tested with. An unsupported value
+# results in a FATAL error. Accepted values are FTP,
+# HTTP (or HTTP/1.1), and HTTPS (or HTTPS/1.1).
+#
+# Other http_port modes and options that are not specific to HTTP and
+# HTTPS may also work.
+#Default:
+# none
+
+# TAG: tcp_outgoing_tos
+# Allows you to select a TOS/Diffserv value for packets outgoing
+# on the server side, based on an ACL.
+#
+# tcp_outgoing_tos ds-field [!]aclname ...
+#
+# Example where normal_service_net uses the TOS value 0x00
+# and good_service_net uses 0x20
+#
+# acl normal_service_net src 10.0.0.0/24
+# acl good_service_net src 10.0.1.0/24
+# tcp_outgoing_tos 0x00 normal_service_net
+# tcp_outgoing_tos 0x20 good_service_net
+#
+# TOS/DSCP values really only have local significance - so you should
+# know what you're specifying. For more information, see RFC2474,
+# RFC2475, and RFC3260.
+#
+#	The TOS/DSCP byte must be exactly that - an octet value 0 - 255, or
+# "default" to use whatever default your host has.
+# Note that only multiples of 4 are usable as the two rightmost bits have
+# been redefined for use by ECN (RFC 3168 section 23.1).
+# The squid parser will enforce this by masking away the ECN bits.
+#
+# Processing proceeds in the order specified, and stops at first fully
+# matching line.
+#
+# Only fast ACLs are supported.
+#Default:
+# none
+
+# TAG: clientside_tos
+# Allows you to select a TOS/DSCP value for packets being transmitted
+# on the client-side, based on an ACL.
+#
+# clientside_tos ds-field [!]aclname ...
+#
+# Example where normal_service_net uses the TOS value 0x00
+# and good_service_net uses 0x20
+#
+# acl normal_service_net src 10.0.0.0/24
+# acl good_service_net src 10.0.1.0/24
+# clientside_tos 0x00 normal_service_net
+# clientside_tos 0x20 good_service_net
+#
+# Note: This feature is incompatible with qos_flows. Any TOS values set here
+# will be overwritten by TOS values in qos_flows.
+#
+#	The TOS/DSCP byte must be exactly that - an octet value 0 - 255, or
+# "default" to use whatever default your host has.
+# Note that only multiples of 4 are usable as the two rightmost bits have
+# been redefined for use by ECN (RFC 3168 section 23.1).
+# The squid parser will enforce this by masking away the ECN bits.
+#
+#Default:
+# none
+
+# TAG: tcp_outgoing_mark
+# Note: This option is only available if Squid is rebuilt with the
+# Packet MARK (Linux)
+#
+# Allows you to apply a Netfilter mark value to outgoing packets
+# on the server side, based on an ACL.
+#
+# tcp_outgoing_mark mark-value [!]aclname ...
+#
+# Example where normal_service_net uses the mark value 0x00
+# and good_service_net uses 0x20
+#
+# acl normal_service_net src 10.0.0.0/24
+# acl good_service_net src 10.0.1.0/24
+# tcp_outgoing_mark 0x00 normal_service_net
+# tcp_outgoing_mark 0x20 good_service_net
+#
+# Only fast ACLs are supported.
+#Default:
+# none
+
+# TAG: clientside_mark
+# Note: This option is only available if Squid is rebuilt with the
+# Packet MARK (Linux)
+#
+# Allows you to apply a Netfilter mark value to packets being transmitted
+# on the client-side, based on an ACL.
+#
+# clientside_mark mark-value [!]aclname ...
+#
+# Example where normal_service_net uses the mark value 0x00
+# and good_service_net uses 0x20
+#
+# acl normal_service_net src 10.0.0.0/24
+# acl good_service_net src 10.0.1.0/24
+# clientside_mark 0x00 normal_service_net
+# clientside_mark 0x20 good_service_net
+#
+# Note: This feature is incompatible with qos_flows. Any mark values set here
+# will be overwritten by mark values in qos_flows.
+#Default:
+# none
+
+# TAG: qos_flows
+# Allows you to select a TOS/DSCP value to mark outgoing
+# connections to the client, based on where the reply was sourced.
+# For platforms using netfilter, allows you to set a netfilter mark
+# value instead of, or in addition to, a TOS value.
+#
+# By default this functionality is disabled. To enable it with the default
+# settings simply use "qos_flows mark" or "qos_flows tos". Default
+# settings will result in the netfilter mark or TOS value being copied
+# from the upstream connection to the client. Note that it is the connection
+# CONNMARK value not the packet MARK value that is copied.
+#
+# It is not currently possible to copy the mark or TOS value from the
+# client to the upstream connection request.
+#
+# TOS values really only have local significance - so you should
+# know what you're specifying. For more information, see RFC2474,
+# RFC2475, and RFC3260.
+#
+#	The TOS/DSCP byte must be exactly that - an octet value 0 - 255.
+# Note that only multiples of 4 are usable as the two rightmost bits have
+# been redefined for use by ECN (RFC 3168 section 23.1).
+# The squid parser will enforce this by masking away the ECN bits.
+#
+# Mark values can be any unsigned 32-bit integer value.
+#
+# This setting is configured by setting the following values:
+#
+# tos|mark Whether to set TOS or netfilter mark values
+#
+# local-hit=0xFF Value to mark local cache hits.
+#
+# sibling-hit=0xFF Value to mark hits from sibling peers.
+#
+# parent-hit=0xFF Value to mark hits from parent peers.
+#
+# miss=0xFF[/mask] Value to mark cache misses. Takes precedence
+# over the preserve-miss feature (see below), unless
+# mask is specified, in which case only the bits
+# specified in the mask are written.
+#
+# The TOS variant of the following features are only possible on Linux
+# and require your kernel to be patched with the TOS preserving ZPH
+# patch, available from http://zph.bratcheda.org
+# No patch is needed to preserve the netfilter mark, which will work
+# with all variants of netfilter.
+#
+# disable-preserve-miss
+# This option disables the preservation of the TOS or netfilter
+# mark. By default, the existing TOS or netfilter mark value of
+# the response coming from the remote server will be retained
+# and masked with miss-mark.
+# NOTE: in the case of a netfilter mark, the mark must be set on
+# the connection (using the CONNMARK target) not on the packet
+# (MARK target).
+#
+# miss-mask=0xFF
+# Allows you to mask certain bits in the TOS or mark value
+# received from the remote server, before copying the value to
+# the TOS sent towards clients.
+# Default for tos: 0xFF (TOS from server is not changed).
+# Default for mark: 0xFFFFFFFF (mark from server is not changed).
+#
+# All of these features require the --enable-zph-qos compilation flag
+# (enabled by default). Netfilter marking also requires the
+# libnetfilter_conntrack libraries (--with-netfilter-conntrack) and
+# libcap 2.09+ (--with-libcap).
+#
+#Default:
+# none
+
+# TAG: tcp_outgoing_address
+# Allows you to map requests to different outgoing IP addresses
+# based on the username or source address of the user making
+# the request.
+#
+# tcp_outgoing_address ipaddr [[!]aclname] ...
+#
+# For example;
+# Forwarding clients with dedicated IPs for certain subnets.
+#
+# acl normal_service_net src 10.0.0.0/24
+# acl good_service_net src 10.0.2.0/24
+#
+# tcp_outgoing_address 2001:db8::c001 good_service_net
+# tcp_outgoing_address 10.1.0.2 good_service_net
+#
+# tcp_outgoing_address 2001:db8::beef normal_service_net
+# tcp_outgoing_address 10.1.0.1 normal_service_net
+#
+# tcp_outgoing_address 2001:db8::1
+# tcp_outgoing_address 10.1.0.3
+#
+# Processing proceeds in the order specified, and stops at first fully
+# matching line.
+#
+# Squid will add an implicit IP version test to each line.
+# Requests going to IPv4 websites will use the outgoing 10.1.0.* addresses.
+# Requests going to IPv6 websites will use the outgoing 2001:db8:* addresses.
+#
+#
+# NOTE: The use of this directive using client dependent ACLs is
+# incompatible with the use of server side persistent connections. To
+# ensure correct results it is best to set server_persistent_connections
+# to off when using this directive in such configurations.
+#
+#	NOTE: The use of this directive to set a local IP on outgoing TCP links
+#	is incompatible with using TPROXY to set the client IP on outbound TCP links.
+#	When needing to contact peers, use the no-tproxy cache_peer option and the
+#	client_dst_passthru directive to re-enable normal forwarding such as this.
+#
+#Default:
+# Address selection is performed by the operating system.
+
+# TAG: host_verify_strict
+# Regardless of this option setting, when dealing with intercepted
+# traffic, Squid always verifies that the destination IP address matches
+# the Host header domain or IP (called 'authority form URL').
+#
+# This enforcement is performed to satisfy a MUST-level requirement in
+# RFC 2616 section 14.23: "The Host field value MUST represent the naming
+# authority of the origin server or gateway given by the original URL".
+#
+# When set to ON:
+# Squid always responds with an HTTP 409 (Conflict) error
+# page and logs a security warning if there is no match.
+#
+# Squid verifies that the destination IP address matches
+# the Host header for forward-proxy and reverse-proxy traffic
+# as well. For those traffic types, Squid also enables the
+# following checks, comparing the corresponding Host header
+# and Request-URI components:
+#
+# * The host names (domain or IP) must be identical,
+# but valueless or missing Host header disables all checks.
+# For the two host names to match, both must be either IP
+# or FQDN.
+#
+# * Port numbers must be identical, but if a port is missing
+# the scheme-default port is assumed.
+#
+#
+# When set to OFF (the default):
+# Squid allows suspicious requests to continue but logs a
+# security warning and blocks caching of the response.
+#
+# * Forward-proxy traffic is not checked at all.
+#
+# * Reverse-proxy traffic is not checked at all.
+#
+# * Intercepted traffic which passes verification is handled
+# according to client_dst_passthru.
+#
+# * Intercepted requests which fail verification are sent
+# to the client original destination instead of DIRECT.
+# This overrides 'client_dst_passthru off'.
+#
+# For now suspicious intercepted CONNECT requests are always
+# responded to with an HTTP 409 (Conflict) error page.
+#
+#
+# SECURITY NOTE:
+#
+# As described in CVE-2009-0801 when the Host: header alone is used
+# to determine the destination of a request it becomes trivial for
+# malicious scripts on remote websites to bypass browser same-origin
+# security policy and sandboxing protections.
+#
+# The cause of this is that such applets are allowed to perform their
+# own HTTP stack, in which case the same-origin policy of the browser
+# sandbox only verifies that the applet tries to contact the same IP
+# as from where it was loaded at the IP level. The Host: header may
+# be different from the connected IP and approved origin.
+#
+#Default:
+# host_verify_strict off
+
+# TAG: client_dst_passthru
+# With NAT or TPROXY intercepted traffic Squid may pass the request
+# directly to the original client destination IP or seek a faster
+# source using the HTTP Host header.
+#
+# Using Host to locate alternative servers can provide faster
+# connectivity with a range of failure recovery options.
+# But can also lead to connectivity trouble when the client and
+# server are attempting stateful interactions unaware of the proxy.
+#
+# This option (on by default) prevents alternative DNS entries being
+# located to send intercepted traffic DIRECT to an origin server.
+# The clients original destination IP and port will be used instead.
+#
+# Regardless of this option setting, when dealing with intercepted
+# traffic Squid will verify the Host: header and any traffic which
+# fails Host verification will be treated as if this option were ON.
+#
+# see host_verify_strict for details on the verification process.
+#Default:
+# client_dst_passthru on
+
+# SSL OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: ssl_unclean_shutdown
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+#	Some browsers (especially MSIE) bug out on SSL shutdown
+#	messages.
+#Default:
+# ssl_unclean_shutdown off
+
+# TAG: ssl_engine
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# The OpenSSL engine to use. You will need to set this if you
+# would like to use hardware SSL acceleration for example.
+#Default:
+# none
+
+# TAG: sslproxy_client_certificate
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# Client SSL Certificate to use when proxying https:// URLs
+#Default:
+# none
+
+# TAG: sslproxy_client_key
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# Client SSL Key to use when proxying https:// URLs
+#Default:
+# none
+
+# TAG: sslproxy_version
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# SSL version level to use when proxying https:// URLs
+#
+# The versions of SSL/TLS supported:
+#
+# 1 automatic (default)
+# 2 SSLv2 only
+# 3 SSLv3 only
+# 4 TLSv1.0 only
+# 5 TLSv1.1 only
+# 6 TLSv1.2 only
+#Default:
+# automatic SSL/TLS version negotiation
+
+# TAG: sslproxy_options
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# Colon (:) or comma (,) separated list of SSL implementation options
+# to use when proxying https:// URLs
+#
+# The most important being:
+#
+# NO_SSLv2 Disallow the use of SSLv2
+# NO_SSLv3 Disallow the use of SSLv3
+# NO_TLSv1 Disallow the use of TLSv1.0
+# NO_TLSv1_1 Disallow the use of TLSv1.1
+# NO_TLSv1_2 Disallow the use of TLSv1.2
+#
+# SINGLE_DH_USE
+# Always create a new key when using temporary/ephemeral
+# DH key exchanges
+#
+# NO_TICKET
+# Disable use of RFC5077 session tickets. Some servers
+# may have problems understanding the TLS extension due
+# to ambiguous specification in RFC4507.
+#
+# ALL Enable various bug workarounds suggested as "harmless"
+# by OpenSSL. Be warned that this may reduce SSL/TLS
+# strength to some attacks.
+#
+# See the OpenSSL SSL_CTX_set_options documentation for a
+# complete list of possible options.
+#
+# WARNING: This directive takes a single token. If a space is used
+# the value(s) after that space are SILENTLY IGNORED.
+#Default:
+# none
+
+# TAG: sslproxy_cipher
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# SSL cipher list to use when proxying https:// URLs
+#
+# Colon separated list of supported ciphers.
+#Default:
+# none
+
+# TAG: sslproxy_cafile
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# file containing CA certificates to use when verifying server
+# certificates while proxying https:// URLs
+#Default:
+# none
+
+# TAG: sslproxy_capath
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# directory containing CA certificates to use when verifying
+# server certificates while proxying https:// URLs
+#Default:
+# none
+
+# TAG: sslproxy_session_ttl
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# Sets the timeout value for SSL sessions
+#Default:
+# sslproxy_session_ttl 300
+
+# TAG: sslproxy_session_cache_size
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# Sets the cache size to use for ssl session
+#Default:
+# sslproxy_session_cache_size 2 MB
+
+# TAG: sslproxy_foreign_intermediate_certs
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# Many origin servers fail to send their full server certificate
+# chain for verification, assuming the client already has or can
+# easily locate any missing intermediate certificates.
+#
+# Squid uses the certificates from the specified file to fill in
+# these missing chains when trying to validate origin server
+# certificate chains.
+#
+# The file is expected to contain zero or more PEM-encoded
+# intermediate certificates. These certificates are not treated
+# as trusted root certificates, and any self-signed certificate in
+# this file will be ignored.
+#Default:
+# none
+
+# TAG: sslproxy_cert_sign_hash
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# Sets the hashing algorithm to use when signing generated certificates.
+# Valid algorithm names depend on the OpenSSL library used. The following
+# names are usually available: sha1, sha256, sha512, and md5. Please see
+# your OpenSSL library manual for the available hashes. By default, Squids
+# that support this option use sha256 hashes.
+#
+# Squid does not forcefully purge cached certificates that were generated
+# with an algorithm other than the currently configured one. They remain
+# in the cache, subject to the regular cache eviction policy, and become
+# useful if the algorithm changes again.
+#Default:
+# none
+
+# TAG: ssl_bump
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# This option is consulted when a CONNECT request is received on
+# an http_port (or a new connection is intercepted at an
+# https_port), provided that port was configured with an ssl-bump
+# flag. The subsequent data on the connection is either treated as
+# HTTPS and decrypted OR tunneled at TCP level without decryption,
+# depending on the first matching bumping "action".
+#
+# ssl_bump <action> [!]acl ...
+#
+# The following bumping actions are currently supported:
+#
+# splice
+# Become a TCP tunnel without decrypting proxied traffic.
+# This is the default action.
+#
+# bump
+# When used on step SslBump1, establishes a secure connection
+# with the client first, then connect to the server.
+# When used on step SslBump2 or SslBump3, establishes a secure
+# connection with the server and, using a mimicked server
+# certificate, with the client.
+#
+# peek
+# Receive client (step SslBump1) or server (step SslBump2)
+# certificate while preserving the possibility of splicing the
+# connection. Peeking at the server certificate (during step 2)
+# usually precludes bumping of the connection at step 3.
+#
+# stare
+# Receive client (step SslBump1) or server (step SslBump2)
+# certificate while preserving the possibility of bumping the
+# connection. Staring at the server certificate (during step 2)
+# usually precludes splicing of the connection at step 3.
+#
+# terminate
+# Close client and server connections.
+#
+# Backward compatibility actions available at step SslBump1:
+#
+# client-first
+# Bump the connection. Establish a secure connection with the
+# client first, then connect to the server. This old mode does
+# not allow Squid to mimic server SSL certificate and does not
+# work with intercepted SSL connections.
+#
+# server-first
+# Bump the connection. Establish a secure connection with the
+# server first, then establish a secure connection with the
+# client, using a mimicked server certificate. Works with both
+# CONNECT requests and intercepted SSL connections, but does
+# not allow to make decisions based on SSL handshake info.
+#
+# peek-and-splice
+# Decide whether to bump or splice the connection based on
+# client-to-squid and server-to-squid SSL hello messages.
+# XXX: Remove.
+#
+# none
+# Same as the "splice" action.
+#
+# All ssl_bump rules are evaluated at each of the supported bumping
+# steps. Rules with actions that are impossible at the current step are
+# ignored. The first matching ssl_bump action wins and is applied at the
+# end of the current step. If no rules match, the splice action is used.
+# See the at_step ACL for a list of the supported SslBump steps.
+#
+# This clause supports both fast and slow acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#
+# See also: http_port ssl-bump, https_port ssl-bump, and acl at_step.
+#
+#
+# # Example: Bump all TLS connections except those originating from
+# # localhost or those going to example.com.
+#
+# acl broken_sites ssl::server_name .example.com
+# ssl_bump splice localhost
+# ssl_bump splice broken_sites
+# ssl_bump bump all
+#Default:
+# Become a TCP tunnel without decrypting proxied traffic.
+
+# TAG: sslproxy_flags
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# Various flags modifying the use of SSL while proxying https:// URLs:
+# DONT_VERIFY_PEER Accept certificates that fail verification.
+# For refined control, see sslproxy_cert_error.
+# NO_DEFAULT_CA Don't use the default CA list built in
+# to OpenSSL.
+#Default:
+# none
+
+# TAG: sslproxy_cert_error
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# Use this ACL to bypass server certificate validation errors.
+#
+# For example, the following lines will bypass all validation errors
+# when talking to servers for example.com. All other
+# validation errors will result in ERR_SECURE_CONNECT_FAIL error.
+#
+# acl BrokenButTrustedServers dstdomain example.com
+# sslproxy_cert_error allow BrokenButTrustedServers
+# sslproxy_cert_error deny all
+#
+# This clause only supports fast acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+# Using slow acl types may result in server crashes
+#
+# Without this option, all server certificate validation errors
+# terminate the transaction to protect Squid and the client.
+#
+# SQUID_X509_V_ERR_INFINITE_VALIDATION error cannot be bypassed
+# but should not happen unless your OpenSSL library is buggy.
+#
+# SECURITY WARNING:
+# Bypassing validation errors is dangerous because an
+# error usually implies that the server cannot be trusted
+# and the connection may be insecure.
+#
+# See also: sslproxy_flags and DONT_VERIFY_PEER.
+#Default:
+# Server certificate errors terminate the transaction.
+
+# TAG: sslproxy_cert_sign
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+#
+# sslproxy_cert_sign <signing algorithm> acl ...
+#
+# The following certificate signing algorithms are supported:
+#
+# signTrusted
+# Sign using the configured CA certificate which is usually
+# placed in and trusted by end-user browsers. This is the
+# default for trusted origin server certificates.
+#
+# signUntrusted
+# Sign to guarantee an X509_V_ERR_CERT_UNTRUSTED browser error.
+# This is the default for untrusted origin server certificates
+# that are not self-signed (see ssl::certUntrusted).
+#
+# signSelf
+# Sign using a self-signed certificate with the right CN to
+# generate a X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT error in the
+# browser. This is the default for self-signed origin server
+# certificates (see ssl::certSelfSigned).
+#
+# This clause only supports fast acl types.
+#
+# When sslproxy_cert_sign acl(s) match, Squid uses the corresponding
+# signing algorithm to generate the certificate and ignores all
+# subsequent sslproxy_cert_sign options (the first match wins). If no
+# acl(s) match, the default signing algorithm is determined by errors
+# detected when obtaining and validating the origin server certificate.
+#
+#	WARNING: SQUID_X509_V_ERR_DOMAIN_MISMATCH and ssl::certDomainMismatch can
+# be used with sslproxy_cert_adapt, but if and only if Squid is bumping a
+# CONNECT request that carries a domain name. In all other cases (CONNECT
+# to an IP address or an intercepted SSL connection), Squid cannot detect
+# the domain mismatch at certificate generation time when
+# bump-server-first is used.
+#Default:
+# none
+
+# TAG: sslproxy_cert_adapt
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+#
+# sslproxy_cert_adapt <adaptation algorithm> acl ...
+#
+# The following certificate adaptation algorithms are supported:
+#
+# setValidAfter
+# Sets the "Not After" property to the "Not After" property of
+# the CA certificate used to sign generated certificates.
+#
+# setValidBefore
+# Sets the "Not Before" property to the "Not Before" property of
+# the CA certificate used to sign generated certificates.
+#
+# setCommonName or setCommonName{CN}
+# Sets Subject.CN property to the host name specified as a
+# CN parameter or, if no explicit CN parameter was specified,
+# extracted from the CONNECT request. It is a misconfiguration
+# to use setCommonName without an explicit parameter for
+# intercepted or tproxied SSL connections.
+#
+# This clause only supports fast acl types.
+#
+# Squid first groups sslproxy_cert_adapt options by adaptation algorithm.
+# Within a group, when sslproxy_cert_adapt acl(s) match, Squid uses the
+# corresponding adaptation algorithm to generate the certificate and
+# ignores all subsequent sslproxy_cert_adapt options in that algorithm's
+# group (i.e., the first match wins within each algorithm group). If no
+# acl(s) match, the default mimicking action takes place.
+#
+#	WARNING: SQUID_X509_V_ERR_DOMAIN_MISMATCH and ssl::certDomainMismatch can
+# be used with sslproxy_cert_adapt, but if and only if Squid is bumping a
+# CONNECT request that carries a domain name. In all other cases (CONNECT
+# to an IP address or an intercepted SSL connection), Squid cannot detect
+# the domain mismatch at certificate generation time when
+# bump-server-first is used.
+#Default:
+# none
+
+# TAG: sslpassword_program
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# Specify a program used for entering SSL key passphrases
+# when using encrypted SSL certificate keys. If not specified
+# keys must either be unencrypted, or Squid started with the -N
+# option to allow it to query interactively for the passphrase.
+#
+# The key file name is given as argument to the program allowing
+# selection of the right password if you have multiple encrypted
+# keys.
+#Default:
+# none
+
+# OPTIONS RELATING TO EXTERNAL SSL_CRTD
+# -----------------------------------------------------------------------------
+
+# TAG: sslcrtd_program
+# Note: This option is only available if Squid is rebuilt with the
+# --enable-ssl-crtd
+#
+# Specify the location and options of the executable for ssl_crtd process.
+# /usr/lib/squid/ssl_crtd program requires -s and -M parameters
+# For more information use:
+# /usr/lib/squid/ssl_crtd -h
+#Default:
+# sslcrtd_program /usr/lib/squid/ssl_crtd -s /var/lib/ssl_db -M 4MB
+
+# TAG: sslcrtd_children
+# Note: This option is only available if Squid is rebuilt with the
+# --enable-ssl-crtd
+#
+#	The maximum number of processes to spawn to service SSL servers.
+#	The maximum this may be safely set to is 32.
+#
+# The startup= and idle= options allow some measure of skew in your
+# tuning.
+#
+# startup=N
+#
+# Sets the minimum number of processes to spawn when Squid
+# starts or reconfigures. When set to zero the first request will
+# cause spawning of the first child process to handle it.
+#
+#	Starting too few children temporarily slows Squid under load while it
+#	tries to spawn enough additional processes to cope with traffic.
+#
+# idle=N
+#
+# Sets a minimum of how many processes Squid is to try and keep available
+# at all times. When traffic begins to rise above what the existing
+# processes can handle this many more will be spawned up to the maximum
+# configured. A minimum setting of 1 is required.
+#
+# You must have at least one ssl_crtd process.
+#Default:
+# sslcrtd_children 32 startup=5 idle=1
+
+# TAG: sslcrtvalidator_program
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+# Specify the location and options of the executable for ssl_crt_validator
+# process.
+#
+# Usage: sslcrtvalidator_program [ttl=n] [cache=n] path ...
+#
+# Options:
+# ttl=n TTL in seconds for cached results. The default is 60 secs
+# cache=n limit the result cache size. The default value is 2048
+#Default:
+# none
+
+# TAG: sslcrtvalidator_children
+# Note: This option is only available if Squid is rebuilt with the
+# --with-openssl
+#
+#	The maximum number of processes to spawn to service SSL servers.
+#	The maximum this may be safely set to is 32.
+#
+# The startup= and idle= options allow some measure of skew in your
+# tuning.
+#
+# startup=N
+#
+# Sets the minimum number of processes to spawn when Squid
+# starts or reconfigures. When set to zero the first request will
+# cause spawning of the first child process to handle it.
+#
+#	Starting too few children temporarily slows Squid under load while it
+#	tries to spawn enough additional processes to cope with traffic.
+#
+# idle=N
+#
+# Sets a minimum of how many processes Squid is to try and keep available
+# at all times. When traffic begins to rise above what the existing
+# processes can handle this many more will be spawned up to the maximum
+# configured. A minimum setting of 1 is required.
+#
+# concurrency=
+#
+#	The number of requests each certificate validator helper can handle in
+#	parallel. A value of 0 indicates the certificate validator does not
+#	support concurrency. Defaults to 1.
+#
+# When this directive is set to a value >= 1 then the protocol
+# used to communicate with the helper is modified to include
+# a request ID in front of the request/response. The request
+# ID from the request must be echoed back with the response
+# to that request.
+#
+# You must have at least one ssl_crt_validator process.
+#Default:
+# sslcrtvalidator_children 32 startup=5 idle=1 concurrency=1
+
+# OPTIONS WHICH AFFECT THE NEIGHBOR SELECTION ALGORITHM
+# -----------------------------------------------------------------------------
+
+# TAG: cache_peer
+# To specify other caches in a hierarchy, use the format:
+#
+# cache_peer hostname type http-port icp-port [options]
+#
+# For example,
+#
+# # proxy icp
+# # hostname type port port options
+# # -------------------- -------- ----- ----- -----------
+# cache_peer parent.foo.net parent 3128 3130 default
+# cache_peer sib1.foo.net sibling 3128 3130 proxy-only
+# cache_peer sib2.foo.net sibling 3128 3130 proxy-only
+# cache_peer example.com parent 80 0 default
+# cache_peer cdn.example.com sibling 3128 0
+#
+# type: either 'parent', 'sibling', or 'multicast'.
+#
+# proxy-port: The port number where the peer accepts HTTP requests.
+# For other Squid proxies this is usually 3128
+# For web servers this is usually 80
+#
+# icp-port: Used for querying neighbor caches about objects.
+# Set to 0 if the peer does not support ICP or HTCP.
+# See ICP and HTCP options below for additional details.
+#
+#
+# ==== ICP OPTIONS ====
+#
+# You MUST also set icp_port and icp_access explicitly when using these options.
+# The defaults will prevent peer traffic using ICP.
+#
+#
+# no-query Disable ICP queries to this neighbor.
+#
+# multicast-responder
+# Indicates the named peer is a member of a multicast group.
+# ICP queries will not be sent directly to the peer, but ICP
+# replies will be accepted from it.
+#
+# closest-only Indicates that, for ICP_OP_MISS replies, we'll only forward
+# CLOSEST_PARENT_MISSes and never FIRST_PARENT_MISSes.
+#
+# background-ping
+# To only send ICP queries to this neighbor infrequently.
+# This is used to keep the neighbor round trip time updated
+# and is usually used in conjunction with weighted-round-robin.
+#
+#
+# ==== HTCP OPTIONS ====
+#
+# You MUST also set htcp_port and htcp_access explicitly when using these options.
+# The defaults will prevent peer traffic using HTCP.
+#
+#
+# htcp Send HTCP, instead of ICP, queries to the neighbor.
+# You probably also want to set the "icp-port" to 4827
+# instead of 3130. This directive accepts a comma separated
+# list of options described below.
+#
+# htcp=oldsquid Send HTCP to old Squid versions (2.5 or earlier).
+#
+# htcp=no-clr Send HTCP to the neighbor but without
+# sending any CLR requests. This cannot be used with
+# only-clr.
+#
+# htcp=only-clr Send HTCP to the neighbor but ONLY CLR requests.
+# This cannot be used with no-clr.
+#
+# htcp=no-purge-clr
+# Send HTCP to the neighbor including CLRs but only when
+# they do not result from PURGE requests.
+#
+# htcp=forward-clr
+# Forward any HTCP CLR requests this proxy receives to the peer.
+#
+#
+# ==== PEER SELECTION METHODS ====
+#
+# The default peer selection method is ICP, with the first responding peer
+# being used as source. These options can be used for better load balancing.
+#
+#
+# default This is a parent cache which can be used as a "last-resort"
+# if a peer cannot be located by any of the peer-selection methods.
+# If specified more than once, only the first is used.
+#
+# round-robin Load-Balance parents which should be used in a round-robin
+# fashion in the absence of any ICP queries.
+# weight=N can be used to add bias.
+#
+# weighted-round-robin
+# Load-Balance parents which should be used in a round-robin
+# fashion with the frequency of each parent being based on the
+# round trip time. Closer parents are used more often.
+# Usually used for background-ping parents.
+# weight=N can be used to add bias.
+#
+# carp Load-Balance parents which should be used as a CARP array.
+# The requests will be distributed among the parents based on the
+# CARP load balancing hash function based on their weight.
+#
+# userhash Load-balance parents based on the client proxy_auth or ident username.
+#
+# sourcehash Load-balance parents based on the client source IP.
+#
+# multicast-siblings
+# To be used only for cache peers of type "multicast".
+# ALL members of this multicast group have "sibling"
+# relationship with it, not "parent". This is to a multicast
+# group when the requested object would be fetched only from
+# a "parent" cache, anyway. It's useful, e.g., when
+# configuring a pool of redundant Squid proxies, being
+# members of the same multicast group.
+#
+#
+# ==== PEER SELECTION OPTIONS ====
+#
+# weight=N use to affect the selection of a peer during any weighted
+# peer-selection mechanisms.
+# The weight must be an integer; default is 1,
+# larger weights are favored more.
+# This option does not affect parent selection if a peering
+# protocol is not in use.
+#
+# basetime=N Specify a base amount to be subtracted from round trip
+# times of parents.
+# It is subtracted before division by weight in calculating
+# which parent to fetch from. If the rtt is less than the
+# base time the rtt is set to a minimal value.
+#
+# ttl=N Specify a TTL to use when sending multicast ICP queries
+# to this address.
+# Only useful when sending to a multicast group.
+# Because we don't accept ICP replies from random
+# hosts, you must configure other group members as
+# peers with the 'multicast-responder' option.
+#
+# no-delay To prevent access to this neighbor from influencing the
+# delay pools.
+#
+# digest-url=URL Tell Squid to fetch the cache digest (if digests are
+# enabled) for this host from the specified URL rather
+# than the Squid default location.
+#
+#
+# ==== CARP OPTIONS ====
+#
+# carp-key=key-specification
+# use a different key than the full URL to hash against the peer.
+# the key-specification is a comma-separated list of the keywords
+# scheme, host, port, path, params
+# Order is not important.
+#
+# ==== ACCELERATOR / REVERSE-PROXY OPTIONS ====
+#
+# originserver Causes this parent to be contacted as an origin server.
+# Meant to be used in accelerator setups when the peer
+# is a web server.
+#
+# forceddomain=name
+# Set the Host header of requests forwarded to this peer.
+# Useful in accelerator setups where the server (peer)
+# expects a certain domain name but clients may request
+# others, i.e. example.com or www.example.com
+#
+# no-digest Disable request of cache digests.
+#
+# no-netdb-exchange
+# Disables requesting ICMP RTT database (NetDB).
+#
+#
+# ==== AUTHENTICATION OPTIONS ====
+#
+# login=user:password
+# If this is a personal/workgroup proxy and your parent
+# requires proxy authentication.
+#
+# Note: The string can include URL escapes (i.e. %20 for
+# spaces). This also means % must be written as %%.
+#
+# login=PASSTHRU
+# Send login details received from client to this peer.
+# Both Proxy- and WWW-Authorization headers are passed
+# without alteration to the peer.
+# Authentication is not required by Squid for this to work.
+#
+# Note: This will pass any form of authentication but
+# only Basic auth will work through a proxy unless the
+# connection-auth options are also used.
+#
+# login=PASS Send login details received from client to this peer.
+# Authentication is not required by this option.
+#
+# If there are no client-provided authentication headers
+# to pass on, but username and password are available
+# from an external ACL user= and password= result tags
+# they may be sent instead.
+#
+# Note: To combine this with proxy_auth both proxies must
+# share the same user database as HTTP only allows for
+# a single login (one for proxy, one for origin server).
+# Also be warned this will expose your users proxy
+# password to the peer. USE WITH CAUTION
+#
+# login=*:password
+# Send the username to the upstream cache, but with a
+# fixed password. This is meant to be used when the peer
+# is in another administrative domain, but it is still
+# needed to identify each user.
+# The star can optionally be followed by some extra
+# information which is added to the username. This can
+# be used to identify this proxy to the peer, similar to
+# the login=username:password option above.
+#
+# login=NEGOTIATE
+# If this is a personal/workgroup proxy and your parent
+# requires a secure proxy authentication.
+# The first principal from the default keytab or defined by
+# the environment variable KRB5_KTNAME will be used.
+#
+# WARNING: The connection may transmit requests from multiple
+# clients. Negotiate often assumes end-to-end authentication
+# and a single-client. Which is not strictly true here.
+#
+# login=NEGOTIATE:principal_name
+# If this is a personal/workgroup proxy and your parent
+# requires a secure proxy authentication.
+# The principal principal_name from the default keytab or
+# defined by the environment variable KRB5_KTNAME will be
+# used.
+#
+# WARNING: The connection may transmit requests from multiple
+# clients. Negotiate often assumes end-to-end authentication
+# and a single-client. Which is not strictly true here.
+#
+# connection-auth=on|off
+# Tell Squid that this peer does or does not support Microsoft
+# connection oriented authentication, and any such
+# challenges received from there should be ignored.
+# Default is auto to automatically determine the status
+# of the peer.
+#
+#
+# ==== SSL / HTTPS / TLS OPTIONS ====
+#
+# ssl Encrypt connections to this peer with SSL/TLS.
+#
+# sslcert=/path/to/ssl/certificate
+# A client SSL certificate to use when connecting to
+# this peer.
+#
+# sslkey=/path/to/ssl/key
+# The private SSL key corresponding to sslcert above.
+# If 'sslkey' is not specified 'sslcert' is assumed to
+# reference a combined file containing both the
+# certificate and the key.
+#
+# Notes:
+#
+# On Debian/Ubuntu systems a default snakeoil certificate is
+# available in /etc/ssl and users can set:
+#
+# cert=/etc/ssl/certs/ssl-cert-snakeoil.pem
+#
+# and
+#
+# key=/etc/ssl/private/ssl-cert-snakeoil.key
+#
+# for testing.
+#
+# sslversion=1|2|3|4|5|6
+# The SSL version to use when connecting to this peer
+# 1 = automatic (default)
+# 2 = SSL v2 only
+# 3 = SSL v3 only
+# 4 = TLS v1.0 only
+# 5 = TLS v1.1 only
+# 6 = TLS v1.2 only
+#
+# sslcipher=... The list of valid SSL ciphers to use when connecting
+# to this peer.
+#
+# ssloptions=... Specify various SSL implementation options:
+#
+# NO_SSLv2 Disallow the use of SSLv2
+# NO_SSLv3 Disallow the use of SSLv3
+# NO_TLSv1 Disallow the use of TLSv1.0
+# NO_TLSv1_1 Disallow the use of TLSv1.1
+# NO_TLSv1_2 Disallow the use of TLSv1.2
+#
+# SINGLE_DH_USE
+# Always create a new key when using
+# temporary/ephemeral DH key exchanges
+#
+# NO_TICKET
+# Disable use of RFC5077 session tickets. Some servers
+# may have problems understanding the TLS extension due
+# to ambiguous specification in RFC4507.
+#
+# ALL Enable various bug workarounds
+# suggested as "harmless" by OpenSSL
+# Be warned that this reduces SSL/TLS
+# strength to some attacks.
+#
+# See the OpenSSL SSL_CTX_set_options documentation for a
+# more complete list.
+#
+# sslcafile=... A file containing additional CA certificates to use
+# when verifying the peer certificate.
+#
+# sslcapath=... A directory containing additional CA certificates to
+# use when verifying the peer certificate.
+#
+# sslcrlfile=... A certificate revocation list file to use when
+# verifying the peer certificate.
+#
+# sslflags=... Specify various flags modifying the SSL implementation:
+#
+# DONT_VERIFY_PEER
+# Accept certificates even if they fail to
+# verify.
+# NO_DEFAULT_CA
+# Don't use the default CA list built in
+# to OpenSSL.
+# DONT_VERIFY_DOMAIN
+# Don't verify the peer certificate
+# matches the server name
+#
+# ssldomain= The peer name as advertised in its certificate.
+# Used for verifying the correctness of the received peer
+# certificate. If not specified the peer hostname will be
+# used.
+#
+# front-end-https
+# Enable the "Front-End-Https: On" header needed when
+# using Squid as a SSL frontend in front of Microsoft OWA.
+# See MS KB document Q307347 for details on this header.
+# If set to auto the header will only be added if the
+# request is forwarded as a https:// URL.
+#
+#
+# ==== GENERAL OPTIONS ====
+#
+# connect-timeout=N
+# A peer-specific connect timeout.
+# Also see the peer_connect_timeout directive.
+#
+# connect-fail-limit=N
+# How many times connecting to a peer must fail before
+# it is marked as down. Standby connection failures
+# count towards this limit. Default is 10.
+#
+# allow-miss Disable Squid's use of only-if-cached when forwarding
+# requests to siblings. This is primarily useful when
+# icp_hit_stale is used by the sibling. Excessive use
+# of this option may result in forwarding loops. One way
+# to prevent peering loops when using this option, is to
+# deny cache peer usage on requests from a peer:
+# acl fromPeer ...
+# cache_peer_access peerName deny fromPeer
+#
+# max-conn=N Limit the number of concurrent connections the Squid
+# may open to this peer, including already opened idle
+# and standby connections. There is no peer-specific
+# connection limit by default.
+#
+# A peer exceeding the limit is not used for new
+# requests unless a standby connection is available.
+#
+# max-conn currently works poorly with idle persistent
+# connections: When a peer reaches its max-conn limit,
+# and there are idle persistent connections to the peer,
+# the peer may not be selected because the limiting code
+# does not know whether Squid can reuse those idle
+# connections.
+#
+# standby=N Maintain a pool of N "hot standby" connections to an
+# UP peer, available for requests when no idle
+# persistent connection is available (or safe) to use.
+# By default and with zero N, no such pool is maintained.
+# N must not exceed the max-conn limit (if any).
+#
+# At start or after reconfiguration, Squid opens new TCP
+# standby connections until there are N connections
+# available and then replenishes the standby pool as
+# opened connections are used up for requests. A used
+# connection never goes back to the standby pool, but
+# may go to the regular idle persistent connection pool
+# shared by all peers and origin servers.
+#
+# Squid never opens multiple new standby connections
+# concurrently. This one-at-a-time approach minimizes
+# flooding-like effect on peers. Furthermore, just a few
+# standby connections should be sufficient in most cases
+# to supply most new requests with a ready-to-use
+# connection.
+#
+# Standby connections obey server_idle_pconn_timeout.
+# For the feature to work as intended, the peer must be
+# configured to accept and keep them open longer than
+# the idle timeout at the connecting Squid, to minimize
+# race conditions typical to idle used persistent
+# connections. Default request_timeout and
+# server_idle_pconn_timeout values ensure such a
+# configuration.
+#
+# name=xxx Unique name for the peer.
+# Required if you have multiple peers on the same host
+# but different ports.
+# This name can be used in cache_peer_access and similar
+# directives to identify the peer.
+# Can be used by outgoing access controls through the
+# peername ACL type.
+#
+# no-tproxy Do not use the client-spoof TPROXY support when forwarding
+# requests to this peer. Use normal address selection instead.
+# This overrides the spoof_client_ip ACL.
+#
+# proxy-only objects fetched from the peer will not be stored locally.
+#
+#Default:
+# none
+
+# TAG: cache_peer_domain
+# Use to limit the domains for which a neighbor cache will be
+# queried.
+#
+# Usage:
+# cache_peer_domain cache-host domain [domain ...]
+# cache_peer_domain cache-host !domain
+#
+# For example, specifying
+#
+# cache_peer_domain parent.foo.net .edu
+#
+# has the effect such that UDP query packets are sent to
+# 'parent.foo.net' only when the requested object exists on a
+# server in the .edu domain. Prefixing the domainname
+# with '!' means the cache will be queried for objects
+# NOT in that domain.
+#
+# NOTE: * Any number of domains may be given for a cache-host,
+# either on the same or separate lines.
+# * When multiple domains are given for a particular
+# cache-host, the first matched domain is applied.
+# * Cache hosts with no domain restrictions are queried
+# for all requests.
+# * There are no defaults.
+# * There is also a 'cache_peer_access' tag in the ACL
+# section.
+#Default:
+# none
+
+# TAG: cache_peer_access
+# Restricts usage of cache_peer proxies.
+#
+# Usage:
+# cache_peer_access peer-name allow|deny [!]aclname ...
+#
+# For the required peer-name parameter, use either the value of the
+# cache_peer name=value parameter or, if name=value is missing, the
+# cache_peer hostname parameter.
+#
+# This directive narrows down the selection of peering candidates, but
+# does not determine the order in which the selected candidates are
+# contacted. That order is determined by the peer selection algorithms
+# (see PEER SELECTION sections in the cache_peer documentation).
+#
+# If a deny rule matches, the corresponding peer will not be contacted
+# for the current transaction -- Squid will not send ICP queries and
+# will not forward HTTP requests to that peer. An allow match leaves
+# the corresponding peer in the selection. The first match for a given
+# peer wins for that peer.
+#
+# The relative order of cache_peer_access directives for the same peer
+# matters. The relative order of any two cache_peer_access directives
+# for different peers does not matter. To ease interpretation, it is a
+# good idea to group cache_peer_access directives for the same peer
+# together.
+#
+# A single cache_peer_access directive may be evaluated multiple times
+# for a given transaction because individual peer selection algorithms
+# may check it independently from each other. These redundant checks
+# may be optimized away in future Squid versions.
+#
+# This clause only supports fast acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#Default:
+# No peer usage restrictions.
+
+# TAG: neighbor_type_domain
+# Modify the cache_peer neighbor type when passing requests
+# about specific domains to the peer.
+#
+# Usage:
+# neighbor_type_domain neighbor parent|sibling domain domain ...
+#
+# For example:
+# cache_peer foo.example.com parent 3128 3130
+# neighbor_type_domain foo.example.com sibling .au .de
+#
+# The above configuration treats all requests to foo.example.com as a
+# parent proxy unless the request is for a .au or .de ccTLD domain name.
+#Default:
+# The peer type from cache_peer directive is used for all requests to that peer.
+
+# TAG: dead_peer_timeout (seconds)
+# This controls how long Squid waits to declare a peer cache
+# as "dead." If there are no ICP replies received in this
+# amount of time, Squid will declare the peer dead and not
+# expect to receive any further ICP replies. However, it
+# continues to send ICP queries, and will mark the peer as
+# alive upon receipt of the first subsequent ICP reply.
+#
+# This timeout also affects when Squid expects to receive ICP
+# replies from peers. If more than 'dead_peer' seconds have
+# passed since the last ICP reply was received, Squid will not
+# expect to receive an ICP reply on the next query. Thus, if
+# your time between requests is greater than this timeout, you
+# will see a lot of requests sent DIRECT to origin servers
+# instead of to your parents.
+#Default:
+# dead_peer_timeout 10 seconds
+
+# TAG: forward_max_tries
+# Controls how many different forward paths Squid will try
+# before giving up. See also forward_timeout.
+#
+# NOTE: connect_retries (default: none) can make each of these
+# possible forwarding paths be tried multiple times.
+#Default:
+# forward_max_tries 25
+
+# MEMORY CACHE OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: cache_mem (bytes)
+# NOTE: THIS PARAMETER DOES NOT SPECIFY THE MAXIMUM PROCESS SIZE.
+# IT ONLY PLACES A LIMIT ON HOW MUCH ADDITIONAL MEMORY SQUID WILL
+# USE AS A MEMORY CACHE OF OBJECTS. SQUID USES MEMORY FOR OTHER
+# THINGS AS WELL. SEE THE SQUID FAQ SECTION 8 FOR DETAILS.
+#
+# 'cache_mem' specifies the ideal amount of memory to be used
+# for:
+# * In-Transit objects
+# * Hot Objects
+# * Negative-Cached objects
+#
+# Data for these objects are stored in 4 KB blocks. This
+# parameter specifies the ideal upper limit on the total size of
+# 4 KB blocks allocated. In-Transit objects take the highest
+# priority.
+#
+# In-transit objects have priority over the others. When
+# additional space is needed for incoming data, negative-cached
+# and hot objects will be released. In other words, the
+# negative-cached and hot objects will fill up any unused space
+# not needed for in-transit objects.
+#
+# If circumstances require, this limit will be exceeded.
+# Specifically, if your incoming request rate requires more than
+# 'cache_mem' of memory to hold in-transit objects, Squid will
+# exceed this limit to satisfy the new requests. When the load
+# decreases, blocks will be freed until the high-water mark is
+# reached. Thereafter, blocks will be used to store hot
+# objects.
+#
+# If shared memory caching is enabled, Squid does not use the shared
+# cache space for in-transit objects, but they still consume as much
+# local memory as they need. For more details about the shared memory
+# cache, see memory_cache_shared.
+#Default:
+# cache_mem 256 MB
+
+# TAG: maximum_object_size_in_memory (bytes)
+# Objects greater than this size will not be kept in
+# the memory cache. This should be set high enough to keep objects
+# accessed frequently in memory to improve performance whilst low
+# enough to keep larger objects from hoarding cache_mem.
+#Default:
+# maximum_object_size_in_memory 512 KB
+
+# TAG: memory_cache_shared on|off
+# Controls whether the memory cache is shared among SMP workers.
+#
+# The shared memory cache is meant to occupy cache_mem bytes and replace
+# the non-shared memory cache, although some entities may still be
+# cached locally by workers for now (e.g., internal and in-transit
+# objects may be served from a local memory cache even if shared memory
+# caching is enabled).
+#
+# By default, the memory cache is shared if and only if all of the
+# following conditions are satisfied: Squid runs in SMP mode with
+# multiple workers, cache_mem is positive, and Squid environment
+# supports required IPC primitives (e.g., POSIX shared memory segments
+# and GCC-style atomic operations).
+#
+# To avoid blocking locks, shared memory uses opportunistic algorithms
+# that do not guarantee that every cachable entity that could have been
+# shared among SMP workers will actually be shared.
+#Default:
+# "on" where supported if doing memory caching with multiple SMP workers.
+
+# TAG: memory_cache_mode
+# Controls which objects to keep in the memory cache (cache_mem)
+#
+# always Keep most recently fetched objects in memory (default)
+#
+# disk Only disk cache hits are kept in memory, which means
+# an object must first be cached on disk and then hit
+# a second time before cached in memory.
+#
+# network Only objects fetched from the network are kept in memory
+#Default:
+# Keep the most recently fetched objects in memory
+
+# TAG: memory_replacement_policy
+# The memory replacement policy parameter determines which
+# objects are purged from memory when memory space is needed.
+#
+# See cache_replacement_policy for details on algorithms.
+#Default:
+# memory_replacement_policy lru
+
+# DISK CACHE OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: cache_replacement_policy
+# The cache replacement policy parameter determines which
+# objects are evicted (replaced) when disk space is needed.
+#
+# lru : Squid's original list based LRU policy
+# heap GDSF : Greedy-Dual Size Frequency
+# heap LFUDA: Least Frequently Used with Dynamic Aging
+# heap LRU : LRU policy implemented using a heap
+#
+# Applies to any cache_dir lines listed below this directive.
+#
+# The LRU policies keep recently referenced objects.
+#
+# The heap GDSF policy optimizes object hit rate by keeping smaller
+# popular objects in cache so it has a better chance of getting a
+# hit. It achieves a lower byte hit rate than LFUDA though since
+# it evicts larger (possibly popular) objects.
+#
+# The heap LFUDA policy keeps popular objects in cache regardless of
+# their size and thus optimizes byte hit rate at the expense of
+# hit rate since one large, popular object will prevent many
+# smaller, slightly less popular objects from being cached.
+#
+# Both policies utilize a dynamic aging mechanism that prevents
+# cache pollution that can otherwise occur with frequency-based
+# replacement policies.
+#
+# NOTE: if using the LFUDA replacement policy you should increase
+# the value of maximum_object_size above its default of 4 MB
+# to maximize the potential byte hit rate improvement of LFUDA.
+#
+# For more information about the GDSF and LFUDA cache replacement
+# policies see http://www.hpl.hp.com/techreports/1999/HPL-1999-69.html
+# and http://fog.hpl.external.hp.com/techreports/98/HPL-98-173.html.
+#Default:
+# cache_replacement_policy lru
+
+# TAG: minimum_object_size (bytes)
+# Objects smaller than this size will NOT be saved on disk. The
+# value is specified in bytes, and the default is 0 KB, which
+# means all responses can be stored.
+#Default:
+# no limit
+
+# TAG: maximum_object_size (bytes)
+# Set the default value for max-size parameter on any cache_dir.
+# The value is specified in bytes, and the default is 4 MB.
+#
+# If you wish to get a high BYTES hit ratio, you should probably
+# increase this (one 32 MB object hit counts for 3200 10KB
+# hits).
+#
+# If you wish to increase hit ratio more than you want to
+# save bandwidth you should leave this low.
+#
+# NOTE: if using the LFUDA replacement policy you should increase
+# this value to maximize the byte hit rate improvement of LFUDA!
+# See cache_replacement_policy for a discussion of this policy.
+#Default:
+# maximum_object_size 4 MB
+
+# TAG: cache_dir
+# Format:
+# cache_dir Type Directory-Name Fs-specific-data [options]
+#
+# You can specify multiple cache_dir lines to spread the
+# cache among different disk partitions.
+#
+# Type specifies the kind of storage system to use. Only "ufs"
+# is built by default. To enable any of the other storage systems
+# see the --enable-storeio configure option.
+#
+# 'Directory' is a top-level directory where cache swap
+# files will be stored. If you want to use an entire disk
+# for caching, this can be the mount-point directory.
+# The directory must exist and be writable by the Squid
+# process. Squid will NOT create this directory for you.
+#
+# In SMP configurations, cache_dir must not precede the workers option
+# and should use configuration macros or conditionals to give each
+# worker interested in disk caching a dedicated cache directory.
+#
+#
+# ==== The ufs store type ====
+#
+# "ufs" is the old well-known Squid storage format that has always
+# been there.
+#
+# Usage:
+# cache_dir ufs Directory-Name Mbytes L1 L2 [options]
+#
+# 'Mbytes' is the amount of disk space (MB) to use under this
+# directory. The default is 100 MB. Change this to suit your
+# configuration. Do NOT put the size of your disk drive here.
+# Instead, if you want Squid to use the entire disk drive,
+# subtract 20% and use that value.
+#
+# 'L1' is the number of first-level subdirectories which
+# will be created under the 'Directory'. The default is 16.
+#
+# 'L2' is the number of second-level subdirectories which
+# will be created under each first-level directory. The default
+# is 256.
+#
+#
+# ==== The aufs store type ====
+#
+# "aufs" uses the same storage format as "ufs", utilizing
+# POSIX-threads to avoid blocking the main Squid process on
+# disk-I/O. This was formerly known in Squid as async-io.
+#
+# Usage:
+# cache_dir aufs Directory-Name Mbytes L1 L2 [options]
+#
+# see argument descriptions under ufs above
+#
+#
+# ==== The diskd store type ====
+#
+# "diskd" uses the same storage format as "ufs", utilizing a
+# separate process to avoid blocking the main Squid process on
+# disk-I/O.
+#
+# Usage:
+# cache_dir diskd Directory-Name Mbytes L1 L2 [options] [Q1=n] [Q2=n]
+#
+# see argument descriptions under ufs above
+#
+# Q1 specifies the number of unacknowledged I/O requests when Squid
+# stops opening new files. If this many messages are in the queues,
+# Squid won't open new files. Default is 64
+#
+# Q2 specifies the number of unacknowledged messages when Squid
+# starts blocking. If this many messages are in the queues,
+# Squid blocks until it receives some replies. Default is 72
+#
+# When Q1 < Q2 (the default), the cache directory is optimized
+# for lower response time at the expense of a decrease in hit
+# ratio. If Q1 > Q2, the cache directory is optimized for
+# higher hit ratio at the expense of an increase in response
+# time.
+#
+#
+# ==== The rock store type ====
+#
+# Usage:
+# cache_dir rock Directory-Name Mbytes [options]
+#
+# The Rock Store type is a database-style storage. All cached
+# entries are stored in a "database" file, using fixed-size slots.
+# A single entry occupies one or more slots.
+#
+# If possible, Squid using Rock Store creates a dedicated kid
+# process called "disker" to avoid blocking Squid worker(s) on disk
+# I/O. One disker kid is created for each rock cache_dir. Diskers
+# are created only when Squid, running in daemon mode, has support
+# for the IpcIo disk I/O module.
+#
+# swap-timeout=msec: Squid will not start writing a miss to or
+# reading a hit from disk if it estimates that the swap operation
+# will take more than the specified number of milliseconds. By
+# default and when set to zero, disables the disk I/O time limit
+# enforcement. Ignored when using blocking I/O module because
+# blocking synchronous I/O does not allow Squid to estimate the
+# expected swap wait time.
+#
+# max-swap-rate=swaps/sec: Artificially limits disk access using
+# the specified I/O rate limit. Swap out requests that
+# would cause the average I/O rate to exceed the limit are
+# delayed. Individual swap in requests (i.e., hits or reads) are
+# not delayed, but they do contribute to measured swap rate and
+# since they are placed in the same FIFO queue as swap out
+# requests, they may wait longer if max-swap-rate is smaller.
+# This is necessary on file systems that buffer "too
+# many" writes and then start blocking Squid and other processes
+# while committing those writes to disk. Usually used together
+# with swap-timeout to avoid excessive delays and queue overflows
+# when disk demand exceeds available disk "bandwidth". By default
+# and when set to zero, disables the disk I/O rate limit
+# enforcement. Currently supported by IpcIo module only.
+#
+# slot-size=bytes: The size of a database "record" used for
+# storing cached responses. A cached response occupies at least
+# one slot and all database I/O is done using individual slots so
+# increasing this parameter leads to more disk space waste while
+# decreasing it leads to more disk I/O overheads. Should be a
+# multiple of your operating system I/O page size. Defaults to
+# 16KBytes. A housekeeping header is stored with each slot and
+# smaller slot-sizes will be rejected. The header is smaller than
+# 100 bytes.
+#
+#
+# ==== COMMON OPTIONS ====
+#
+# no-store no new objects should be stored to this cache_dir.
+#
+# min-size=n the minimum object size in bytes this cache_dir
+# will accept. It's used to restrict a cache_dir
+# to only store large objects (e.g. AUFS) while
+# other stores are optimized for smaller objects
+# (e.g. Rock).
+# Defaults to 0.
+#
+# max-size=n the maximum object size in bytes this cache_dir
+# supports.
+# The value in maximum_object_size directive sets
+# the default unless more specific details are
+# available (ie a small store capacity).
+#
+# Note: To make optimal use of the max-size limits you should order
+# the cache_dir lines with the smallest max-size value first.
+#
+#Default:
+# No disk cache. Store cache objects only in memory.
+#
+
+# Uncomment and adjust the following to add a disk cache directory.
+#cache_dir ufs /var/spool/squid 100 16 256
+
+# TAG: store_dir_select_algorithm
+# How Squid selects which cache_dir to use when the response
+# object will fit into more than one.
+#
+# Regardless of which algorithm is used the cache_dir min-size
+# and max-size parameters are obeyed. As such they can affect
+# the selection algorithm by limiting the set of considered
+# cache_dir.
+#
+# Algorithms:
+#
+# least-load
+#
+# This algorithm is suited to caches with similar cache_dir
+# sizes and disk speeds.
+#
+# The disk with the least I/O pending is selected.
+# When there are multiple disks with the same I/O load ranking
+# the cache_dir with most available capacity is selected.
+#
+# When a mix of cache_dir sizes are configured the faster disks
+# have a naturally lower I/O loading and larger disks have more
+# capacity. So space used to store objects and data throughput
+# may be very unbalanced towards larger disks.
+#
+#
+# round-robin
+#
+# This algorithm is suited to caches with unequal cache_dir
+# disk sizes.
+#
+# Each cache_dir is selected in a rotation. The next suitable
+# cache_dir is used.
+#
+# Available cache_dir capacity is only considered in relation
+# to whether the object will fit and meets the min-size and
+# max-size parameters.
+#
+# Disk I/O loading is only considered to prevent overload on slow
+# disks. This algorithm does not spread objects by size, so any
+# I/O loading per-disk may appear very unbalanced and volatile.
+#
+# If several cache_dirs use similar min-size, max-size, or other
+# limits to reject certain responses, then do not group such
+# cache_dir lines together, to avoid round-robin selection bias
+# towards the first cache_dir after the group. Instead, interleave
+# cache_dir lines from different groups. For example:
+#
+# store_dir_select_algorithm round-robin
+# cache_dir rock /hdd1 ... min-size=100000
+# cache_dir rock /ssd1 ... max-size=99999
+# cache_dir rock /hdd2 ... min-size=100000
+# cache_dir rock /ssd2 ... max-size=99999
+# cache_dir rock /hdd3 ... min-size=100000
+# cache_dir rock /ssd3 ... max-size=99999
+#Default:
+# store_dir_select_algorithm least-load
+
+# TAG: max_open_disk_fds
+# To avoid having disk as the I/O bottleneck Squid can optionally
+# bypass the on-disk cache if more than this amount of disk file
+# descriptors are open.
+#
+# A value of 0 indicates no limit.
+#Default:
+# no limit
+
+# TAG: cache_swap_low (percent, 0-100)
+# The low-water mark for AUFS/UFS/diskd cache object eviction by
+# the cache_replacement_policy algorithm.
+#
+# Removal begins when the swap (disk) usage of a cache_dir is
+# above this low-water mark and attempts to maintain utilization
+# near the low-water mark.
+#
+# As swap utilization increases towards the high-water mark set
+# by cache_swap_high object eviction becomes more aggressive.
+#
+# The value difference in percentages between low- and high-water
+# marks represent an eviction rate of 300 objects per second and
+# the rate continues to scale in aggressiveness by multiples of
+# this above the high-water mark.
+#
+# Defaults are 90% and 95%. If you have a large cache, 5% could be
+# hundreds of MB. If this is the case you may wish to set these
+# numbers closer together.
+#
+# See also cache_swap_high and cache_replacement_policy
+#Default:
+# cache_swap_low 90
+
+# TAG: cache_swap_high (percent, 0-100)
+# The high-water mark for AUFS/UFS/diskd cache object eviction by
+# the cache_replacement_policy algorithm.
+#
+# Removal begins when the swap (disk) usage of a cache_dir is
+# above the low-water mark set by cache_swap_low and attempts to
+# maintain utilization near the low-water mark.
+#
+# As swap utilization increases towards this high-water mark object
+# eviction becomes more aggressive.
+#
+# The value difference in percentages between low- and high-water
+# marks represent an eviction rate of 300 objects per second and
+# the rate continues to scale in aggressiveness by multiples of
+# this above the high-water mark.
+#
+# Defaults are 90% and 95%. If you have a large cache, 5% could be
+# hundreds of MB. If this is the case you may wish to set these
+# numbers closer together.
+#
+# See also cache_swap_low and cache_replacement_policy
+#Default:
+# cache_swap_high 95
+
+# LOGFILE OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: logformat
+# Usage:
+#
+# logformat <name> <format specification>
+#
+# Defines an access log format.
+#
+# The <format specification> is a string with embedded % format codes
+#
+# % format codes all follow the same basic structure where all but
+# the formatcode is optional. Output strings are automatically escaped
+# as required according to their context and the output format
+# modifiers are usually not needed, but can be specified if an explicit
+# output format is desired.
+#
+# % ["|[|'|#] [-] [[0]width] [{argument}] formatcode
+#
+# " output in quoted string format
+# [ output in squid text log format as used by log_mime_hdrs
+# # output in URL quoted format
+# ' output as-is
+#
+# - left aligned
+#
+# width minimum and/or maximum field width:
+# [width_min][.width_max]
+# When minimum starts with 0, the field is zero-padded.
+# String values exceeding maximum width are truncated.
+#
+# {arg} argument such as header name etc
+#
+# Format codes:
+#
+# % a literal % character
+# sn Unique sequence number per log line entry
+# err_code The ID of an error response served by Squid or
+# a similar internal error identifier.
+# err_detail Additional err_code-dependent error information.
+# note The annotation specified by the argument. Also
+# logs the adaptation meta headers set by the
+# adaptation_meta configuration parameter.
+# If no argument given all annotations logged.
+# The argument may include a separator to use with
+# annotation values:
+# name[:separator]
+# By default, multiple note values are separated with ","
+# and multiple notes are separated with "\r\n".
+# When logging named notes with %{name}note, the
+# explicitly configured separator is used between note
+# values. When logging all notes with %note, the
+# explicitly configured separator is used between
+# individual notes. There is currently no way to
+# specify both value and notes separators when logging
+# all notes with %note.
+#
+# Connection related format codes:
+#
+# >a Client source IP address
+# >A Client FQDN
+# >p Client source port
+# >eui Client source EUI (MAC address, EUI-48 or EUI-64 identifier)
+# >la Local IP address the client connected to
+# >lp Local port number the client connected to
+# >qos Client connection TOS/DSCP value set by Squid
+# >nfmark Client connection netfilter mark set by Squid
+#
+# la Local listening IP address the client connection was connected to.
+# lp Local listening port number the client connection was connected to.
+#
+# <a Server IP address of the last server or peer connection
+# <A Server FQDN or peer name
+# <p Server port number of the last server or peer connection
+# <la Local IP address of the last server or peer connection
+# <lp Local port number of the last server or peer connection
+# <qos Server connection TOS/DSCP value set by Squid
+# <nfmark Server connection netfilter mark set by Squid
+#
+# Time related format codes:
+#
+# ts Seconds since epoch
+# tu subsecond time (milliseconds)
+# tl Local time. Optional strftime format argument
+# default %d/%b/%Y:%H:%M:%S %z
+# tg GMT time. Optional strftime format argument
+# default %d/%b/%Y:%H:%M:%S %z
+# tr Response time (milliseconds)
+# dt Total time spent making DNS lookups (milliseconds)
+# tS Approximate master transaction start time in
+# <full seconds since epoch>.<fractional seconds> format.
+# Currently, Squid considers the master transaction
+# started when a complete HTTP request header initiating
+# the transaction is received from the client. This is
+# the same value that Squid uses to calculate transaction
+# response time when logging %tr to access.log. Currently,
+# Squid uses millisecond resolution for %tS values,
+# similar to the default access.log "current time" field
+# (%ts.%03tu).
+#
+# Access Control related format codes:
+#
+# et Tag returned by external acl
+# ea Log string returned by external acl
+# un User name (any available)
+# ul User name from authentication
+# ue User name from external acl helper
+# ui User name from ident
+# un A user name. Expands to the first available name
+# from the following list of information sources:
+# - authenticated user name, like %ul
+# - user name supplied by an external ACL, like %ue
+# - SSL client name, like %us
+# - ident user name, like %ui
+# credentials Client credentials. The exact meaning depends on
+# the authentication scheme: For Basic authentication,
+# it is the password; for Digest, the realm sent by the
+# client; for NTLM and Negotiate, the client challenge
+# or client credentials prefixed with "YR " or "KK ".
+#
+# HTTP related format codes:
+#
+# REQUEST
+#
+# [http::]rm Request method (GET/POST etc)
+# [http::]>rm Request method from client
+# [http::]<rm Request method sent to server or peer
+# [http::]ru Request URL from client (historic, filtered for logging)
+# [http::]>ru Request URL from client
+# [http::]<ru Request URL sent to server or peer
+# [http::]>rs Request URL scheme from client
+# [http::]<rs Request URL scheme sent to server or peer
+# [http::]>rd Request URL domain from client
+# [http::]<rd Request URL domain sent to server or peer
+# [http::]>rP Request URL port from client
+# [http::]<rP Request URL port sent to server or peer
+# [http::]rp Request URL path excluding hostname
+# [http::]>rp Request URL path excluding hostname from client
+# [http::]<rp Request URL path excluding hostname sent to server or peer
+# [http::]rv Request protocol version
+# [http::]>rv Request protocol version from client
+# [http::]<rv Request protocol version sent to server or peer
+#
+# [http::]>h Original received request header.
+# Usually differs from the request header sent by
+# Squid, although most fields are often preserved.
+# Accepts optional header field name/value filter
+# argument using name[:[separator]element] format.
+# [http::]>ha Received request header after adaptation and
+# redirection (pre-cache REQMOD vectoring point).
+# Usually differs from the request header sent by
+# Squid, although most fields are often preserved.
+# Optional header name argument as for >h
+#
+#
+# RESPONSE
+#
+# [http::]<Hs HTTP status code received from the next hop
+# [http::]>Hs HTTP status code sent to the client
+#
+# [http::]<h Reply header. Optional header name argument
+# as for >h
+#
+# [http::]mt MIME content type
+#
+#
+# SIZE COUNTERS
+#
+# [http::]st Total size of request + reply traffic with client
+# [http::]>st Total size of request received from client.
+# Excluding chunked encoding bytes.
+# [http::]<st Total size of reply sent to client (after adaptation)
+#
+# [http::]>sh Size of request headers received from client
+# [http::]<sh Size of reply headers sent to client (after adaptation)
+#
+# [http::]<sH Reply high offset sent
+# [http::]<sS Upstream object size
+#
+# [http::]<bs Number of HTTP-equivalent message body bytes
+# received from the next hop, excluding chunked
+# transfer encoding and control messages.
+# Generated FTP/Gopher listings are treated as
+# received bodies.
+#
+#
+# TIMING
+#
+# [http::]<pt Peer response time in milliseconds. The timer starts
+# when the last request byte is sent to the next hop
+# and stops when the last response byte is received.
+# [http::]<tt Total time in milliseconds. The timer
+# starts with the first connect request (or write I/O)
+# sent to the first selected peer. The timer stops
+# with the last I/O with the last peer.
+#
+# Squid handling related format codes:
+#
+# Ss Squid request status (TCP_MISS etc)
+# Sh Squid hierarchy status (DEFAULT_PARENT etc)
+#
+# SSL-related format codes:
+#
+# ssl::bump_mode SslBump decision for the transaction:
+#
+# For CONNECT requests that initiated bumping of
+# a connection and for any request received on
+# an already bumped connection, Squid logs the
+# corresponding SslBump mode ("server-first" or
+# "client-first"). See the ssl_bump option for
+# more information about these modes.
+#
+# A "none" token is logged for requests that
+# triggered "ssl_bump" ACL evaluation matching
+# either a "none" rule or no rules at all.
+#
+# In all other cases, a single dash ("-") is
+# logged.
+#
+# ssl::>sni SSL client SNI sent to Squid. Available only
+# after the peek, stare, or splice SSL bumping
+# actions.
+#
+# If ICAP is enabled, the following code becomes available (as
+# well as ICAP log codes documented with the icap_log option):
+#
+# icap::tt Total ICAP processing time for the HTTP
+# transaction. The timer ticks when ICAP
+# ACLs are checked and when ICAP
+# transaction is in progress.
+#
+# If adaptation is enabled the following three codes become available:
+#
+# adapt::<last_h The header of the last ICAP response or
+# meta-information from the last eCAP
+# transaction related to the HTTP transaction.
+# Like <h, accepts an optional header name
+# argument.
+#
+# adapt::sum_trs Summed adaptation transaction response
+# times recorded as a comma-separated list in
+# the order of transaction start time. Each time
+# value is recorded as an integer number,
+# representing response time of one or more
+# adaptation (ICAP or eCAP) transaction in
+# milliseconds. When a failed transaction is
+# being retried or repeated, its time is not
+# logged individually but added to the
+# replacement (next) transaction. See also:
+# adapt::all_trs.
+#
+# adapt::all_trs All adaptation transaction response times.
+# Same as adapt::sum_trs but response times of
+# individual transactions are never added
+# together. Instead, all transaction response
+# times are recorded individually.
+#
+# You can prefix adapt::*_trs format codes with adaptation
+# service name in curly braces to record response time(s) specific
+# to that service. For example: %{my_service}adapt::sum_trs
+#
+# If SSL is enabled, the following formatting codes become available:
+#
+# %ssl::>cert_subject The Subject field of the received client
+# SSL certificate or a dash ('-') if Squid has
+# received an invalid/malformed certificate or
+# no certificate at all. Consider encoding the
+# logged value because Subject often has spaces.
+#
+# %ssl::>cert_issuer The Issuer field of the received client
+# SSL certificate or a dash ('-') if Squid has
+# received an invalid/malformed certificate or
+# no certificate at all. Consider encoding the
+# logged value because Issuer often has spaces.
+#
+# The default formats available (which do not need re-defining) are:
+#
+#logformat squid %ts.%03tu %6tr %>a %Ss/%03>Hs %<st %rm %ru %[un %Sh/%<a %mt
+#logformat common %>a %[ui %[un [%tl] "%rm %ru HTTP/%rv" %>Hs %<st %Ss:%Sh
+#logformat combined %>a %[ui %[un [%tl] "%rm %ru HTTP/%rv" %>Hs %<st "%{Referer}>h" "%{User-Agent}>h" %Ss:%Sh
+#logformat referrer %ts.%03tu %>a %{Referer}>h %ru
+#logformat useragent %>a [%tl] "%{User-Agent}>h"
+#
+# NOTE: When the log_mime_hdrs directive is set to ON.
+# The squid, common and combined formats have a safely encoded copy
+# of the mime headers appended to each line within a pair of brackets.
+#
+# NOTE: The common and combined formats are not quite true to the Apache definition.
+# The logs from Squid contain an extra status and hierarchy code appended.
+#
+#Default:
+# The format definitions squid, common, combined, referrer, useragent are built in.
+
+# TAG: access_log
+# Configures whether and how Squid logs HTTP and ICP transactions.
+# If access logging is enabled, a single line is logged for every
+# matching HTTP or ICP request. The recommended directive formats are:
+#
+# access_log <module>:<place> [option ...] [acl acl ...]
+# access_log none [acl acl ...]
+#
+# The following directive format is accepted but may be deprecated:
+# access_log <module>:<place> [<logformat name> [acl acl ...]]
+#
+# In most cases, the first ACL name must not contain the '=' character
+# and should not be equal to an existing logformat name. You can always
+# start with an 'all' ACL to work around those restrictions.
+#
+# Will log to the specified module:place using the specified format (which
+# must be defined in a logformat directive) those entries which match
+# ALL the acl's specified (which must be defined in acl clauses).
+# If no acl is specified, all requests will be logged to this destination.
+#
+# ===== Available options for the recommended directive format =====
+#
+# logformat=name Names log line format (either built-in or
+# defined by a logformat directive). Defaults
+# to 'squid'.
+#
+# buffer-size=64KB Defines approximate buffering limit for log
+# records (see buffered_logs). Squid should not
+# keep more than the specified size and, hence,
+# should flush records before the buffer becomes
+# full to avoid overflows under normal
+# conditions (the exact flushing algorithm is
+# module-dependent though). The on-error option
+# controls overflow handling.
+#
+# on-error=die|drop Defines action on unrecoverable errors. The
+# 'drop' action ignores (i.e., does not log)
+# affected log records. The default 'die' action
+# kills the affected worker. The drop action
+# support has not been tested for modules other
+# than tcp.
+#
+# ===== Modules Currently available =====
+#
+# none Do not log any requests matching these ACLs.
+# Do not specify Place or logformat name.
+#
+# stdio Write each log line to disk immediately at the completion of
+# each request.
+# Place: the filename and path to be written.
+#
+# daemon Very similar to stdio. But instead of writing to disk the log
+# line is passed to a daemon helper for asynchronous handling instead.
+# Place: varies depending on the daemon.
+#
+# log_file_daemon Place: the file name and path to be written.
+#
+# syslog To log each request via syslog facility.
+# Place: The syslog facility and priority level for these entries.
+# Place Format: facility.priority
+#
+# where facility could be any of:
+# authpriv, daemon, local0 ... local7 or user.
+#
+# And priority could be any of:
+# err, warning, notice, info, debug.
+#
+# udp To send each log line as text data to a UDP receiver.
+# Place: The destination host name or IP and port.
+# Place Format: //host:port
+#
+# tcp To send each log line as text data to a TCP receiver.
+# Lines may be accumulated before sending (see buffered_logs).
+# Place: The destination host name or IP and port.
+# Place Format: //host:port
+#
+# Default:
+# access_log daemon:/var/log/squid/access.log squid
+#Default:
+# access_log daemon:/var/log/squid/access.log squid
+
+# TAG: icap_log
+# ICAP log files record ICAP transaction summaries, one line per
+# transaction.
+#
+# The icap_log option format is:
+# icap_log <filepath> [<logformat name> [acl acl ...]]
+# icap_log none [acl acl ...]]
+#
+# Please see access_log option documentation for details. The two
+# kinds of logs share the overall configuration approach and many
+# features.
+#
+# ICAP processing of a single HTTP message or transaction may
+# require multiple ICAP transactions. In such cases, multiple
+# ICAP transaction log lines will correspond to a single access
+# log line.
+#
+# ICAP log supports many access.log logformat %codes. In ICAP context,
+# HTTP message-related %codes are applied to the HTTP message embedded
+# in an ICAP message. Logformat "%http::>..." codes are used for HTTP
+# messages embedded in ICAP requests while "%http::<..." codes are used
+# for HTTP messages embedded in ICAP responses. For example:
+#
+# http::>h To-be-adapted HTTP message headers sent by Squid to
+# the ICAP service. For REQMOD transactions, these are
+# HTTP request headers. For RESPMOD, these are HTTP
+# response headers, but Squid currently cannot log them
+# (i.e., %http::>h will expand to "-" for RESPMOD).
+#
+# http::<h Adapted HTTP message headers sent by the ICAP
+# service to Squid (i.e., HTTP request headers in regular
+# REQMOD; HTTP response headers in RESPMOD and during
+# request satisfaction in REQMOD).
+#
+# ICAP OPTIONS transactions do not embed HTTP messages.
+#
+# Several logformat codes below deal with ICAP message bodies. An ICAP
+# message body, if any, typically includes a complete HTTP message
+# (required HTTP headers plus optional HTTP message body). When
+# computing HTTP message body size for these logformat codes, Squid
+# either includes or excludes chunked encoding overheads; see
+# code-specific documentation for details.
+#
+# For Secure ICAP services, all size-related information is currently
+# computed before/after TLS encryption/decryption, as if TLS was not
+# in use at all.
+#
+# The following format codes are also available for ICAP logs:
+#
+# icap::<A ICAP server IP address. Similar to <A.
+#
+# icap::<service_name ICAP service name from the icap_service
+# option in Squid configuration file.
+#
+# icap::ru ICAP Request-URI. Similar to ru.
+#
+# icap::rm ICAP request method (REQMOD, RESPMOD, or
+# OPTIONS). Similar to existing rm.
+#
+# icap::>st The total size of the ICAP request sent to the ICAP
+# server (ICAP headers + ICAP body), including chunking
+# metadata (if any).
+#
+# icap::<st The total size of the ICAP response received from the
+# ICAP server (ICAP headers + ICAP body), including
+# chunking metadata (if any).
+#
+# icap::<bs The size of the ICAP response body received from the
+# ICAP server, excluding chunking metadata (if any).
+#
+# icap::tr Transaction response time (in
+# milliseconds). The timer starts when
+# the ICAP transaction is created and
+# stops when the transaction is completed.
+# Similar to tr.
+#
+# icap::tio Transaction I/O time (in milliseconds). The
+# timer starts when the first ICAP request
+# byte is scheduled for sending. The timer
+# stops when the last byte of the ICAP response
+# is received.
+#
+# icap::to Transaction outcome: ICAP_ERR* for all
+# transaction errors, ICAP_OPT for OPTION
+# transactions, ICAP_ECHO for 204
+# responses, ICAP_MOD for message
+# modification, and ICAP_SAT for request
+# satisfaction. Similar to Ss.
+#
+# icap::Hs ICAP response status code. Similar to Hs.
+#
+# icap::>h ICAP request header(s). Similar to >h.
+#
+# icap::<h ICAP response header(s). Similar to <h.
+#
+# The default ICAP log format, which can be used without an explicit
+# definition, is called icap_squid:
+#
+#logformat icap_squid %ts.%03tu %6icap::tr %>A %icap::to/%03icap::Hs %icap::<st %icap::rm %icap::ru %un -/%icap::<A -
+#
+# See also: logformat and %adapt::<last_h
+#Default:
+# none
+
+# TAG: logfile_daemon
+# Specify the path to the logfile-writing daemon. This daemon is
+# used to write the access and store logs, if configured.
+#
+# Squid sends a number of commands to the log daemon:
+# L<data>\n - logfile data
+# R\n - rotate file
+# T\n - truncate file
+# O\n - reopen file
+# F\n - flush file
+# r<n>\n - set rotate count to <n>
+# b<n>\n - 1 = buffer output, 0 = don't buffer output
+#
+# No response is expected.
+#Default:
+# logfile_daemon /usr/lib/squid/log_file_daemon
+
+# TAG: stats_collection allow|deny acl acl...
+# This option allows you to control which requests get accounted
+# in performance counters.
+#
+# This clause only supports fast acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#Default:
+# Allow logging for all transactions.
+
+# TAG: cache_store_log
+# Logs the activities of the storage manager. Shows which
+# objects are ejected from the cache, and which objects are
+# saved and for how long.
+# There are not really utilities to analyze this data, so you can safely
+# disable it (the default).
+#
+# Store log uses modular logging outputs. See access_log for the list
+# of modules supported.
+#
+# Example:
+# cache_store_log stdio:/var/log/squid/store.log
+# cache_store_log daemon:/var/log/squid/store.log
+#Default:
+# none
+
+# TAG: cache_swap_state
+# Location for the cache "swap.state" file. This index file holds
+# the metadata of objects saved on disk. It is used to rebuild
+# the cache during startup. Normally this file resides in each
+# 'cache_dir' directory, but you may specify an alternate
+# pathname here. Note you must give a full filename, not just
+# a directory. Since this is the index for the whole object
+# list you CANNOT periodically rotate it!
+#
+# If %s can be used in the file name it will be replaced with a
+# representation of the cache_dir name where each / is replaced
+# with '.'. This is needed to allow adding/removing cache_dir
+# lines when cache_swap_log is being used.
+#
+# If you have more than one 'cache_dir', and %s is not used in the name
+# these swap logs will have names such as:
+#
+# cache_swap_log.00
+# cache_swap_log.01
+# cache_swap_log.02
+#
+# The numbered extension (which is added automatically)
+# corresponds to the order of the 'cache_dir' lines in this
+# configuration file. If you change the order of the 'cache_dir'
+# lines in this file, these index files will NOT correspond to
+# the correct 'cache_dir' entry (unless you manually rename
+# them). We recommend you do NOT use this option. It is
+# better to keep these index files in each 'cache_dir' directory.
+#Default:
+# Store the journal inside its cache_dir
+
+# TAG: logfile_rotate
+# Specifies the number of logfile rotations to make when you
+# type 'squid -k rotate'. The default is 10, which will rotate
+# with extensions 0 through 9. Setting logfile_rotate to 0 will
+# disable the file name rotation, but the logfiles are still closed
+# and re-opened. This will enable you to rename the logfiles
+# yourself just before sending the rotate signal.
+#
+# Note, the 'squid -k rotate' command normally sends a USR1
+# signal to the running squid process. In certain situations
+# (e.g. on Linux with Async I/O), USR1 is used for other
+# purposes, so -k rotate uses another signal. It is best to get
+# in the habit of using 'squid -k rotate' instead of 'kill -USR1
+# <pid>'.
+#
+# Note, from Squid-3.1 this option is only a default for cache.log,
+# that log can be rotated separately by using debug_options.
+#
+# Note2, for Debian/Linux the default of logfile_rotate is
+# zero, since it includes external logfile-rotation methods.
+#Default:
+# logfile_rotate 0
+
+# TAG: mime_table
+# Path to Squid's icon configuration file.
+#
+# You shouldn't need to change this, but the default file contains
+# examples and formatting information if you do.
+#Default:
+# mime_table /usr/share/squid/mime.conf
+
+# TAG: log_mime_hdrs on|off
+# The Cache can record both the request and the response MIME
+# headers for each HTTP transaction. The headers are encoded
+# safely and will appear as two bracketed fields at the end of
+# the access log (for either the native or httpd-emulated log
+# formats). To enable this logging set log_mime_hdrs to 'on'.
+#Default:
+# log_mime_hdrs off
+
+# TAG: pid_filename
+# A filename to write the process-id to. To disable, enter "none".
+#Default:
+# pid_filename /var/run/squid.pid
+
+# TAG: client_netmask
+# A netmask for client addresses in logfiles and cachemgr output.
+# Change this to protect the privacy of your cache clients.
+# A netmask of 255.255.255.0 will log all IP's in that range with
+# the last digit set to '0'.
+#Default:
+# Log full client IP address
+
+# TAG: strip_query_terms
+# By default, Squid strips query terms from requested URLs before
+# logging. This protects your user's privacy and reduces log size.
+#
+# When investigating HIT/MISS or other caching behaviour you
+# will need to disable this to see the full URL used by Squid.
+#Default:
+# strip_query_terms on
+
+# TAG: buffered_logs on|off
+# Whether to write/send access_log records ASAP or accumulate them and
+# then write/send them in larger chunks. Buffering may improve
+# performance because it decreases the number of I/Os. However,
+# buffering increases the delay before log records become available to
+# the final recipient (e.g., a disk file or logging daemon) and,
+# hence, increases the risk of log records loss.
+#
+# Note that even when buffered_logs are off, Squid may have to buffer
+# records if it cannot write/send them immediately due to pending I/Os
+# (e.g., the I/O writing the previous log record) or connectivity loss.
+#
+# Currently honored by 'daemon' and 'tcp' access_log modules only.
+#Default:
+# buffered_logs off
+
+# TAG: netdb_filename
+# Where Squid stores its netdb journal.
+# When enabled this journal preserves netdb state between restarts.
+#
+# To disable, enter "none".
+#Default:
+# netdb_filename stdio:/var/log/squid/netdb.state
+
+# OPTIONS FOR TROUBLESHOOTING
+# -----------------------------------------------------------------------------
+
+# TAG: cache_log
+# Squid administrative logging file.
+#
+# This is where general information about Squid behavior goes. You can
+# increase the amount of data logged to this file and how often it is
+# rotated with "debug_options"
+#Default:
+# cache_log /var/log/squid/cache.log
+
+# TAG: debug_options
+# Logging options are set as section,level where each source file
+# is assigned a unique section. Lower levels result in less
+# output, Full debugging (level 9) can result in a very large
+# log file, so be careful.
+#
+# The magic word "ALL" sets debugging levels for all sections.
+# The default is to run with "ALL,1" to record important warnings.
+#
+# The rotate=N option can be used to keep more or less of these logs
+# than would otherwise be kept by logfile_rotate.
+# For most uses a single log should be enough to monitor current
+# events affecting Squid.
+#Default:
+# Log all critical and important messages.
+
+# TAG: coredump_dir
+# By default Squid leaves core files in the directory from where
+# it was started. If you set 'coredump_dir' to a directory
+# that exists, Squid will chdir() to that directory at startup
+# and coredump files will be left there.
+#
+#Default:
+# Use the directory from where Squid was started.
+#
+
+# Leave coredumps in the first cache dir
+coredump_dir /var/spool/squid
+
+# OPTIONS FOR FTP GATEWAYING
+# -----------------------------------------------------------------------------
+
+# TAG: ftp_user
+# If you want the anonymous login password to be more informative
+# (and enable the use of picky FTP servers), set this to something
+# reasonable for your domain, like wwwuser@somewhere.net
+#
+# The reason why this is domainless by default is the
+# request can be made on the behalf of a user in any domain,
+# depending on how the cache is used.
+# Some FTP servers also validate that the email address is valid
+# (for example perl.com).
+#Default:
+# ftp_user Squid@
+
+# TAG: ftp_passive
+# If your firewall does not allow Squid to use passive
+# connections, turn off this option.
+#
+# Use of ftp_epsv_all option requires this to be ON.
+#Default:
+# ftp_passive on
+
+# TAG: ftp_epsv_all
+# FTP Protocol extensions permit the use of a special "EPSV ALL" command.
+#
+# NATs may be able to put the connection on a "fast path" through the
+# translator, as the EPRT command will never be used and therefore,
+# translation of the data portion of the segments will never be needed.
+#
+# When a client only expects to do two-way FTP transfers this may be
+# useful.
+# If squid finds that it must do a three-way FTP transfer after issuing
+# an EPSV ALL command, the FTP session will fail.
+#
+# If you have any doubts about this option do not use it.
+# Squid will nicely attempt all other connection methods.
+#
+# Requires ftp_passive to be ON (default) for any effect.
+#Default:
+# ftp_epsv_all off
+
+# TAG: ftp_epsv
+# FTP Protocol extensions permit the use of a special "EPSV" command.
+#
+# NATs may be able to put the connection on a "fast path" through the
+# translator using EPSV, as the EPRT command will never be used
+# and therefore, translation of the data portion of the segments
+# will never be needed.
+#
+# EPSV is often required to interoperate with FTP servers on IPv6
+# networks. On the other hand, it may break some IPv4 servers.
+#
+# By default, Squid may try EPSV with any FTP server. To fine tune
+# that decision, you may restrict EPSV to certain clients or servers
+# using ACLs:
+#
+# ftp_epsv allow|deny acl1 acl2 ...
+#
+# WARNING: Disabling EPSV may cause problems with external NAT and IPv6.
+#
+# Only fast ACLs are supported.
+# Requires ftp_passive to be ON (default) for any effect.
+#Default:
+# none
+
+# TAG: ftp_eprt
+# FTP Protocol extensions permit the use of a special "EPRT" command.
+#
+# This extension provides a protocol neutral alternative to the
+# IPv4-only PORT command. When supported it enables active FTP data
+# channels over IPv6 and efficient NAT handling.
+#
+# Turning this OFF will prevent EPRT being attempted and will skip
+# straight to using PORT for IPv4 servers.
+#
+# Some devices are known to not handle this extension correctly and
+#	may result in crashes. Devices which support EPRT enough to fail
+# cleanly will result in Squid attempting PORT anyway. This directive
+# should only be disabled when EPRT results in device failures.
+#
+# WARNING: Doing so will convert Squid back to the old behavior with all
+# the related problems with external NAT devices/layers and IPv4-only FTP.
+#Default:
+# ftp_eprt on
+
+# TAG: ftp_sanitycheck
+# For security and data integrity reasons Squid by default performs
+# sanity checks of the addresses of FTP data connections ensure the
+# data connection is to the requested server. If you need to allow
+# FTP connections to servers using another IP address for the data
+# connection turn this off.
+#Default:
+# ftp_sanitycheck on
+
+# TAG: ftp_telnet_protocol
+# The FTP protocol is officially defined to use the telnet protocol
+# as transport channel for the control connection. However, many
+#	implementations are broken and do not respect this aspect of
+# the FTP protocol.
+#
+# If you have trouble accessing files with ASCII code 255 in the
+# path or similar problems involving this ASCII code you can
+# try setting this directive to off. If that helps, report to the
+# operator of the FTP server in question that their FTP server
+# is broken and does not follow the FTP standard.
+#Default:
+# ftp_telnet_protocol on
+
+# OPTIONS FOR EXTERNAL SUPPORT PROGRAMS
+# -----------------------------------------------------------------------------
+
+# TAG: diskd_program
+# Specify the location of the diskd executable.
+# Note this is only useful if you have compiled in
+# diskd as one of the store io modules.
+#Default:
+# diskd_program /usr/lib/squid/diskd
+
+# TAG: unlinkd_program
+# Specify the location of the executable for file deletion process.
+#Default:
+# unlinkd_program /usr/lib/squid/unlinkd
+
+# TAG: pinger_program
+# Specify the location of the executable for the pinger process.
+#Default:
+# pinger_program /usr/lib/squid/pinger
+
+# TAG: pinger_enable
+# Control whether the pinger is active at run-time.
+# Enables turning ICMP pinger on and off with a simple
+# squid -k reconfigure.
+#Default:
+# pinger_enable on
+
+# OPTIONS FOR URL REWRITING
+# -----------------------------------------------------------------------------
+
+# TAG: url_rewrite_program
+# Specify the location of the executable URL rewriter to use.
+# Since they can perform almost any function there isn't one included.
+#
+#	For each requested URL, the rewriter will receive one line with the format
+#
+# [channel-ID <SP>] URL [<SP> extras]<NL>
+#
+# See url_rewrite_extras on how to send "extras" with optional values to
+# the helper.
+# After processing the request the helper must reply using the following format:
+#
+# [channel-ID <SP>] result [<SP> kv-pairs]
+#
+# The result code can be:
+#
+# OK status=30N url="..."
+# Redirect the URL to the one supplied in 'url='.
+# 'status=' is optional and contains the status code to send
+# the client in Squids HTTP response. It must be one of the
+# HTTP redirect status codes: 301, 302, 303, 307, 308.
+# When no status is given Squid will use 302.
+#
+# OK rewrite-url="..."
+# Rewrite the URL to the one supplied in 'rewrite-url='.
+# The new URL is fetched directly by Squid and returned to
+# the client as the response to its request.
+#
+# OK
+# When neither of url= and rewrite-url= are sent Squid does
+# not change the URL.
+#
+# ERR
+# Do not change the URL.
+#
+# BH
+# An internal error occurred in the helper, preventing
+# a result being identified. The 'message=' key name is
+# reserved for delivering a log message.
+#
+#
+# In addition to the above kv-pairs Squid also understands the following
+# optional kv-pairs received from URL rewriters:
+# clt_conn_tag=TAG
+# Associates a TAG with the client TCP connection.
+# The TAG is treated as a regular annotation but persists across
+# future requests on the client connection rather than just the
+# current request. A helper may update the TAG during subsequent
+#	requests by returning a new kv-pair.
+#
+# When using the concurrency= option the protocol is changed by
+# introducing a query channel tag in front of the request/response.
+# The query channel tag is a number between 0 and concurrency-1.
+# This value must be echoed back unchanged to Squid as the first part
+# of the response relating to its request.
+#
+# WARNING: URL re-writing ability should be avoided whenever possible.
+# Use the URL redirect form of response instead.
+#
+# Re-write creates a difference in the state held by the client
+# and server. Possibly causing confusion when the server response
+#	contains snippets of its view state.  Embedded URLs, response
+# and content Location headers, etc. are not re-written by this
+# interface.
+#
+# By default, a URL rewriter is not used.
+#Default:
+# none
+
+# TAG: url_rewrite_children
+# The maximum number of redirector processes to spawn. If you limit
+# it too few Squid will have to wait for them to process a backlog of
+# URLs, slowing it down. If you allow too many they will use RAM
+#	and other system resources noticeably.
+#
+# The startup= and idle= options allow some measure of skew in your
+# tuning.
+#
+# startup=
+#
+# Sets a minimum of how many processes are to be spawned when Squid
+# starts or reconfigures. When set to zero the first request will
+# cause spawning of the first child process to handle it.
+#
+# Starting too few will cause an initial slowdown in traffic as Squid
+# attempts to simultaneously spawn enough processes to cope.
+#
+# idle=
+#
+# Sets a minimum of how many processes Squid is to try and keep available
+# at all times. When traffic begins to rise above what the existing
+# processes can handle this many more will be spawned up to the maximum
+# configured. A minimum setting of 1 is required.
+#
+# concurrency=
+#
+# The number of requests each redirector helper can handle in
+# parallel. Defaults to 0 which indicates the redirector
+# is a old-style single threaded redirector.
+#
+# When this directive is set to a value >= 1 then the protocol
+# used to communicate with the helper is modified to include
+# an ID in front of the request/response. The ID from the request
+# must be echoed back with the response to that request.
+#Default:
+# url_rewrite_children 20 startup=0 idle=1 concurrency=0
+
+# TAG: url_rewrite_host_header
+# To preserve same-origin security policies in browsers and
+# prevent Host: header forgery by redirectors Squid rewrites
+# any Host: header in redirected requests.
+#
+# If you are running an accelerator this may not be a wanted
+#	effect of a redirector. This directive enables you to disable
+# Host: alteration in reverse-proxy traffic.
+#
+# WARNING: Entries are cached on the result of the URL rewriting
+# process, so be careful if you have domain-virtual hosts.
+#
+# WARNING: Squid and other software verifies the URL and Host
+# are matching, so be careful not to relay through other proxies
+# or inspecting firewalls with this disabled.
+#Default:
+# url_rewrite_host_header on
+
+# TAG: url_rewrite_access
+# If defined, this access list specifies which requests are
+# sent to the redirector processes.
+#
+# This clause supports both fast and slow acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#Default:
+# Allow, unless rules exist in squid.conf.
+
+# TAG: url_rewrite_bypass
+# When this is 'on', a request will not go through the
+# redirector if all the helpers are busy. If this is 'off'
+# and the redirector queue grows too large, Squid will exit
+# with a FATAL error and ask you to increase the number of
+# redirectors. You should only enable this if the redirectors
+# are not critical to your caching system. If you use
+# redirectors for access control, and you enable this option,
+# users may have access to pages they should not
+# be allowed to request.
+#Default:
+# url_rewrite_bypass off
+
+# TAG: url_rewrite_extras
+#	Specifies a string to be appended to the request line format for the
+# rewriter helper. "Quoted" format values may contain spaces and
+# logformat %macros. In theory, any logformat %macro can be used.
+# In practice, a %macro expands as a dash (-) if the helper request is
+# sent before the required macro information is available to Squid.
+#Default:
+# url_rewrite_extras "%>a/%>A %un %>rm myip=%la myport=%lp"
+
+# OPTIONS FOR STORE ID
+# -----------------------------------------------------------------------------
+
+# TAG: store_id_program
+# Specify the location of the executable StoreID helper to use.
+# Since they can perform almost any function there isn't one included.
+#
+# For each requested URL, the helper will receive one line with the format
+#
+# [channel-ID <SP>] URL [<SP> extras]<NL>
+#
+#
+# After processing the request the helper must reply using the following format:
+#
+# [channel-ID <SP>] result [<SP> kv-pairs]
+#
+# The result code can be:
+#
+# OK store-id="..."
+# Use the StoreID supplied in 'store-id='.
+#
+# ERR
+# The default is to use HTTP request URL as the store ID.
+#
+# BH
+#	    An internal error occurred in the helper, preventing
+# a result being identified.
+#
+# In addition to the above kv-pairs Squid also understands the following
+# optional kv-pairs received from URL rewriters:
+# clt_conn_tag=TAG
+# Associates a TAG with the client TCP connection.
+# Please see url_rewrite_program related documentation for this
+# kv-pair
+#
+# Helper programs should be prepared to receive and possibly ignore
+# additional whitespace-separated tokens on each input line.
+#
+# When using the concurrency= option the protocol is changed by
+# introducing a query channel tag in front of the request/response.
+# The query channel tag is a number between 0 and concurrency-1.
+# This value must be echoed back unchanged to Squid as the first part
+# of the response relating to its request.
+#
+# NOTE: when using StoreID refresh_pattern will apply to the StoreID
+# returned from the helper and not the URL.
+#
+# WARNING: Wrong StoreID value returned by a careless helper may result
+# in the wrong cached response returned to the user.
+#
+# By default, a StoreID helper is not used.
+#Default:
+# none
+
+# TAG: store_id_extras
+#	Specifies a string to be appended to the request line format for the
+# StoreId helper. "Quoted" format values may contain spaces and
+# logformat %macros. In theory, any logformat %macro can be used.
+# In practice, a %macro expands as a dash (-) if the helper request is
+# sent before the required macro information is available to Squid.
+#Default:
+# store_id_extras "%>a/%>A %un %>rm myip=%la myport=%lp"
+
+# TAG: store_id_children
+# The maximum number of StoreID helper processes to spawn. If you limit
+# it too few Squid will have to wait for them to process a backlog of
+# requests, slowing it down. If you allow too many they will use RAM
+#	and other system resources noticeably.
+#
+# The startup= and idle= options allow some measure of skew in your
+# tuning.
+#
+# startup=
+#
+# Sets a minimum of how many processes are to be spawned when Squid
+# starts or reconfigures. When set to zero the first request will
+# cause spawning of the first child process to handle it.
+#
+# Starting too few will cause an initial slowdown in traffic as Squid
+# attempts to simultaneously spawn enough processes to cope.
+#
+# idle=
+#
+# Sets a minimum of how many processes Squid is to try and keep available
+# at all times. When traffic begins to rise above what the existing
+# processes can handle this many more will be spawned up to the maximum
+# configured. A minimum setting of 1 is required.
+#
+# concurrency=
+#
+# The number of requests each storeID helper can handle in
+# parallel. Defaults to 0 which indicates the helper
+# is a old-style single threaded program.
+#
+# When this directive is set to a value >= 1 then the protocol
+# used to communicate with the helper is modified to include
+# an ID in front of the request/response. The ID from the request
+# must be echoed back with the response to that request.
+#Default:
+# store_id_children 20 startup=0 idle=1 concurrency=0
+
+# TAG: store_id_access
+# If defined, this access list specifies which requests are
+# sent to the StoreID processes. By default all requests
+# are sent.
+#
+# This clause supports both fast and slow acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#Default:
+# Allow, unless rules exist in squid.conf.
+
+# TAG: store_id_bypass
+# When this is 'on', a request will not go through the
+# helper if all helpers are busy. If this is 'off'
+# and the helper queue grows too large, Squid will exit
+# with a FATAL error and ask you to increase the number of
+#	helpers. You should only enable this if the helpers
+# are not critical to your caching system. If you use
+# helpers for critical caching components, and you enable this
+# option, users may not get objects from cache.
+#Default:
+# store_id_bypass on
+
+# OPTIONS FOR TUNING THE CACHE
+# -----------------------------------------------------------------------------
+
+# TAG: cache
+# Requests denied by this directive will not be served from the cache
+# and their responses will not be stored in the cache. This directive
+# has no effect on other transactions and on already cached responses.
+#
+# This clause supports both fast and slow acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#
+# This and the two other similar caching directives listed below are
+# checked at different transaction processing stages, have different
+# access to response information, affect different cache operations,
+# and differ in slow ACLs support:
+#
+# * cache: Checked before Squid makes a hit/miss determination.
+# No access to reply information!
+# Denies both serving a hit and storing a miss.
+# Supports both fast and slow ACLs.
+# * send_hit: Checked after a hit was detected.
+# Has access to reply (hit) information.
+# Denies serving a hit only.
+# Supports fast ACLs only.
+# * store_miss: Checked before storing a cachable miss.
+# Has access to reply (miss) information.
+# Denies storing a miss only.
+# Supports fast ACLs only.
+#
+# If you are not sure which of the three directives to use, apply the
+# following decision logic:
+#
+# * If your ACL(s) are of slow type _and_ need response info, redesign.
+# Squid does not support that particular combination at this time.
+# Otherwise:
+# * If your directive ACL(s) are of slow type, use "cache"; and/or
+# * if your directive ACL(s) need no response info, use "cache".
+# Otherwise:
+# * If you do not want the response cached, use store_miss; and/or
+# * if you do not want a hit on a cached response, use send_hit.
+#Default:
+# By default, this directive is unused and has no effect.
+
+# TAG: send_hit
+# Responses denied by this directive will not be served from the cache
+# (but may still be cached, see store_miss). This directive has no
+# effect on the responses it allows and on the cached objects.
+#
+# Please see the "cache" directive for a summary of differences among
+# store_miss, send_hit, and cache directives.
+#
+# Unlike the "cache" directive, send_hit only supports fast acl
+# types. See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#
+# For example:
+#
+# # apply custom Store ID mapping to some URLs
+# acl MapMe dstdomain .c.example.com
+# store_id_program ...
+# store_id_access allow MapMe
+#
+# # but prevent caching of special responses
+# # such as 302 redirects that cause StoreID loops
+# acl Ordinary http_status 200-299
+# store_miss deny MapMe !Ordinary
+#
+# # and do not serve any previously stored special responses
+# # from the cache (in case they were already cached before
+# # the above store_miss rule was in effect).
+# send_hit deny MapMe !Ordinary
+#Default:
+# By default, this directive is unused and has no effect.
+
+# TAG: store_miss
+# Responses denied by this directive will not be cached (but may still
+# be served from the cache, see send_hit). This directive has no
+# effect on the responses it allows and on the already cached responses.
+#
+# Please see the "cache" directive for a summary of differences among
+# store_miss, send_hit, and cache directives. See the
+# send_hit directive for a usage example.
+#
+# Unlike the "cache" directive, store_miss only supports fast acl
+# types. See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#Default:
+# By default, this directive is unused and has no effect.
+
+# TAG: max_stale time-units
+# This option puts an upper limit on how stale content Squid
+# will serve from the cache if cache validation fails.
+#	Can be overridden by the refresh_pattern max-stale option.
+#Default:
+# max_stale 1 week
+
+# TAG: refresh_pattern
+# usage: refresh_pattern [-i] regex min percent max [options]
+#
+# By default, regular expressions are CASE-SENSITIVE. To make
+# them case-insensitive, use the -i option.
+#
+# 'Min' is the time (in minutes) an object without an explicit
+# expiry time should be considered fresh. The recommended
+# value is 0, any higher values may cause dynamic applications
+# to be erroneously cached unless the application designer
+# has taken the appropriate actions.
+#
+# 'Percent' is a percentage of the objects age (time since last
+# modification age) an object without explicit expiry time
+# will be considered fresh.
+#
+# 'Max' is an upper limit on how long objects without an explicit
+# expiry time will be considered fresh. The value is also used
+# to form Cache-Control: max-age header for a request sent from
+# Squid to origin/parent.
+#
+# options: override-expire
+# override-lastmod
+# reload-into-ims
+# ignore-reload
+# ignore-no-store
+# ignore-must-revalidate
+# ignore-private
+# ignore-auth
+# max-stale=NN
+# refresh-ims
+# store-stale
+#
+# override-expire enforces min age even if the server
+# sent an explicit expiry time (e.g., with the
+# Expires: header or Cache-Control: max-age). Doing this
+# VIOLATES the HTTP standard. Enabling this feature
+# could make you liable for problems which it causes.
+#
+# Note: override-expire does not enforce staleness - it only extends
+#	freshness / min. If the server returns an Expires time which
+# is longer than your max time, Squid will still consider
+# the object fresh for that period of time.
+#
+# override-lastmod enforces min age even on objects
+# that were modified recently.
+#
+# reload-into-ims changes a client no-cache or ``reload''
+# request for a cached entry into a conditional request using
+# If-Modified-Since and/or If-None-Match headers, provided the
+# cached entry has a Last-Modified and/or a strong ETag header.
+# Doing this VIOLATES the HTTP standard. Enabling this feature
+# could make you liable for problems which it causes.
+#
+# ignore-reload ignores a client no-cache or ``reload''
+# header. Doing this VIOLATES the HTTP standard. Enabling
+# this feature could make you liable for problems which
+# it causes.
+#
+# ignore-no-store ignores any ``Cache-control: no-store''
+# headers received from a server. Doing this VIOLATES
+# the HTTP standard. Enabling this feature could make you
+# liable for problems which it causes.
+#
+# ignore-must-revalidate ignores any ``Cache-Control: must-revalidate``
+# headers received from a server. Doing this VIOLATES
+# the HTTP standard. Enabling this feature could make you
+# liable for problems which it causes.
+#
+# ignore-private ignores any ``Cache-control: private''
+# headers received from a server. Doing this VIOLATES
+# the HTTP standard. Enabling this feature could make you
+# liable for problems which it causes.
+#
+# ignore-auth caches responses to requests with authorization,
+#	as if the origin server had sent ``Cache-control: public''
+# in the response header. Doing this VIOLATES the HTTP standard.
+# Enabling this feature could make you liable for problems which
+# it causes.
+#
+# refresh-ims causes squid to contact the origin server
+# when a client issues an If-Modified-Since request. This
+# ensures that the client will receive an updated version
+# if one is available.
+#
+# store-stale stores responses even if they don't have explicit
+# freshness or a validator (i.e., Last-Modified or an ETag)
+# present, or if they're already stale. By default, Squid will
+# not cache such responses because they usually can't be
+# reused. Note that such responses will be stale by default.
+#
+# max-stale=NN provide a maximum staleness factor. Squid won't
+# serve objects more stale than this even if it failed to
+# validate the object. Default: use the max_stale global limit.
+#
+# Basically a cached object is:
+#
+# FRESH if expire > now, else STALE
+# STALE if age > max
+# FRESH if lm-factor < percent, else STALE
+# FRESH if age < min
+# else STALE
+#
+# The refresh_pattern lines are checked in the order listed here.
+# The first entry which matches is used. If none of the entries
+# match the default will be used.
+#
+# Note, you must uncomment all the default lines if you want
+# to change one. The default setting is only active if none is
+# used.
+#
+#
+
+#
+# Add any of your own refresh_pattern entries above these.
+#
+refresh_pattern ^ftp: 1440 20% 10080
+refresh_pattern ^gopher: 1440 0% 1440
+refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
+refresh_pattern (Release|Packages(.gz)*)$ 0 20% 2880
+# example line for deb packages
+#refresh_pattern (\.deb|\.udeb)$ 129600 100% 129600
+refresh_pattern . 0 20% 4320
+
+# TAG: quick_abort_min (KB)
+#Default:
+# quick_abort_min 16 KB
+
+# TAG: quick_abort_max (KB)
+#Default:
+# quick_abort_max 16 KB
+
+# TAG: quick_abort_pct (percent)
+# The cache by default continues downloading aborted requests
+# which are almost completed (less than 16 KB remaining). This
+# may be undesirable on slow (e.g. SLIP) links and/or very busy
+# caches. Impatient users may tie up file descriptors and
+# bandwidth by repeatedly requesting and immediately aborting
+# downloads.
+#
+# When the user aborts a request, Squid will check the
+#	quick_abort values against the amount of data transferred until
+# then.
+#
+# If the transfer has less than 'quick_abort_min' KB remaining,
+# it will finish the retrieval.
+#
+# If the transfer has more than 'quick_abort_max' KB remaining,
+# it will abort the retrieval.
+#
+# If more than 'quick_abort_pct' of the transfer has completed,
+# it will finish the retrieval.
+#
+# If you do not want any retrieval to continue after the client
+# has aborted, set both 'quick_abort_min' and 'quick_abort_max'
+# to '0 KB'.
+#
+# If you want retrievals to always continue if they are being
+# cached set 'quick_abort_min' to '-1 KB'.
+#Default:
+# quick_abort_pct 95
+
+# TAG: read_ahead_gap buffer-size
+# The amount of data the cache will buffer ahead of what has been
+# sent to the client when retrieving an object from another server.
+#Default:
+# read_ahead_gap 16 KB
+
+# TAG: negative_ttl time-units
+# Set the Default Time-to-Live (TTL) for failed requests.
+# Certain types of failures (such as "connection refused" and
+# "404 Not Found") are able to be negatively-cached for a short time.
+# Modern web servers should provide Expires: header, however if they
+# do not this can provide a minimum TTL.
+# The default is not to cache errors with unknown expiry details.
+#
+# Note that this is different from negative caching of DNS lookups.
+#
+# WARNING: Doing this VIOLATES the HTTP standard. Enabling
+# this feature could make you liable for problems which it
+# causes.
+#Default:
+# negative_ttl 0 seconds
+
+# TAG: positive_dns_ttl time-units
+# Upper limit on how long Squid will cache positive DNS responses.
+# Default is 6 hours (360 minutes). This directive must be set
+# larger than negative_dns_ttl.
+#Default:
+# positive_dns_ttl 6 hours
+
+# TAG: negative_dns_ttl time-units
+# Time-to-Live (TTL) for negative caching of failed DNS lookups.
+# This also sets the lower cache limit on positive lookups.
+# Minimum value is 1 second, and it is not recommendable to go
+# much below 10 seconds.
+#Default:
+# negative_dns_ttl 1 minutes
+
+# TAG: range_offset_limit size [acl acl...]
+# usage: (size) [units] [[!]aclname]
+#
+# Sets an upper limit on how far (number of bytes) into the file
+# a Range request may be to cause Squid to prefetch the whole file.
+# If beyond this limit, Squid forwards the Range request as it is and
+# the result is NOT cached.
+#
+# This is to stop a far ahead range request (lets say start at 17MB)
+# from making Squid fetch the whole object up to that point before
+# sending anything to the client.
+#
+# Multiple range_offset_limit lines may be specified, and they will
+# be searched from top to bottom on each request until a match is found.
+# The first match found will be used. If no line matches a request, the
+# default limit of 0 bytes will be used.
+#
+# 'size' is the limit specified as a number of units.
+#
+# 'units' specifies whether to use bytes, KB, MB, etc.
+# If no units are specified bytes are assumed.
+#
+# A size of 0 causes Squid to never fetch more than the
+# client requested. (default)
+#
+# A size of 'none' causes Squid to always fetch the object from the
+# beginning so it may cache the result. (2.0 style)
+#
+# 'aclname' is the name of a defined ACL.
+#
+# NP: Using 'none' as the byte value here will override any quick_abort settings
+# that may otherwise apply to the range request. The range request will
+# be fully fetched from start to finish regardless of the client
+# actions. This affects bandwidth usage.
+#Default:
+# none
+
+# TAG: minimum_expiry_time (seconds)
+# The minimum caching time according to (Expires - Date)
+# headers Squid honors if the object can't be revalidated.
+# The default is 60 seconds.
+#
+# In reverse proxy environments it might be desirable to honor
+# shorter object lifetimes. It is most likely better to make
+# your server return a meaningful Last-Modified header however.
+#
+# In ESI environments where page fragments often have short
+# lifetimes, this will often be best set to 0.
+#Default:
+# minimum_expiry_time 60 seconds
+
+# TAG: store_avg_object_size (bytes)
+# Average object size, used to estimate number of objects your
+# cache can hold. The default is 13 KB.
+#
+# This is used to pre-seed the cache index memory allocation to
+# reduce expensive reallocate operations while handling clients
+# traffic. Too-large values may result in memory allocation during
+# peak traffic, too-small values will result in wasted memory.
+#
+# Check the cache manager 'info' report metrics for the real
+# object sizes seen by your Squid before tuning this.
+#Default:
+# store_avg_object_size 13 KB
+
+# TAG: store_objects_per_bucket
+# Target number of objects per bucket in the store hash table.
+# Lowering this value increases the total number of buckets and
+# also the storage maintenance rate. The default is 20.
+#Default:
+# store_objects_per_bucket 20
+
+# HTTP OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: request_header_max_size (KB)
+# This specifies the maximum size for HTTP headers in a request.
+# Request headers are usually relatively small (about 512 bytes).
+# Placing a limit on the request header size will catch certain
+# bugs (for example with persistent connections) and possibly
+# buffer-overflow or denial-of-service attacks.
+#Default:
+# request_header_max_size 64 KB
+
+# TAG: reply_header_max_size (KB)
+# This specifies the maximum size for HTTP headers in a reply.
+# Reply headers are usually relatively small (about 512 bytes).
+# Placing a limit on the reply header size will catch certain
+# bugs (for example with persistent connections) and possibly
+# buffer-overflow or denial-of-service attacks.
+#Default:
+# reply_header_max_size 64 KB
+
+# TAG: request_body_max_size (bytes)
+# This specifies the maximum size for an HTTP request body.
+# In other words, the maximum size of a PUT/POST request.
+# A user who attempts to send a request with a body larger
+# than this limit receives an "Invalid Request" error message.
+# If you set this parameter to a zero (the default), there will
+# be no limit imposed.
+#
+# See also client_request_buffer_max_size for an alternative
+# limitation on client uploads which can be configured.
+#Default:
+# No limit.
+
+# TAG: client_request_buffer_max_size (bytes)
+# This specifies the maximum buffer size of a client request.
+# It prevents squid eating too much memory when somebody uploads
+# a large file.
+#Default:
+# client_request_buffer_max_size 512 KB
+
+# TAG: broken_posts
+# A list of ACL elements which, if matched, causes Squid to send
+# an extra CRLF pair after the body of a PUT/POST request.
+#
+#	Some HTTP servers have broken implementations of PUT/POST,
+# and rely on an extra CRLF pair sent by some WWW clients.
+#
+# Quote from RFC2616 section 4.1 on this matter:
+#
+# Note: certain buggy HTTP/1.0 client implementations generate an
+# extra CRLF's after a POST request. To restate what is explicitly
+# forbidden by the BNF, an HTTP/1.1 client must not preface or follow
+# a request with an extra CRLF.
+#
+# This clause only supports fast acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#
+#Example:
+# acl buggy_server url_regex ^http://....
+# broken_posts allow buggy_server
+#Default:
+# Obey RFC 2616.
+
+# TAG: adaptation_uses_indirect_client on|off
+# Controls whether the indirect client IP address (instead of the direct
+# client IP address) is passed to adaptation services.
+#
+# See also: follow_x_forwarded_for adaptation_send_client_ip
+#Default:
+# adaptation_uses_indirect_client on
+
+# TAG: via on|off
+# If set (default), Squid will include a Via header in requests and
+# replies as required by RFC2616.
+#Default:
+# via on
+
+# TAG: ie_refresh on|off
+# Microsoft Internet Explorer up until version 5.5 Service
+# Pack 1 has an issue with transparent proxies, wherein it
+# is impossible to force a refresh. Turning this on provides
+# a partial fix to the problem, by causing all IMS-REFRESH
+# requests from older IE versions to check the origin server
+# for fresh content. This reduces hit ratio by some amount
+# (~10% in my experience), but allows users to actually get
+# fresh content when they want it. Note because Squid
+# cannot tell if the user is using 5.5 or 5.5SP1, the behavior
+# of 5.5 is unchanged from old versions of Squid (i.e. a
+# forced refresh is impossible). Newer versions of IE will,
+# hopefully, continue to have the new behavior and will be
+# handled based on that assumption. This option defaults to
+# the old Squid behavior, which is better for hit ratios but
+# worse for clients using IE, if they need to be able to
+# force fresh content.
+#Default:
+# ie_refresh off
+
+# TAG: vary_ignore_expire on|off
+#	Many HTTP servers supporting Vary give such objects
+# immediate expiry time with no cache-control header
+# when requested by a HTTP/1.0 client. This option
+# enables Squid to ignore such expiry times until
+# HTTP/1.1 is fully implemented.
+#
+# WARNING: If turned on this may eventually cause some
+# varying objects not intended for caching to get cached.
+#Default:
+# vary_ignore_expire off
+
+# TAG: request_entities
+# Squid defaults to deny GET and HEAD requests with request entities,
+# as the meaning of such requests are undefined in the HTTP standard
+# even if not explicitly forbidden.
+#
+#	Set this directive to on if you have clients which insist
+# on sending request entities in GET or HEAD requests. But be warned
+# that there is server software (both proxies and web servers) which
+# can fail to properly process this kind of request which may make you
+# vulnerable to cache pollution attacks if enabled.
+#Default:
+# request_entities off
+
+# TAG: request_header_access
+# Usage: request_header_access header_name allow|deny [!]aclname ...
+#
+# WARNING: Doing this VIOLATES the HTTP standard. Enabling
+# this feature could make you liable for problems which it
+# causes.
+#
+# This option replaces the old 'anonymize_headers' and the
+# older 'http_anonymizer' option with something that is much
+# more configurable. A list of ACLs for each header name allows
+# removal of specific header fields under specific conditions.
+#
+# This option only applies to outgoing HTTP request headers (i.e.,
+# headers sent by Squid to the next HTTP hop such as a cache peer
+# or an origin server). The option has no effect during cache hit
+# detection. The equivalent adaptation vectoring point in ICAP
+# terminology is post-cache REQMOD.
+#
+# The option is applied to individual outgoing request header
+# fields. For each request header field F, Squid uses the first
+# qualifying sets of request_header_access rules:
+#
+# 1. Rules with header_name equal to F's name.
+# 2. Rules with header_name 'Other', provided F's name is not
+# on the hard-coded list of commonly used HTTP header names.
+# 3. Rules with header_name 'All'.
+#
+# Within that qualifying rule set, rule ACLs are checked as usual.
+# If ACLs of an "allow" rule match, the header field is allowed to
+# go through as is. If ACLs of a "deny" rule match, the header is
+# removed and request_header_replace is then checked to identify
+# if the removed header has a replacement. If no rules within the
+# set have matching ACLs, the header field is left as is.
+#
+# For example, to achieve the same behavior as the old
+# 'http_anonymizer standard' option, you should use:
+#
+# request_header_access From deny all
+# request_header_access Referer deny all
+# request_header_access User-Agent deny all
+#
+# Or, to reproduce the old 'http_anonymizer paranoid' feature
+# you should use:
+#
+# request_header_access Authorization allow all
+# request_header_access Proxy-Authorization allow all
+# request_header_access Cache-Control allow all
+# request_header_access Content-Length allow all
+# request_header_access Content-Type allow all
+# request_header_access Date allow all
+# request_header_access Host allow all
+# request_header_access If-Modified-Since allow all
+# request_header_access Pragma allow all
+# request_header_access Accept allow all
+# request_header_access Accept-Charset allow all
+# request_header_access Accept-Encoding allow all
+# request_header_access Accept-Language allow all
+# request_header_access Connection allow all
+# request_header_access All deny all
+#
+# HTTP reply headers are controlled with the reply_header_access directive.
+#
+# By default, all headers are allowed (no anonymizing is performed).
+#Default:
+# No limits.
+
+# TAG: reply_header_access
+# Usage: reply_header_access header_name allow|deny [!]aclname ...
+#
+# WARNING: Doing this VIOLATES the HTTP standard. Enabling
+# this feature could make you liable for problems which it
+# causes.
+#
+# This option only applies to reply headers, i.e., from the
+# server to the client.
+#
+# This is the same as request_header_access, but in the other
+# direction. Please see request_header_access for detailed
+# documentation.
+#
+# For example, to achieve the same behavior as the old
+# 'http_anonymizer standard' option, you should use:
+#
+# reply_header_access Server deny all
+# reply_header_access WWW-Authenticate deny all
+# reply_header_access Link deny all
+#
+# Or, to reproduce the old 'http_anonymizer paranoid' feature
+# you should use:
+#
+# reply_header_access Allow allow all
+# reply_header_access WWW-Authenticate allow all
+# reply_header_access Proxy-Authenticate allow all
+# reply_header_access Cache-Control allow all
+# reply_header_access Content-Encoding allow all
+# reply_header_access Content-Length allow all
+# reply_header_access Content-Type allow all
+# reply_header_access Date allow all
+# reply_header_access Expires allow all
+# reply_header_access Last-Modified allow all
+# reply_header_access Location allow all
+# reply_header_access Pragma allow all
+# reply_header_access Content-Language allow all
+# reply_header_access Retry-After allow all
+# reply_header_access Title allow all
+# reply_header_access Content-Disposition allow all
+# reply_header_access Connection allow all
+# reply_header_access All deny all
+#
+# HTTP request headers are controlled with the request_header_access directive.
+#
+# By default, all headers are allowed (no anonymizing is
+# performed).
+#Default:
+# No limits.
+
+# TAG: request_header_replace
+# Usage: request_header_replace header_name message
+# Example: request_header_replace User-Agent Nutscrape/1.0 (CP/M; 8-bit)
+#
+# This option allows you to change the contents of headers
+# denied with request_header_access above, by replacing them
+# with some fixed string.
+#
+# This only applies to request headers, not reply headers.
+#
+# By default, headers are removed if denied.
+#Default:
+# none
+
+# TAG: reply_header_replace
+# Usage: reply_header_replace header_name message
+# Example: reply_header_replace Server Foo/1.0
+#
+# This option allows you to change the contents of headers
+# denied with reply_header_access above, by replacing them
+# with some fixed string.
+#
+# This only applies to reply headers, not request headers.
+#
+# By default, headers are removed if denied.
+#Default:
+# none
+
+# TAG: request_header_add
+# Usage: request_header_add field-name field-value acl1 [acl2] ...
+# Example: request_header_add X-Client-CA "CA=%ssl::>cert_issuer" all
+#
+# This option adds header fields to outgoing HTTP requests (i.e.,
+# request headers sent by Squid to the next HTTP hop such as a
+# cache peer or an origin server). The option has no effect during
+# cache hit detection. The equivalent adaptation vectoring point
+# in ICAP terminology is post-cache REQMOD.
+#
+# Field-name is a token specifying an HTTP header name. If a
+# standard HTTP header name is used, Squid does not check whether
+# the new header conflicts with any existing headers or violates
+# HTTP rules. If the request to be modified already contains a
+# field with the same name, the old field is preserved but the
+# header field values are not merged.
+#
+# Field-value is either a token or a quoted string. If quoted
+# string format is used, then the surrounding quotes are removed
+# while escape sequences and %macros are processed.
+#
+# In theory, all of the logformat codes can be used as %macros.
+# However, unlike logging (which happens at the very end of
+# transaction lifetime), the transaction may not yet have enough
+# information to expand a macro when the new header value is needed.
+# And some information may already be available to Squid but not yet
+# committed where the macro expansion code can access it (report
+# such instances!). The macro will be expanded into a single dash
+# ('-') in such cases. Not all macros have been tested.
+#
+# One or more Squid ACLs may be specified to restrict header
+# injection to matching requests. As always in squid.conf, all
+# ACLs in an option ACL list must be satisfied for the insertion
+# to happen. The request_header_add option supports fast ACLs
+# only.
+#Default:
+# none
+
+# TAG: note
+# This option is used to log custom information about the master
+# transaction. For example, an admin may configure Squid to log
+# which "user group" the transaction belongs to, where "user group"
+# will be determined based on a set of ACLs and not [just]
+# authentication information.
+# Values of key/value pairs can be logged using %{key}note macros:
+#
+# note key value acl ...
+# logformat myFormat ... %{key}note ...
+#Default:
+# none
+
+# TAG: relaxed_header_parser on|off|warn
+# In the default "on" setting Squid accepts certain forms
+# of non-compliant HTTP messages where it is unambiguous
+# what the sending application intended even if the message
+# is not correctly formatted. The message is then normalized
+# to the correct form when forwarded by Squid.
+#
+# If set to "warn" then a warning will be emitted in cache.log
+# each time such HTTP error is encountered.
+#
+# If set to "off" then such HTTP errors will cause the request
+# or response to be rejected.
+#Default:
+# relaxed_header_parser on
+
+# TAG: collapsed_forwarding (on|off)
+# When enabled, instead of forwarding each concurrent request for
+# the same URL, Squid just sends the first of them. The other, so
+# called "collapsed" requests, wait for the response to the first
+# request and, if it happens to be cachable, use that response.
+# Here, "concurrent requests" means "received after the first
+# request headers were parsed and before the corresponding response
+# headers were parsed".
+#
+# This feature is disabled by default: enabling collapsed
+# forwarding needlessly delays forwarding requests that look
+# cachable (when they are collapsed) but then need to be forwarded
+# individually anyway because they end up being for uncachable
+# content. However, in some cases, such as acceleration of highly
+# cachable content with periodic or grouped expiration times, the
+# gains from collapsing [large volumes of simultaneous refresh
+# requests] outweigh losses from such delays.
+#
+# Squid collapses two kinds of requests: regular client requests
+# received on one of the listening ports and internal "cache
+# revalidation" requests which are triggered by those regular
+# requests hitting a stale cached object. Revalidation collapsing
+# is currently disabled for Squid instances containing SMP-aware
+# disk or memory caches and for Vary-controlled cached objects.
+#Default:
+# collapsed_forwarding off
+
+# TIMEOUTS
+# -----------------------------------------------------------------------------
+
+# TAG: forward_timeout time-units
+# This parameter specifies how long Squid should at most spend
+# finding a forwarding path for the request before giving up.
+#Default:
+# forward_timeout 4 minutes
+
+# TAG: connect_timeout time-units
+# This parameter specifies how long to wait for the TCP connect to
+# the requested server or peer to complete before Squid should
+# attempt to find another path where to forward the request.
+#Default:
+# connect_timeout 1 minute
+
+# TAG: peer_connect_timeout time-units
+# This parameter specifies how long to wait for a pending TCP
+# connection to a peer cache. The default is 30 seconds. You
+# may also set different timeout values for individual neighbors
+# with the 'connect-timeout' option on a 'cache_peer' line.
+#Default:
+# peer_connect_timeout 30 seconds
+
+# TAG: read_timeout time-units
+# Applied on peer server connections.
+#
+# After each successful read(), the timeout will be extended by this
+# amount. If no data is read again after this amount of time,
+# the request is aborted and logged with ERR_READ_TIMEOUT.
+#
+# The default is 15 minutes.
+#Default:
+# read_timeout 15 minutes
+
+# TAG: write_timeout time-units
+# This timeout is tracked for all connections that have data
+# available for writing and are waiting for the socket to become
+# ready. After each successful write, the timeout is extended by
+# the configured amount. If Squid has data to write but the
+# connection is not ready for the configured duration, the
+# transaction associated with the connection is terminated. The
+# default is 15 minutes.
+#Default:
+# write_timeout 15 minutes
+
+# TAG: request_timeout
+# How long to wait for complete HTTP request headers after initial
+# connection establishment.
+#Default:
+# request_timeout 5 minutes
+
+# TAG: client_idle_pconn_timeout
+# How long to wait for the next HTTP request on a persistent
+# client connection after the previous request completes.
+#Default:
+# client_idle_pconn_timeout 2 minutes
+
+# TAG: ftp_client_idle_timeout
+# How long to wait for an FTP request on a connection to Squid ftp_port.
+# Many FTP clients do not deal with idle connection closures well,
+# necessitating a longer default timeout than client_idle_pconn_timeout
+# used for incoming HTTP requests.
+#Default:
+# ftp_client_idle_timeout 30 minutes
+
+# TAG: client_lifetime time-units
+# The maximum amount of time a client (browser) is allowed to
+# remain connected to the cache process. This protects the Cache
+# from having a lot of sockets (and hence file descriptors) tied up
+# in a CLOSE_WAIT state from remote clients that go away without
+# properly shutting down (either because of a network failure or
+# because of a poor client implementation). The default is one
+# day, 1440 minutes.
+#
+# NOTE: The default value is intended to be much larger than any
+# client would ever need to be connected to your cache. You
+# should probably change client_lifetime only as a last resort.
+# If you seem to have many client connections tying up
+# filedescriptors, we recommend first tuning the read_timeout,
+# request_timeout, persistent_request_timeout and quick_abort values.
+#Default:
+# client_lifetime 1 day
+
+# TAG: half_closed_clients
+# Some clients may shutdown the sending side of their TCP
+# connections, while leaving their receiving sides open. Sometimes,
+# Squid can not tell the difference between a half-closed and a
+# fully-closed TCP connection.
+#
+# By default, Squid will immediately close client connections when
+# read(2) returns "no more data to read."
+#
+# Change this option to 'on' and Squid will keep open connections
+# until a read(2) or write(2) on the socket returns an error.
+# This may show some benefits for reverse proxies. But if not
+# it is recommended to leave OFF.
+#Default:
+# half_closed_clients off
+
+# TAG: server_idle_pconn_timeout
+# Timeout for idle persistent connections to servers and other
+# proxies.
+#Default:
+# server_idle_pconn_timeout 1 minute
+
+# TAG: ident_timeout
+# Maximum time to wait for IDENT lookups to complete.
+#
+# If this is too high, and you enabled IDENT lookups from untrusted
+# users, you might be susceptible to denial-of-service by having
+# many ident requests going at once.
+#Default:
+# ident_timeout 10 seconds
+
+# TAG: shutdown_lifetime time-units
+# When SIGTERM or SIGHUP is received, the cache is put into
+# "shutdown pending" mode until all active sockets are closed.
+# This value is the lifetime to set for all open descriptors
+# during shutdown mode. Any active clients after this many
+# seconds will receive a 'timeout' message.
+#Default:
+# shutdown_lifetime 30 seconds
+
+# ADMINISTRATIVE PARAMETERS
+# -----------------------------------------------------------------------------
+
+# TAG: cache_mgr
+# Email-address of local cache manager who will receive
+# mail if the cache dies. The default is "webmaster".
+#Default:
+# cache_mgr webmaster
+
+# TAG: mail_from
+# From: email-address for mail sent when the cache dies.
+# The default is to use 'squid@unique_hostname'.
+#
+# See also: unique_hostname directive.
+#Default:
+# none
+
+# TAG: mail_program
+# Email program used to send mail if the cache dies.
+# The default is "mail". The specified program must comply
+# with the standard Unix mail syntax:
+# mail-program recipient < mailfile
+#
+# Optional command line options can be specified.
+#Default:
+# mail_program mail
+
+# TAG: cache_effective_user
+# If you start Squid as root, it will change its effective/real
+# UID/GID to the user specified below. The default is to change
+# to UID of proxy.
+# see also; cache_effective_group
+#Default:
+# cache_effective_user proxy
+
+# TAG: cache_effective_group
+# Squid sets the GID to the effective user's default group ID
+# (taken from the password file) and supplementary group list
+# from the groups membership.
+#
+# If you want Squid to run with a specific GID regardless of
+# the group memberships of the effective user then set this
+# to the group (or GID) you want Squid to run as. When set
+# all other group privileges of the effective user are ignored
+# and only this GID is effective. If Squid is not started as
+# root the user starting Squid MUST be member of the specified
+# group.
+#
+# This option is not recommended by the Squid Team.
+# Our preference is for administrators to configure a secure
+# user account for squid with UID/GID matching system policies.
+#Default:
+# Use system group memberships of the cache_effective_user account
+
+# TAG: httpd_suppress_version_string on|off
+# Suppress Squid version string info in HTTP headers and HTML error pages.
+#Default:
+# httpd_suppress_version_string off
+
+# TAG: visible_hostname
+# If you want to present a special hostname in error messages, etc,
+# define this. Otherwise, the return value of gethostname()
+# will be used. If you have multiple caches in a cluster and
+# get errors about IP-forwarding you must set them to have individual
+# names with this setting.
+#Default:
+# Automatically detect the system host name
+
+# TAG: unique_hostname
+# If you want to have multiple machines with the same
+# 'visible_hostname' you must give each machine a different
+# 'unique_hostname' so forwarding loops can be detected.
+#Default:
+# Copy the value from visible_hostname
+
+# TAG: hostname_aliases
+# A list of other DNS names your cache has.
+#Default:
+# none
+
+# TAG: umask
+# Minimum umask which should be enforced while the proxy
+# is running, in addition to the umask set at startup.
+#
+# For a traditional octal representation of umasks, start
+# your value with 0.
+#Default:
+# umask 027
+
+# OPTIONS FOR THE CACHE REGISTRATION SERVICE
+# -----------------------------------------------------------------------------
+#
+# This section contains parameters for the (optional) cache
+# announcement service. This service is provided to help
+# cache administrators locate one another in order to join or
+# create cache hierarchies.
+#
+# An 'announcement' message is sent (via UDP) to the registration
+# service by Squid. By default, the announcement message is NOT
+# SENT unless you enable it with 'announce_period' below.
+#
+# The announcement message includes your hostname, plus the
+# following information from this configuration file:
+#
+# http_port
+# icp_port
+# cache_mgr
+#
+# All current information is processed regularly and made
+# available on the Web at http://www.ircache.net/Cache/Tracker/.
+
+# TAG: announce_period
+# This is how frequently to send cache announcements.
+#
+# To enable announcing your cache, just set an announce period.
+#
+# Example:
+# announce_period 1 day
+#Default:
+# Announcement messages disabled.
+
+# TAG: announce_host
+# Set the hostname where announce registration messages will be sent.
+#
+# See also announce_port and announce_file
+#Default:
+# announce_host tracker.ircache.net
+
+# TAG: announce_file
+# The contents of this file will be included in the announce
+# registration messages.
+#Default:
+# none
+
+# TAG: announce_port
+# Set the port where announce registration messages will be sent.
+#
+# See also announce_host and announce_file
+#Default:
+# announce_port 3131
+
+# HTTPD-ACCELERATOR OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: httpd_accel_surrogate_id
+# Surrogates (http://www.esi.org/architecture_spec_1.0.html)
+# need an identification token to allow control targeting. Because
+# a farm of surrogates may all perform the same tasks, they may share
+# an identification token.
+#Default:
+# visible_hostname is used if no specific ID is set.
+
+# TAG: http_accel_surrogate_remote on|off
+# Remote surrogates (such as those in a CDN) honour the header
+# "Surrogate-Control: no-store-remote".
+#
+# Set this to on to have squid behave as a remote surrogate.
+#Default:
+# http_accel_surrogate_remote off
+
+# TAG: esi_parser libxml2|expat|custom
+# ESI markup is not strictly XML compatible. The custom ESI parser
+# will give higher performance, but cannot handle non ASCII character
+# encodings.
+#Default:
+# esi_parser custom
+
+# DELAY POOL PARAMETERS
+# -----------------------------------------------------------------------------
+
+# TAG: delay_pools
+# This represents the number of delay pools to be used. For example,
+# if you have one class 2 delay pool and one class 3 delay pool, you
+# have a total of 2 delay pools.
+#
+# See also delay_parameters, delay_class, delay_access for pool
+# configuration details.
+#Default:
+# delay_pools 0
+
+# TAG: delay_class
+# This defines the class of each delay pool. There must be exactly one
+# delay_class line for each delay pool. For example, to define two
+# delay pools, one of class 2 and one of class 3, the settings above
+# and here would be:
+#
+# Example:
+# delay_pools 4 # 4 delay pools
+# delay_class 1 2 # pool 1 is a class 2 pool
+# delay_class 2 3 # pool 2 is a class 3 pool
+# delay_class 3 4 # pool 3 is a class 4 pool
+# delay_class 4 5 # pool 4 is a class 5 pool
+#
+# The delay pool classes are:
+#
+# class 1 Everything is limited by a single aggregate
+# bucket.
+#
+# class 2 Everything is limited by a single aggregate
+# bucket as well as an "individual" bucket chosen
+# from bits 25 through 32 of the IPv4 address.
+#
+# class 3 Everything is limited by a single aggregate
+# bucket as well as a "network" bucket chosen
+# from bits 17 through 24 of the IP address and an
+# "individual" bucket chosen from bits 17 through
+# 32 of the IPv4 address.
+#
+# class 4 Everything in a class 3 delay pool, with an
+# additional limit on a per user basis. This
+# only takes effect if the username is established
+# in advance - by forcing authentication in your
+# http_access rules.
+#
+# class 5 Requests are grouped according to their tag (see
+# external_acl's tag= reply).
+#
+#
+# Each pool also requires a delay_parameters directive to configure the pool size
+# and speed limits used whenever the pool is applied to a request. Along with
+# a set of delay_access directives to determine when it is used.
+#
+# NOTE: If an IP address is a.b.c.d
+# -> bits 25 through 32 are "d"
+# -> bits 17 through 24 are "c"
+# -> bits 17 through 32 are "c * 256 + d"
+#
+# NOTE-2: Due to the use of bitmasks in class 2,3,4 pools they only apply to
+# IPv4 traffic. Class 1 and 5 pools may be used with IPv6 traffic.
+#
+# This clause only supports fast acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#
+# See also delay_parameters and delay_access.
+#Default:
+# none
+
+# TAG: delay_access
+# This is used to determine which delay pool a request falls into.
+#
+# delay_access is sorted per pool and the matching starts with pool 1,
+# then pool 2, ..., and finally pool N. The first delay pool where the
+# request is allowed is selected for the request. If it does not allow
+# the request to any pool then the request is not delayed (default).
+#
+# For example, if you want some_big_clients in delay
+# pool 1 and lotsa_little_clients in delay pool 2:
+#
+# delay_access 1 allow some_big_clients
+# delay_access 1 deny all
+# delay_access 2 allow lotsa_little_clients
+# delay_access 2 deny all
+# delay_access 3 allow authenticated_clients
+#
+# See also delay_parameters and delay_class.
+#
+#Default:
+# Deny using the pool, unless allow rules exist in squid.conf for the pool.
+
+# TAG: delay_parameters
+# This defines the parameters for a delay pool. Each delay pool has
+# a number of "buckets" associated with it, as explained in the
+# description of delay_class.
+#
+# For a class 1 delay pool, the syntax is:
+# delay_class pool 1
+# delay_parameters pool aggregate
+#
+# For a class 2 delay pool:
+# delay_class pool 2
+# delay_parameters pool aggregate individual
+#
+# For a class 3 delay pool:
+# delay_class pool 3
+# delay_parameters pool aggregate network individual
+#
+# For a class 4 delay pool:
+# delay_class pool 4
+# delay_parameters pool aggregate network individual user
+#
+# For a class 5 delay pool:
+# delay_class pool 5
+# delay_parameters pool tagrate
+#
+# The option variables are:
+#
+# pool a pool number - ie, a number between 1 and the
+# number specified in delay_pools as used in
+# delay_class lines.
+#
+# aggregate the speed limit parameters for the aggregate bucket
+# (class 1, 2, 3).
+#
+# individual the speed limit parameters for the individual
+# buckets (class 2, 3).
+#
+# network the speed limit parameters for the network buckets
+# (class 3).
+#
+# user the speed limit parameters for the user buckets
+# (class 4).
+#
+# tagrate the speed limit parameters for the tag buckets
+# (class 5).
+#
+# A pair of delay parameters is written restore/maximum, where restore is
+# the number of bytes (not bits - modem and network speeds are usually
+# quoted in bits) per second placed into the bucket, and maximum is the
+# maximum number of bytes which can be in the bucket at any time.
+#
+# There must be one delay_parameters line for each delay pool.
+#
+#
+# For example, if delay pool number 1 is a class 2 delay pool as in the
+# above example, and is being used to strictly limit each host to 64Kbit/sec
+# (plus overheads), with no overall limit, the line is:
+#
+# delay_parameters 1 none 8000/8000
+#
+# Note that 8 x 8K Byte/sec -> 64K bit/sec.
+#
+# Note that the word 'none' is used to represent no limit.
+#
+#
+# And, if delay pool number 2 is a class 3 delay pool as in the above
+# example, and you want to limit it to a total of 256Kbit/sec (strict limit)
+# with each 8-bit network permitted 64Kbit/sec (strict limit) and each
+# individual host permitted 4800bit/sec with a bucket maximum size of 64Kbits
+# to permit a decent web page to be downloaded at a decent speed
+# (if the network is not being limited due to overuse) but slow down
+# large downloads more significantly:
+#
+# delay_parameters 2 32000/32000 8000/8000 600/8000
+#
+# Note that 8 x 32K Byte/sec -> 256K bit/sec.
+# 8 x 8K Byte/sec -> 64K bit/sec.
+# 8 x 600 Byte/sec -> 4800 bit/sec.
+#
+#
+# Finally, for a class 4 delay pool as in the example - each user will
+# be limited to 128Kbits/sec no matter how many workstations they are logged into.:
+#
+# delay_parameters 4 32000/32000 8000/8000 600/64000 16000/16000
+#
+#
+# See also delay_class and delay_access.
+#
+#Default:
+# none
+
+# TAG: delay_initial_bucket_level (percent, 0-100)
+# The initial bucket percentage is used to determine how much is put
+# in each bucket when squid starts, is reconfigured, or first notices
+# a host accessing it (in class 2 and class 3, individual hosts and
+# networks only have buckets associated with them once they have been
+# "seen" by squid).
+#Default:
+# delay_initial_bucket_level 50
+
+# CLIENT DELAY POOL PARAMETERS
+# -----------------------------------------------------------------------------
+
+# TAG: client_delay_pools
+# This option specifies the number of client delay pools used. It must
+# precede other client_delay_* options.
+#
+# Example:
+# client_delay_pools 2
+#
+# See also client_delay_parameters and client_delay_access.
+#Default:
+# client_delay_pools 0
+
+# TAG: client_delay_initial_bucket_level (percent, 0-no_limit)
+# This option determines the initial bucket size as a percentage of
+# max_bucket_size from client_delay_parameters. Buckets are created
+# at the time of the "first" connection from the matching IP. Idle
+# buckets are periodically cleaned up.
+#
+# You can specify more than 100 percent but note that such "oversized"
+# buckets are not refilled until their size goes down to max_bucket_size
+# from client_delay_parameters.
+#
+# Example:
+# client_delay_initial_bucket_level 50
+#Default:
+# client_delay_initial_bucket_level 50
+
+# TAG: client_delay_parameters
+#
+# This option configures client-side bandwidth limits using the
+# following format:
+#
+# client_delay_parameters pool speed_limit max_bucket_size
+#
+# pool is an integer ID used for client_delay_access matching.
+#
+# speed_limit is bytes added to the bucket per second.
+#
+# max_bucket_size is the maximum size of a bucket, enforced after any
+# speed_limit additions.
+#
+# Please see the delay_parameters option for more information and
+# examples.
+#
+# Example:
+# client_delay_parameters 1 1024 2048
+# client_delay_parameters 2 51200 16384
+#
+# See also client_delay_access.
+#
+#Default:
+# none
+
+# TAG: client_delay_access
+# This option determines the client-side delay pool for the
+# request:
+#
+# client_delay_access pool_ID allow|deny acl_name
+#
+# All client_delay_access options are checked in their pool ID
+# order, starting with pool 1. The first checked pool with allowed
+# request is selected for the request. If no ACL matches or there
+# are no client_delay_access options, the request bandwidth is not
+# limited.
+#
+# The ACL-selected pool is then used to find the
+# client_delay_parameters for the request. Client-side pools are
+# not used to aggregate clients. Clients are always aggregated
+# based on their source IP addresses (one bucket per source IP).
+#
+# This clause only supports fast acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+# Additionally, only the client TCP connection details are available.
+# ACLs testing HTTP properties will not work.
+#
+# Please see delay_access for more examples.
+#
+# Example:
+# client_delay_access 1 allow low_rate_network
+# client_delay_access 2 allow vips_network
+#
+#
+# See also client_delay_parameters and client_delay_pools.
+#Default:
+# Deny use of the pool, unless allow rules exist in squid.conf for the pool.
+
+# WCCPv1 AND WCCPv2 CONFIGURATION OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: wccp_router
+# Use this option to define your WCCP ``home'' router for
+# Squid.
+#
+# wccp_router supports a single WCCP(v1) router
+#
+# wccp2_router supports multiple WCCPv2 routers
+#
+# only one of the two may be used at the same time and defines
+# which version of WCCP to use.
+#Default:
+# WCCP disabled.
+
+# TAG: wccp2_router
+# Use this option to define your WCCP ``home'' router for
+# Squid.
+#
+# wccp_router supports a single WCCP(v1) router
+#
+# wccp2_router supports multiple WCCPv2 routers
+#
+# only one of the two may be used at the same time and defines
+# which version of WCCP to use.
+#Default:
+# WCCPv2 disabled.
+
+# TAG: wccp_version
+# This directive is only relevant if you need to set up WCCP(v1)
+# to some very old and end-of-life Cisco routers. In all other
+# setups it must be left unset or at the default setting.
+# It defines an internal version in the WCCP(v1) protocol,
+# with version 4 being the officially documented protocol.
+#
+# According to some users, Cisco IOS 11.2 and earlier only
+# support WCCP version 3. If you're using that or an earlier
+# version of IOS, you may need to change this value to 3, otherwise
+# do not specify this parameter.
+#Default:
+# wccp_version 4
+
+# TAG: wccp2_rebuild_wait
+# If this is enabled Squid will wait for the cache dir rebuild to finish
+# before sending the first wccp2 HereIAm packet
+#Default:
+# wccp2_rebuild_wait on
+
+# TAG: wccp2_forwarding_method
+# WCCP2 allows the setting of forwarding methods between the
+# router/switch and the cache. Valid values are as follows:
+#
+# gre - GRE encapsulation (forward the packet in a GRE/WCCP tunnel)
+# l2 - L2 redirect (forward the packet using Layer 2/MAC rewriting)
+#
+# Currently (as of IOS 12.4) cisco routers only support GRE.
+# Cisco switches only support the L2 redirect assignment method.
+#Default:
+# wccp2_forwarding_method gre
+
+# TAG: wccp2_return_method
+# WCCP2 allows the setting of return methods between the
+# router/switch and the cache for packets that the cache
+# decides not to handle. Valid values are as follows:
+#
+# gre - GRE encapsulation (forward the packet in a GRE/WCCP tunnel)
+# l2 - L2 redirect (forward the packet using Layer 2/MAC rewriting)
+#
+# Currently (as of IOS 12.4) cisco routers only support GRE.
+# Cisco switches only support the L2 redirect assignment.
+#
+# If the "ip wccp redirect exclude in" command has been
+# enabled on the cache interface, then it is still safe for
+# the proxy server to use a l2 redirect method even if this
+# option is set to GRE.
+#Default:
+# wccp2_return_method gre
+
+# TAG: wccp2_assignment_method
+# WCCP2 allows the setting of methods to assign the WCCP hash
+# Valid values are as follows:
+#
+# hash - Hash assignment
+# mask - Mask assignment
+#
+# As a general rule, cisco routers support the hash assignment method
+# and cisco switches support the mask assignment method.
+#Default:
+# wccp2_assignment_method hash
+
+# TAG: wccp2_service
+# WCCP2 allows for multiple traffic services. There are two
+# types: "standard" and "dynamic". The standard type defines
+# one service id - http (id 0). The dynamic service ids can be from
+# 51 to 255 inclusive. In order to use a dynamic service id
+# one must define the type of traffic to be redirected; this is done
+# using the wccp2_service_info option.
+#
+# The "standard" type does not require a wccp2_service_info option,
+# just specifying the service id will suffice.
+#
+# MD5 service authentication can be enabled by adding
+# "password=<password>" to the end of this service declaration.
+#
+# Examples:
+#
+# wccp2_service standard 0 # for the 'web-cache' standard service
+# wccp2_service dynamic 80 # a dynamic service type which will be
+# # fleshed out with subsequent options.
+# wccp2_service standard 0 password=foo
+#Default:
+# Use the 'web-cache' standard service.
+
+# TAG: wccp2_service_info
+# Dynamic WCCPv2 services require further information to define the
+# traffic you wish to have diverted.
+#
+# The format is:
+#
+# wccp2_service_info <id> protocol=<protocol> flags=<flag>,<flag>..
+# priority=<priority> ports=<port>,<port>..
+#
+# The relevant WCCPv2 flags:
+# + src_ip_hash, dst_ip_hash
+# + source_port_hash, dst_port_hash
+# + src_ip_alt_hash, dst_ip_alt_hash
+# + src_port_alt_hash, dst_port_alt_hash
+# + ports_source
+#
+# The port list can be one to eight entries.
+#
+# Example:
+#
+# wccp2_service_info 80 protocol=tcp flags=src_ip_hash,ports_source
+# priority=240 ports=80
+#
+# Note: the service id must have been defined by a previous
+# 'wccp2_service dynamic <id>' entry.
+#Default:
+# none
+
+# TAG: wccp2_weight
+# Each cache server gets assigned a set of the destination
+# hash proportional to their weight.
+#Default:
+# wccp2_weight 10000
+
+# TAG: wccp_address
+# Use this option if you require WCCPv2 to use a specific
+# interface address.
+#
+# The default behavior is to not bind to any specific address.
+#Default:
+# Address selected by the operating system.
+
+# TAG: wccp2_address
+# Use this option if you require WCCP to use a specific
+# interface address.
+#
+# The default behavior is to not bind to any specific address.
+#Default:
+# Address selected by the operating system.
+
+# PERSISTENT CONNECTION HANDLING
+# -----------------------------------------------------------------------------
+#
+# Also see "pconn_timeout" in the TIMEOUTS section
+
+# TAG: client_persistent_connections
+# Persistent connection support for clients.
+# Squid uses persistent connections (when allowed). You can use
+# this option to disable persistent connections with clients.
+#Default:
+# client_persistent_connections on
+
+# TAG: server_persistent_connections
+# Persistent connection support for servers.
+# Squid uses persistent connections (when allowed). You can use
+# this option to disable persistent connections with servers.
+#Default:
+# server_persistent_connections on
+
+# TAG: persistent_connection_after_error
+# With this directive the use of persistent connections after
+# HTTP errors can be disabled. Useful if you have clients
+# who fail to handle errors on persistent connections properly.
+#Default:
+# persistent_connection_after_error on
+
+# TAG: detect_broken_pconn
+# Some servers have been found to incorrectly signal the use
+# of HTTP/1.0 persistent connections even on replies not
+# compatible, causing significant delays. This server problem
+# has mostly been seen on redirects.
+#
+# By enabling this directive Squid attempts to detect such
+# broken replies and automatically assume the reply is finished
+# after 10 seconds timeout.
+#Default:
+# detect_broken_pconn off
+
+# CACHE DIGEST OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: digest_generation
+# This controls whether the server will generate a Cache Digest
+# of its contents. By default, Cache Digest generation is
+# enabled if Squid is compiled with --enable-cache-digests defined.
+#Default:
+# digest_generation on
+
+# TAG: digest_bits_per_entry
+# This is the number of bits of the server's Cache Digest which
+# will be associated with the Digest entry for a given HTTP
+# Method and URL (public key) combination. The default is 5.
+#Default:
+# digest_bits_per_entry 5
+
+# TAG: digest_rebuild_period (seconds)
+# This is the wait time between Cache Digest rebuilds.
+#Default:
+# digest_rebuild_period 1 hour
+
+# TAG: digest_rewrite_period (seconds)
+# This is the wait time between Cache Digest writes to
+# disk.
+#Default:
+# digest_rewrite_period 1 hour
+
+# TAG: digest_swapout_chunk_size (bytes)
+# This is the number of bytes of the Cache Digest to write to
+# disk at a time. It defaults to 4096 bytes (4KB), the Squid
+# default swap page.
+#Default:
+# digest_swapout_chunk_size 4096 bytes
+
+# TAG: digest_rebuild_chunk_percentage (percent, 0-100)
+# This is the percentage of the Cache Digest to be scanned at a
+# time. By default it is set to 10% of the Cache Digest.
+#Default:
+# digest_rebuild_chunk_percentage 10
+
+# SNMP OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: snmp_port
+# The port number where Squid listens for SNMP requests. To enable
+# SNMP support set this to a suitable port number. Port number
+# 3401 is often used for the Squid SNMP agent. By default it's
+# set to "0" (disabled)
+#
+# Example:
+# snmp_port 3401
+#Default:
+# SNMP disabled.
+
+# TAG: snmp_access
+# Allowing or denying access to the SNMP port.
+#
+# All access to the agent is denied by default.
+# usage:
+#
+# snmp_access allow|deny [!]aclname ...
+#
+# This clause only supports fast acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#
+#Example:
+# snmp_access allow snmppublic localhost
+# snmp_access deny all
+#Default:
+# Deny, unless rules exist in squid.conf.
+
+# TAG: snmp_incoming_address
+# Just like 'udp_incoming_address', but for the SNMP port.
+#
+# snmp_incoming_address is used for the SNMP socket receiving
+# messages from SNMP agents.
+#
+# The default snmp_incoming_address is to listen on all
+# available network interfaces.
+#Default:
+# Accept SNMP packets from all machine interfaces.
+
+# TAG: snmp_outgoing_address
+# Just like 'udp_outgoing_address', but for the SNMP port.
+#
+# snmp_outgoing_address is used for SNMP packets returned to SNMP
+# agents.
+#
+# If snmp_outgoing_address is not set it will use the same socket
+# as snmp_incoming_address. Only change this if you want to have
+# SNMP replies sent using another address than where this Squid
+# listens for SNMP queries.
+#
+# NOTE, snmp_incoming_address and snmp_outgoing_address can not have
+# the same value since they both use the same port.
+#Default:
+# Use snmp_incoming_address or an address selected by the operating system.
+
+# ICP OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: icp_port
+# The port number where Squid sends and receives ICP queries to
+# and from neighbor caches. The standard UDP port for ICP is 3130.
+#
+# Example:
+# icp_port 3130
+#Default:
+# ICP disabled.
+
+# TAG: htcp_port
+# The port number where Squid sends and receives HTCP queries to
+# and from neighbor caches. To turn it on you want to set it to
+# 4827.
+#
+# Example:
+# htcp_port 4827
+#Default:
+# HTCP disabled.
+
+# TAG: log_icp_queries on|off
+# If set, ICP queries are logged to access.log. You may wish
+# to disable this if your ICP load is VERY high to speed things
+# up or to simplify log analysis.
+#Default:
+# log_icp_queries on
+
+# TAG: udp_incoming_address
+# udp_incoming_address is used for UDP packets received from other
+# caches.
+#
+# The default behavior is to not bind to any specific address.
+#
+# Only change this if you want to have all UDP queries received on
+# a specific interface/address.
+#
+# NOTE: udp_incoming_address is used by the ICP, HTCP, and DNS
+# modules. Altering it will affect all of them in the same manner.
+#
+# see also; udp_outgoing_address
+#
+# NOTE, udp_incoming_address and udp_outgoing_address can not
+# have the same value since they both use the same port.
+#Default:
+# Accept packets from all machine interfaces.
+
+# TAG: udp_outgoing_address
+# udp_outgoing_address is used for UDP packets sent out to other
+# caches.
+#
+# The default behavior is to not bind to any specific address.
+#
+# Instead it will use the same socket as udp_incoming_address.
+# Only change this if you want to have UDP queries sent using another
+# address than where this Squid listens for UDP queries from other
+# caches.
+#
+# NOTE: udp_outgoing_address is used by the ICP, HTCP, and DNS
+# modules. Altering it will affect all of them in the same manner.
+#
+# see also; udp_incoming_address
+#
+# NOTE, udp_incoming_address and udp_outgoing_address can not
+# have the same value since they both use the same port.
+#Default:
+# Use udp_incoming_address or an address selected by the operating system.
+
+# TAG: icp_hit_stale on|off
+# If you want to return ICP_HIT for stale cache objects, set this
+# option to 'on'. If you have sibling relationships with caches
+# in other administrative domains, this should be 'off'. If you only
+# have sibling relationships with caches under your control,
+# it is probably okay to set this to 'on'.
+# If set to 'on', your siblings should use the option "allow-miss"
+# on their cache_peer lines for connecting to you.
+#Default:
+# icp_hit_stale off
+
+# TAG: minimum_direct_hops
+# If using the ICMP pinging stuff, do direct fetches for sites
+# which are no more than this many hops away.
+#Default:
+# minimum_direct_hops 4
+
+# TAG: minimum_direct_rtt (msec)
+# If using the ICMP pinging stuff, do direct fetches for sites
+# which are no more than this many rtt milliseconds away.
+#Default:
+# minimum_direct_rtt 400
+
+# TAG: netdb_low
+# The low water mark for the ICMP measurement database.
+#
+# Note: high watermark controlled by netdb_high directive.
+#
+# These watermarks are counts, not percents. The defaults are
+# (low) 900 and (high) 1000. When the high water mark is
+# reached, database entries will be deleted until the low
+# mark is reached.
+#Default:
+# netdb_low 900
+
+# TAG: netdb_high
+# The high water mark for the ICMP measurement database.
+#
+# Note: low watermark controlled by netdb_low directive.
+#
+# These watermarks are counts, not percents. The defaults are
+# (low) 900 and (high) 1000. When the high water mark is
+# reached, database entries will be deleted until the low
+# mark is reached.
+#Default:
+# netdb_high 1000
+
+# TAG: netdb_ping_period
+# The minimum period for measuring a site. There will be at
+# least this much delay between successive pings to the same
+# network. The default is five minutes.
+#Default:
+# netdb_ping_period 5 minutes
+
+# TAG: query_icmp on|off
+# If you want to ask your peers to include ICMP data in their ICP
+# replies, enable this option.
+#
+# If your peer has configured Squid (during compilation) with
+# '--enable-icmp' that peer will send ICMP pings to origin server
+# sites of the URLs it receives. If you enable this option the
+# ICP replies from that peer will include the ICMP data (if available).
+# Then, when choosing a parent cache, Squid will choose the parent with
+# the minimal RTT to the origin server. When this happens, the
+# hierarchy field of the access.log will be
+# "CLOSEST_PARENT_MISS". This option is off by default.
+#Default:
+# query_icmp off
+
+# TAG: test_reachability on|off
+# When this is 'on', ICP MISS replies will be ICP_MISS_NOFETCH
+# instead of ICP_MISS if the target host is NOT in the ICMP
+# database, or has a zero RTT.
+#Default:
+# test_reachability off
+
+# TAG: icp_query_timeout (msec)
+# Normally Squid will automatically determine an optimal ICP
+# query timeout value based on the round-trip-time of recent ICP
+# queries. If you want to override the value determined by
+# Squid, set this 'icp_query_timeout' to a non-zero value. This
+# value is specified in MILLISECONDS, so, to use a 2-second
+# timeout (the old default), you would write:
+#
+# icp_query_timeout 2000
+#Default:
+# Dynamic detection.
+
+# TAG: maximum_icp_query_timeout (msec)
+# Normally the ICP query timeout is determined dynamically. But
+# sometimes it can lead to very large values (say 5 seconds).
+# Use this option to put an upper limit on the dynamic timeout
+# value. Do NOT use this option to always use a fixed (instead
+# of a dynamic) timeout value. To set a fixed timeout see the
+# 'icp_query_timeout' directive.
+#Default:
+# maximum_icp_query_timeout 2000
+
+# TAG: minimum_icp_query_timeout (msec)
+# Normally the ICP query timeout is determined dynamically. But
+# sometimes it can lead to very small timeouts, even lower than
+# the normal latency variance on your link due to traffic.
+# Use this option to put a lower limit on the dynamic timeout
+# value. Do NOT use this option to always use a fixed (instead
+# of a dynamic) timeout value. To set a fixed timeout see the
+# 'icp_query_timeout' directive.
+#Default:
+# minimum_icp_query_timeout 5
+
+# TAG: background_ping_rate time-units
+# Controls how often the ICP pings are sent to siblings that
+# have background-ping set.
+#Default:
+# background_ping_rate 10 seconds
+
+# MULTICAST ICP OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: mcast_groups
+# This tag specifies a list of multicast groups which your server
+# should join to receive multicasted ICP queries.
+#
+# NOTE! Be very careful what you put here! Be sure you
+# understand the difference between an ICP _query_ and an ICP
+# _reply_. This option is to be set only if you want to RECEIVE
+# multicast queries. Do NOT set this option to SEND multicast
+# ICP (use cache_peer for that). ICP replies are always sent via
+# unicast, so this option does not affect whether or not you will
+# receive replies from multicast group members.
+#
+# You must be very careful to NOT use a multicast address which
+# is already in use by another group of caches.
+#
+# If you are unsure about multicast, please read the Multicast
+# chapter in the Squid FAQ (http://www.squid-cache.org/FAQ/).
+#
+# Usage: mcast_groups 239.128.16.128 224.0.1.20
+#
+# By default, Squid doesn't listen on any multicast groups.
+#Default:
+# none
+
+# TAG: mcast_miss_addr
+# Note: This option is only available if Squid is rebuilt with the
+# -DMULTICAST_MISS_STREAM define
+#
+# If you enable this option, every "cache miss" URL will
+# be sent out on the specified multicast address.
+#
+# Do not enable this option unless you are absolutely
+# certain you understand what you are doing.
+#Default:
+# disabled.
+
+# TAG: mcast_miss_ttl
+# Note: This option is only available if Squid is rebuilt with the
+# -DMULTICAST_MISS_STREAM define
+#
+# This is the time-to-live value for packets multicasted
+# when multicasting off cache miss URLs is enabled. By
+# default this is set to 'site scope', i.e. 16.
+#Default:
+# mcast_miss_ttl 16
+
+# TAG: mcast_miss_port
+# Note: This option is only available if Squid is rebuilt with the
+# -DMULTICAST_MISS_STREAM define
+#
+# This is the port number to be used in conjunction with
+# 'mcast_miss_addr'.
+#Default:
+# mcast_miss_port 3135
+
+# TAG: mcast_miss_encode_key
+# Note: This option is only available if Squid is rebuilt with the
+# -DMULTICAST_MISS_STREAM define
+#
+# The URLs that are sent in the multicast miss stream are
+# encrypted. This is the encryption key.
+#Default:
+# mcast_miss_encode_key XXXXXXXXXXXXXXXX
+
+# TAG: mcast_icp_query_timeout (msec)
+# For multicast peers, Squid regularly sends out ICP "probes" to
+# count how many other peers are listening on the given multicast
+# address. This value specifies how long Squid should wait to
+# count all the replies. The default is 2000 msec, or 2
+# seconds.
+#Default:
+# mcast_icp_query_timeout 2000
+
+# INTERNAL ICON OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: icon_directory
+# Where the icons are stored. These are normally kept in
+# /usr/share/squid/icons
+#Default:
+# icon_directory /usr/share/squid/icons
+
+# TAG: global_internal_static
+# This directive controls if Squid should intercept all requests for
+# /squid-internal-static/ no matter which host the URL is requesting
+# (default on setting), or if nothing special should be done for
+# such URLs (off setting). The purpose of this directive is to make
+# icons etc work better in complex cache hierarchies where it may
+# not always be possible for all corners in the cache mesh to reach
+# the server generating a directory listing.
+#Default:
+# global_internal_static on
+
+# TAG: short_icon_urls
+# If this is enabled Squid will use short URLs for icons.
+# If disabled it will revert to the old behavior of including
+# its own name and port in the URL.
+#
+# If you run a complex cache hierarchy with a mix of Squid and
+# other proxies you may need to disable this directive.
+#Default:
+# short_icon_urls on
+
+# ERROR PAGE OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: error_directory
+# If you wish to create your own versions of the default
+# error files to customize them to suit your company copy
+# the error/template files to another directory and point
+# this tag at them.
+#
+# WARNING: This option will disable multi-language support
+# on error pages if used.
+#
+# The squid developers are interested in making squid available in
+# a wide variety of languages. If you are making translations for a
+# language that Squid does not currently provide please consider
+# contributing your translation back to the project.
+# http://wiki.squid-cache.org/Translations
+#
+# The squid developers working on translations are happy to supply drop-in
+# translated error files in exchange for any new language contributions.
+#Default:
+# Send error pages in the client's preferred language
+
+# TAG: error_default_language
+# Set the default language which squid will send error pages in
+# if no existing translation matches the client's language
+# preferences.
+#
+# If unset (default) generic English will be used.
+#
+# The squid developers are interested in making squid available in
+# a wide variety of languages. If you are interested in making
+# translations for any language see the squid wiki for details.
+# http://wiki.squid-cache.org/Translations
+#Default:
+# Generate English language pages.
+
+# TAG: error_log_languages
+# Log to cache.log what languages users are attempting to
+# auto-negotiate for translations.
+#
+# Successful negotiations are not logged. Only failures
+# have meaning to indicate that Squid may need an upgrade
+# of its error page translations.
+#Default:
+# error_log_languages on
+
+# TAG: err_page_stylesheet
+# CSS Stylesheet to pattern the display of Squid default error pages.
+#
+# For information on CSS see http://www.w3.org/Style/CSS/
+#Default:
+# err_page_stylesheet /etc/squid/errorpage.css
+
+# TAG: err_html_text
+# HTML text to include in error messages. Make this a "mailto"
+# URL to your admin address, or maybe just a link to your
+# organization's Web page.
+#
+# To include this in your error messages, you must rewrite
+# the error template files (found in the "errors" directory).
+# Wherever you want the 'err_html_text' line to appear,
+# insert a %L tag in the error template file.
+#Default:
+# none
+
+# TAG: email_err_data on|off
+# If enabled, information about the error that occurred will be
+# included in the mailto links of the ERR pages (if %W is set)
+# so that the email body contains the data.
+# Syntax is <A HREF="mailto:%w%W">%w</A>
+#Default:
+# email_err_data on
+
+# TAG: deny_info
+# Usage: deny_info err_page_name acl
+# or deny_info http://... acl
+# or deny_info TCP_RESET acl
+#
+# This can be used to return a ERR_ page for requests which
+# do not pass the 'http_access' rules. Squid remembers the last
+# acl it evaluated in http_access, and if a 'deny_info' line exists
+# for that ACL Squid returns a corresponding error page.
+#
+# The acl is typically the last acl on the http_access deny line which
+# denied access. The exceptions to this rule are:
+# - When Squid needs to request authentication credentials. It's then
+# the first authentication related acl encountered
+# - When none of the http_access lines matches. It's then the last
+# acl processed on the last http_access line.
+# - When the decision to deny access was made by an adaptation service,
+# the acl name is the corresponding eCAP or ICAP service_name.
+#
+# NP: If providing your own custom error pages with error_directory
+# you may also specify them by your custom file name:
+# Example: deny_info ERR_CUSTOM_ACCESS_DENIED bad_guys
+#
+# By default Squid will send "403 Forbidden". A different 4xx or 5xx
+# may be specified by prefixing the file name with the code and a colon.
+# e.g. 404:ERR_CUSTOM_ACCESS_DENIED
+#
+# Alternatively you can tell Squid to reset the TCP connection
+# by specifying TCP_RESET.
+#
+# Or you can specify an error URL or URL pattern. The browsers will
+# get redirected to the specified URL after formatting tags have
+# been replaced. Redirect will be done with 302 or 307 according to
+# HTTP/1.1 specs. A different 3xx code may be specified by prefixing
+# the URL. e.g. 303:http://example.com/
+#
+# URL FORMAT TAGS:
+# %a - username (if available. Password NOT included)
+# %B - FTP path URL
+# %e - Error number
+# %E - Error description
+# %h - Squid hostname
+# %H - Request domain name
+# %i - Client IP Address
+# %M - Request Method
+# %o - Message result from external ACL helper
+# %p - Request Port number
+# %P - Request Protocol name
+# %R - Request URL path
+# %T - Timestamp in RFC 1123 format
+# %U - Full canonical URL from client
+# (HTTPS URLs terminate with *)
+# %u - Full canonical URL from client
+# %w - Admin email from squid.conf
+# %x - Error name
+# %% - Literal percent (%) code
+#
+#Default:
+# none
+
+# OPTIONS INFLUENCING REQUEST FORWARDING
+# -----------------------------------------------------------------------------
+
+# TAG: nonhierarchical_direct
+# By default, Squid will send any non-hierarchical requests
+# (not cacheable request type) direct to origin servers.
+#
+# When this is set to "off", Squid will prefer to send these
+# requests to parents.
+#
+# Note that in most configurations, by turning this off you will only
+# add latency to these requests without any improvement in global hit
+# ratio.
+#
+# This option only sets a preference. If the parent is unavailable a
+# direct connection to the origin server may still be attempted. To
+# completely prevent direct connections use never_direct.
+#Default:
+# nonhierarchical_direct on
+
+# TAG: prefer_direct
+# Normally Squid tries to use parents for most requests. If you for some
+# reason like it to first try going direct and only use a parent if
+# going direct fails set this to on.
+#
+# By combining nonhierarchical_direct off and prefer_direct on you
+# can set up Squid to use a parent as a backup path if going direct
+# fails.
+#
+# Note: If you want Squid to use parents for all requests see
+# the never_direct directive. prefer_direct only modifies how Squid
+# acts on cacheable requests.
+#Default:
+# prefer_direct off
+
+# TAG: cache_miss_revalidate on|off
+# RFC 7232 defines a conditional request mechanism to prevent
+# response objects being unnecessarily transferred over the network.
+# If that mechanism is used by the client and a cache MISS occurs
+# it can prevent new cache entries being created.
+#
+# This option determines whether Squid on cache MISS will pass the
+# client revalidation request to the server or tries to fetch new
+# content for caching. It can be useful while the cache is mostly
+# empty to more quickly have the cache populated by generating
+# non-conditional GETs.
+#
+# When set to 'on' (default), Squid will pass all client If-* headers
+# to the server. This permits server responses without a cacheable
+# payload to be delivered and on MISS no new cache entry is created.
+#
+# When set to 'off' and if the request is cacheable, Squid will
+# remove the clients If-Modified-Since and If-None-Match headers from
+# the request sent to the server. This requests a 200 status response
+# from the server to create a new cache entry with.
+#Default:
+# cache_miss_revalidate on
+
+# TAG: always_direct
+# Usage: always_direct allow|deny [!]aclname ...
+#
+# Here you can use ACL elements to specify requests which should
+# ALWAYS be forwarded by Squid to the origin servers without using
+# any peers. For example, to always directly forward requests for
+# local servers ignoring any parents or siblings you may have use
+# something like:
+#
+# acl local-servers dstdomain my.domain.net
+# always_direct allow local-servers
+#
+# To always forward FTP requests directly, use
+#
+# acl FTP proto FTP
+# always_direct allow FTP
+#
+# NOTE: There is a similar, but opposite option named
+# 'never_direct'. You need to be aware that "always_direct deny
+# foo" is NOT the same thing as "never_direct allow foo". You
+# may need to use a deny rule to exclude a more-specific case of
+# some other rule. Example:
+#
+# acl local-external dstdomain external.foo.net
+# acl local-servers dstdomain .foo.net
+# always_direct deny local-external
+# always_direct allow local-servers
+#
+# NOTE: If your goal is to make the client forward the request
+# directly to the origin server bypassing Squid then this needs
+# to be done in the client configuration. Squid configuration
+# can only tell Squid how Squid should fetch the object.
+#
+# NOTE: This directive is not related to caching. The replies
+# are cached as usual even if you use always_direct. To not cache
+# the replies see the 'cache' directive.
+#
+# This clause supports both fast and slow acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#Default:
+# Prevent any cache_peer being used for this request.
+
+# TAG: never_direct
+# Usage: never_direct allow|deny [!]aclname ...
+#
+# never_direct is the opposite of always_direct. Please read
+# the description for always_direct if you have not already.
+#
+# With 'never_direct' you can use ACL elements to specify
+# requests which should NEVER be forwarded directly to origin
+# servers. For example, to force the use of a proxy for all
+# requests, except those in your local domain use something like:
+#
+# acl local-servers dstdomain .foo.net
+# never_direct deny local-servers
+# never_direct allow all
+#
+# or if Squid is inside a firewall and there are local intranet
+# servers inside the firewall use something like:
+#
+# acl local-intranet dstdomain .foo.net
+# acl local-external dstdomain external.foo.net
+# always_direct deny local-external
+# always_direct allow local-intranet
+# never_direct allow all
+#
+# This clause supports both fast and slow acl types.
+# See http://wiki.squid-cache.org/SquidFaq/SquidAcl for details.
+#Default:
+# Allow DNS results to be used for this request.
+
+# ADVANCED NETWORKING OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: incoming_udp_average
+# Heavy voodoo here. I can't even believe you are reading this.
+# Are you crazy? Don't even think about adjusting these unless
+# you understand the algorithms in comm_select.c first!
+#Default:
+# incoming_udp_average 6
+
+# TAG: incoming_tcp_average
+# Heavy voodoo here. I can't even believe you are reading this.
+# Are you crazy? Don't even think about adjusting these unless
+# you understand the algorithms in comm_select.c first!
+#Default:
+# incoming_tcp_average 4
+
+# TAG: incoming_dns_average
+# Heavy voodoo here. I can't even believe you are reading this.
+# Are you crazy? Don't even think about adjusting these unless
+# you understand the algorithms in comm_select.c first!
+#Default:
+# incoming_dns_average 4
+
+# TAG: min_udp_poll_cnt
+# Heavy voodoo here. I can't even believe you are reading this.
+# Are you crazy? Don't even think about adjusting these unless
+# you understand the algorithms in comm_select.c first!
+#Default:
+# min_udp_poll_cnt 8
+
+# TAG: min_dns_poll_cnt
+# Heavy voodoo here. I can't even believe you are reading this.
+# Are you crazy? Don't even think about adjusting these unless
+# you understand the algorithms in comm_select.c first!
+#Default:
+# min_dns_poll_cnt 8
+
+# TAG: min_tcp_poll_cnt
+# Heavy voodoo here. I can't even believe you are reading this.
+# Are you crazy? Don't even think about adjusting these unless
+# you understand the algorithms in comm_select.c first!
+#Default:
+# min_tcp_poll_cnt 8
+
+# TAG: accept_filter
+# FreeBSD:
+#
+# The name of an accept(2) filter to install on Squid's
+# listen socket(s). This feature is perhaps specific to
+# FreeBSD and requires support in the kernel.
+#
+# The 'httpready' filter delays delivering new connections
+# to Squid until a full HTTP request has been received.
+# See the accf_http(9) man page for details.
+#
+# The 'dataready' filter delays delivering new connections
+# to Squid until there is some data to process.
+# See the accf_dataready(9) man page for details.
+#
+# Linux:
+#
+# The 'data' filter delays delivering of new connections
+# to Squid until there is some data to process by TCP_ACCEPT_DEFER.
+# You may optionally specify a number of seconds to wait by
+# 'data=N' where N is the number of seconds. Defaults to 30
+# if not specified. See the tcp(7) man page for details.
+#EXAMPLE:
+## FreeBSD
+#accept_filter httpready
+## Linux
+#accept_filter data
+#Default:
+# none
+
+# TAG: client_ip_max_connections
+# Set an absolute limit on the number of connections a single
+# client IP can use. Any more than this and Squid will begin to drop
+# new connections from the client until it closes some links.
+#
+# Note that this is a global limit. It affects all HTTP, HTCP, Gopher and FTP
+# connections from the client. For finer control use the ACL access controls.
+#
+# Requires client_db to be enabled (the default).
+#
+# WARNING: This may noticeably slow down traffic received via external proxies
+# or NAT devices and cause them to rebound error messages back to their clients.
+#Default:
+# No limit.
+
+# TAG: tcp_recv_bufsize (bytes)
+# Size of receive buffer to set for TCP sockets. Probably just
+# as easy to change your kernel's default.
+# Omit from squid.conf to use the default buffer size.
+#Default:
+# Use operating system TCP defaults.
+
+# ICAP OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: icap_enable on|off
+# If you want to enable the ICAP module support, set this to on.
+#Default:
+# icap_enable off
+
+# TAG: icap_connect_timeout
+# This parameter specifies how long to wait for the TCP connect to
+# the requested ICAP server to complete before giving up and either
+# terminating the HTTP transaction or bypassing the failure.
+#
+# The default for optional services is peer_connect_timeout.
+# The default for essential services is connect_timeout.
+# If this option is explicitly set, its value applies to all services.
+#Default:
+# none
+
+# TAG: icap_io_timeout time-units
+# This parameter specifies how long to wait for an I/O activity on
+# an established, active ICAP connection before giving up and
+# either terminating the HTTP transaction or bypassing the
+# failure.
+#Default:
+# Use read_timeout.
+
+# TAG: icap_service_failure_limit limit [in memory-depth time-units]
+# The limit specifies the number of failures that Squid tolerates
+# when establishing a new TCP connection with an ICAP service. If
+# the number of failures exceeds the limit, the ICAP service is
+# not used for new ICAP requests until it is time to refresh its
+# OPTIONS.
+#
+# A negative value disables the limit. Without the limit, an ICAP
+# service will not be considered down due to connectivity failures
+# between ICAP OPTIONS requests.
+#
+# Squid forgets ICAP service failures older than the specified
+# value of memory-depth. The memory fading algorithm
+# is approximate because Squid does not remember individual
+# errors but groups them instead, splitting the option
+# value into ten time slots of equal length.
+#
+# When memory-depth is 0 (the default), this option has no
+# effect on service failure expiration.
+#
+# Squid always forgets failures when updating service settings
+# using an ICAP OPTIONS transaction, regardless of this option
+# setting.
+#
+# For example,
+# # suspend service usage after 10 failures in 5 seconds:
+# icap_service_failure_limit 10 in 5 seconds
+#Default:
+# icap_service_failure_limit 10
+
+# TAG: icap_service_revival_delay
+# The delay specifies the number of seconds to wait after an ICAP
+# OPTIONS request failure before requesting the options again. The
+# failed ICAP service is considered "down" until fresh OPTIONS are
+# fetched.
+#
+# The actual delay cannot be smaller than the hardcoded minimum
+# delay of 30 seconds.
+#Default:
+# icap_service_revival_delay 180
+
+# TAG: icap_preview_enable on|off
+# The ICAP Preview feature allows the ICAP server to handle the
+# HTTP message by looking only at the beginning of the message body
+# or even without receiving the body at all. In some environments,
+# previews greatly speed up ICAP processing.
+#
+# During an ICAP OPTIONS transaction, the server may tell Squid what
+# HTTP messages should be previewed and how big the preview should be.
+# Squid will not use Preview if the server did not request one.
+#
+# To disable ICAP Preview for all ICAP services, regardless of
+# individual ICAP server OPTIONS responses, set this option to "off".
+#Example:
+#icap_preview_enable off
+#Default:
+# icap_preview_enable on
+
+# TAG: icap_preview_size
+# The default size of preview data to be sent to the ICAP server.
+# This value might be overwritten on a per server basis by OPTIONS requests.
+#Default:
+# No preview sent.
+
+# TAG: icap_206_enable on|off
+# 206 (Partial Content) responses is an ICAP extension that allows the
+# ICAP agents to optionally combine adapted and original HTTP message
+# content. The decision to combine is postponed until the end of the
+# ICAP response. Squid supports Partial Content extension by default.
+#
+# Activation of the Partial Content extension is negotiated with each
+# ICAP service during OPTIONS exchange. Most ICAP servers should handle
+# negotiation correctly even if they do not support the extension, but
+# some might fail. To disable Partial Content support for all ICAP
+# services and to avoid any negotiation, set this option to "off".
+#
+# Example:
+# icap_206_enable off
+#Default:
+# icap_206_enable on
+
+# TAG: icap_default_options_ttl
+# The default TTL value for ICAP OPTIONS responses that don't have
+# an Options-TTL header.
+#Default:
+# icap_default_options_ttl 60
+
+# TAG: icap_persistent_connections on|off
+# Whether or not Squid should use persistent connections to
+# an ICAP server.
+#Default:
+# icap_persistent_connections on
+
+# TAG: adaptation_send_client_ip on|off
+# If enabled, Squid shares HTTP client IP information with adaptation
+# services. For ICAP, Squid adds the X-Client-IP header to ICAP requests.
+# For eCAP, Squid sets the libecap::metaClientIp transaction option.
+#
+# See also: adaptation_uses_indirect_client
+#Default:
+# adaptation_send_client_ip off
+
+# TAG: adaptation_send_username on|off
+# This sends authenticated HTTP client username (if available) to
+# the adaptation service.
+#
+# For ICAP, the username value is encoded based on the
+# icap_client_username_encode option and is sent using the header
+# specified by the icap_client_username_header option.
+#Default:
+# adaptation_send_username off
+
+# TAG: icap_client_username_header
+# ICAP request header name to use for adaptation_send_username.
+#Default:
+# icap_client_username_header X-Client-Username
+
+# TAG: icap_client_username_encode on|off
+# Whether to base64 encode the authenticated client username.
+#Default:
+# icap_client_username_encode off
+
+# TAG: icap_service
+# Defines a single ICAP service using the following format:
+#
+# icap_service id vectoring_point uri [option ...]
+#
+# id: ID
+# an opaque identifier or name which is used to direct traffic to
+# this specific service. Must be unique among all adaptation
+# services in squid.conf.
+#
+# vectoring_point: reqmod_precache|reqmod_postcache|respmod_precache|respmod_postcache
+# This specifies at which point of transaction processing the
+# ICAP service should be activated. *_postcache vectoring points
+# are not yet supported.
+#
+# uri: icap://servername:port/servicepath
+# ICAP server and service location.
+#
+# ICAP does not allow a single service to handle both REQMOD and RESPMOD
+# transactions. Squid does not enforce that requirement. You can specify
+# services with the same service_url and different vectoring_points. You
+# can even specify multiple identical services as long as their
+# service_names differ.
+#
+# To activate a service, use the adaptation_access directive. To group
+# services, use adaptation_service_chain and adaptation_service_set.
+#
+# Service options are separated by white space. ICAP services support
+# the following name=value options:
+#
+# bypass=on|off|1|0
+# If set to 'on' or '1', the ICAP service is treated as
+# optional. If the service cannot be reached or malfunctions,
+# Squid will try to ignore any errors and process the message as
+# if the service was not enabled. Not all ICAP errors can be
+# bypassed. If set to 0, the ICAP service is treated as
+# essential and all ICAP errors will result in an error page
+# returned to the HTTP client.
+#
+# Bypass is off by default: services are treated as essential.
+#
+# routing=on|off|1|0
+# If set to 'on' or '1', the ICAP service is allowed to
+# dynamically change the current message adaptation plan by
+# returning a chain of services to be used next. The services
+# are specified using the X-Next-Services ICAP response header
+# value, formatted as a comma-separated list of service names.
+# Each named service should be configured in squid.conf. Other
+# services are ignored. An empty X-Next-Services value results
+# in an empty plan which ends the current adaptation.
+#
+# Dynamic adaptation plan may cross or cover multiple supported
+# vectoring points in their natural processing order.
+#
+# Routing is not allowed by default: the ICAP X-Next-Services
+# response header is ignored.
+#
+# ipv6=on|off
+# Only has effect on split-stack systems. The default on those systems
+# is to use IPv4-only connections. When set to 'on' this option will
+# make Squid use IPv6-only connections to contact this ICAP service.
+#
+# on-overload=block|bypass|wait|force
+# If the service Max-Connections limit has been reached, do
+# one of the following for each new ICAP transaction:
+# * block: send an HTTP error response to the client
+# * bypass: ignore the "over-connected" ICAP service
+# * wait: wait (in a FIFO queue) for an ICAP connection slot
+# * force: proceed, ignoring the Max-Connections limit
+#
+# In SMP mode with N workers, each worker assumes the service
+# connection limit is Max-Connections/N, even though not all
+# workers may use a given service.
+#
+# The default value is "bypass" if service is bypassable,
+# otherwise it is set to "wait".
+#
+#
+# max-conn=number
+# Use the given number as the Max-Connections limit, regardless
+# of the Max-Connections value given by the service, if any.
+#
+# Older icap_service format without optional named parameters is
+# deprecated but supported for backward compatibility.
+#
+#Example:
+#icap_service svcBlocker reqmod_precache icap://icap1.mydomain.net:1344/reqmod bypass=0
+#icap_service svcLogger reqmod_precache icap://icap2.mydomain.net:1344/respmod routing=on
+#Default:
+# none
+
+# TAG: icap_class
+# This deprecated option was documented to define an ICAP service
+# chain, even though it actually defined a set of similar, redundant
+# services, and the chains were not supported.
+#
+# To define a set of redundant services, please use the
+# adaptation_service_set directive. For service chains, use
+# adaptation_service_chain.
+#Default:
+# none
+
+# TAG: icap_access
+# This option is deprecated. Please use adaptation_access, which
+# has the same ICAP functionality, but comes with better
+# documentation, and eCAP support.
+#Default:
+# none
+
+# eCAP OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: ecap_enable on|off
+# Controls whether eCAP support is enabled.
+#Default:
+# ecap_enable off
+
+# TAG: ecap_service
+# Defines a single eCAP service
+#
+# ecap_service id vectoring_point uri [option ...]
+#
+# id: ID
+# an opaque identifier or name which is used to direct traffic to
+# this specific service. Must be unique among all adaptation
+# services in squid.conf.
+#
+# vectoring_point: reqmod_precache|reqmod_postcache|respmod_precache|respmod_postcache
+# This specifies at which point of transaction processing the
+# eCAP service should be activated. *_postcache vectoring points
+# are not yet supported.
+#
+# uri: ecap://vendor/service_name?custom&cgi=style&parameters=optional
+# Squid uses the eCAP service URI to match this configuration
+# line with one of the dynamically loaded services. Each loaded
+# eCAP service must have a unique URI. Obtain the right URI from
+# the service provider.
+#
+# To activate a service, use the adaptation_access directive. To group
+# services, use adaptation_service_chain and adaptation_service_set.
+#
+# Service options are separated by white space. eCAP services support
+# the following name=value options:
+#
+# bypass=on|off|1|0
+# If set to 'on' or '1', the eCAP service is treated as optional.
+# If the service cannot be reached or malfunctions, Squid will try
+# to ignore any errors and process the message as if the service
+# was not enabled. Not all eCAP errors can be bypassed.
+# If set to 'off' or '0', the eCAP service is treated as essential
+# and all eCAP errors will result in an error page returned to the
+# HTTP client.
+#
+# Bypass is off by default: services are treated as essential.
+#
+# routing=on|off|1|0
+# If set to 'on' or '1', the eCAP service is allowed to
+# dynamically change the current message adaptation plan by
+# returning a chain of services to be used next.
+#
+# Dynamic adaptation plan may cross or cover multiple supported
+# vectoring points in their natural processing order.
+#
+# Routing is not allowed by default.
+#
+# Older ecap_service format without optional named parameters is
+# deprecated but supported for backward compatibility.
+#
+#
+#Example:
+#ecap_service s1 reqmod_precache ecap://filters.R.us/leakDetector?on_error=block bypass=off
+#ecap_service s2 respmod_precache ecap://filters.R.us/virusFilter config=/etc/vf.cfg bypass=on
+#Default:
+# none
+
+# TAG: loadable_modules
+# Instructs Squid to load the specified dynamic module(s) or activate
+# preloaded module(s).
+#Example:
+#loadable_modules /usr/lib/MinimalAdapter.so
+#Default:
+# none
+
+# MESSAGE ADAPTATION OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: adaptation_service_set
+#
+# Configures an ordered set of similar, redundant services. This is
+# useful when hot standby or backup adaptation servers are available.
+#
+# adaptation_service_set set_name service_name1 service_name2 ...
+#
+# The named services are used in the set declaration order. The first
+# applicable adaptation service from the set is used first. The next
+# applicable service is tried if and only if the transaction with the
+# previous service fails and the message waiting to be adapted is still
+# intact.
+#
+# When adaptation starts, broken services are ignored as if they were
+# not a part of the set. A broken service is a down optional service.
+#
+# The services in a set must be attached to the same vectoring point
+# (e.g., pre-cache) and use the same adaptation method (e.g., REQMOD).
+#
+# If all services in a set are optional then adaptation failures are
+# bypassable. If all services in the set are essential, then a
+# transaction failure with one service may still be retried using
+# another service from the set, but when all services fail, the master
+# transaction fails as well.
+#
+# A set may contain a mix of optional and essential services, but that
+# is likely to lead to surprising results because broken services become
+# ignored (see above), making previously bypassable failures fatal.
+# Technically, it is the bypassability of the last failed service that
+# matters.
+#
+# See also: adaptation_access adaptation_service_chain
+#
+#Example:
+#adaptation_service_set svcBlocker urlFilterPrimary urlFilterBackup
+#adaptation_service_set svcLogger loggerLocal loggerRemote
+#Default:
+# none
+
+# TAG: adaptation_service_chain
+#
+# Configures a list of complementary services that will be applied
+# one-by-one, forming an adaptation chain or pipeline. This is useful
+# when Squid must perform different adaptations on the same message.
+#
+# adaptation_service_chain chain_name service_name1 svc_name2 ...
+#
+# The named services are used in the chain declaration order. The first
+# applicable adaptation service from the chain is used first. The next
+# applicable service is applied to the successful adaptation results of
+# the previous service in the chain.
+#
+# When adaptation starts, broken services are ignored as if they were
+# not a part of the chain. A broken service is a down optional service.
+#
+# Request satisfaction terminates the adaptation chain because Squid
+# does not currently allow declaration of RESPMOD services at the
+# "reqmod_precache" vectoring point (see icap_service or ecap_service).
+#
+# The services in a chain must be attached to the same vectoring point
+# (e.g., pre-cache) and use the same adaptation method (e.g., REQMOD).
+#
+# A chain may contain a mix of optional and essential services. If an
+# essential adaptation fails (or the failure cannot be bypassed for
+# other reasons), the master transaction fails. Otherwise, the failure
+# is bypassed as if the failed adaptation service was not in the chain.
+#
+# See also: adaptation_access adaptation_service_set
+#
+#Example:
+#adaptation_service_chain svcRequest requestLogger urlFilter leakDetector
+#Default:
+# none
+
+# TAG: adaptation_access
+# Sends an HTTP transaction to an ICAP or eCAP adaptation service.
+#
+# adaptation_access service_name allow|deny [!]aclname...
+# adaptation_access set_name allow|deny [!]aclname...
+#
+# At each supported vectoring point, the adaptation_access
+# statements are processed in the order they appear in this
+# configuration file. Statements pointing to the following services
+# are ignored (i.e., skipped without checking their ACL):
+#
+# - services serving different vectoring points
+# - "broken-but-bypassable" services
+# - "up" services configured to ignore such transactions
+# (e.g., based on the ICAP Transfer-Ignore header).
+#
+# When a set_name is used, all services in the set are checked
+# using the same rules, to find the first applicable one. See
+# adaptation_service_set for details.
+#
+# If an access list is checked and there is a match, the
+# processing stops: For an "allow" rule, the corresponding
+# adaptation service is used for the transaction. For a "deny"
+# rule, no adaptation service is activated.
+#
+# It is currently not possible to apply more than one adaptation
+# service at the same vectoring point to the same HTTP transaction.
+#
+# See also: icap_service and ecap_service
+#
+#Example:
+#adaptation_access service_1 allow all
+#Default:
+# Allow, unless rules exist in squid.conf.
+
+# TAG: adaptation_service_iteration_limit
+# Limits the number of iterations allowed when applying adaptation
+# services to a message. If your longest adaptation set or chain
+# may have more than 16 services, increase the limit beyond its
+# default value of 16. If detecting infinite iteration loops sooner
+# is critical, make the iteration limit match the actual number
+# of services in your longest adaptation set or chain.
+#
+# Infinite adaptation loops are most likely with routing services.
+#
+# See also: icap_service routing=1
+#Default:
+# adaptation_service_iteration_limit 16
+
+# TAG: adaptation_masterx_shared_names
+# For each master transaction (i.e., the HTTP request and response
+# sequence, including all related ICAP and eCAP exchanges), Squid
+# maintains a table of metadata. The table entries are (name, value)
+# pairs shared among eCAP and ICAP exchanges. The table is destroyed
+# with the master transaction.
+#
+# This option specifies the table entry names that Squid must accept
+# from and forward to the adaptation transactions.
+#
+# An ICAP REQMOD or RESPMOD transaction may set an entry in the
+# shared table by returning an ICAP header field with a name
+# specified in adaptation_masterx_shared_names.
+#
+# An eCAP REQMOD or RESPMOD transaction may set an entry in the
+# shared table by implementing the libecap::visitEachOption() API
+# to provide an option with a name specified in
+# adaptation_masterx_shared_names.
+#
+# Squid will store and forward the set entry to subsequent adaptation
+# transactions within the same master transaction scope.
+#
+# Only one shared entry name is supported at this time.
+#
+#Example:
+## share authentication information among ICAP services
+#adaptation_masterx_shared_names X-Subscriber-ID
+#Default:
+# none
+
+# TAG: adaptation_meta
+# This option allows Squid administrator to add custom ICAP request
+# headers or eCAP options to Squid ICAP requests or eCAP transactions.
+# Use it to pass custom authentication tokens and other
+# transaction-state related meta information to an ICAP/eCAP service.
+#
+# The addition of a meta header is ACL-driven:
+# adaptation_meta name value [!]aclname ...
+#
+# Processing for a given header name stops after the first ACL list match.
+# Thus, it is impossible to add two headers with the same name. If no ACL
+# lists match for a given header name, no such header is added. For
+# example:
+#
+# # do not debug transactions except for those that need debugging
+# adaptation_meta X-Debug 1 needs_debugging
+#
+# # log all transactions except for those that must remain secret
+# adaptation_meta X-Log 1 !keep_secret
+#
+# # mark transactions from users in the "G 1" group
+# adaptation_meta X-Authenticated-Groups "G 1" authed_as_G1
+#
+# The "value" parameter may be a regular squid.conf token or a "double
+# quoted string". Within the quoted string, use backslash (\) to escape
+# any character, which is currently only useful for escaping backslashes
+# and double quotes. For example,
+# "this string has one backslash (\\) and two \"quotes\""
+#
+# Used adaptation_meta header values may be logged via %note
+# logformat code. If multiple adaptation_meta headers with the same name
+# are used during master transaction lifetime, the header values are
+# logged in the order they were used and duplicate values are ignored
+# (only the first repeated value will be logged).
+#Default:
+# none
+
+# TAG: icap_retry
+# This ACL determines which retriable ICAP transactions are
+# retried. Transactions that received a complete ICAP response
+# and did not have to consume or produce HTTP bodies to receive
+# that response are usually retriable.
+#
+# icap_retry allow|deny [!]aclname ...
+#
+# Squid automatically retries some ICAP I/O timeouts and errors
+# due to persistent connection race conditions.
+#
+# See also: icap_retry_limit
+#Default:
+# icap_retry deny all
+
+# TAG: icap_retry_limit
+# Limits the number of retries allowed.
+#
+# Communication errors due to persistent connection race
+# conditions are unavoidable, automatically retried, and do not
+# count against this limit.
+#
+# See also: icap_retry
+#Default:
+# No retries are allowed.
+
+# DNS OPTIONS
+# -----------------------------------------------------------------------------
+
+# TAG: check_hostnames
+# For security and stability reasons Squid can check
+# hostnames for Internet standard RFC compliance. If you want
+# Squid to perform these checks turn this directive on.
+#Default:
+# check_hostnames off
+
+# TAG: allow_underscore
+# Underscore characters are not strictly allowed in Internet hostnames
+# but nevertheless used by many sites. Set this to off if you want
+# Squid to be strict about the standard.
+# This check is performed only when check_hostnames is set to on.
+#Default:
+# allow_underscore on
+
+# TAG: dns_retransmit_interval
+# Initial retransmit interval for DNS queries. The interval is
+# doubled each time all configured DNS servers have been tried.
+#Default:
+# dns_retransmit_interval 5 seconds
+
+# TAG: dns_timeout
+# DNS Query timeout. If no response is received to a DNS query
+# within this time all DNS servers for the queried domain
+# are assumed to be unavailable.
+#Default:
+# dns_timeout 30 seconds
+
+# TAG: dns_packet_max
+# Maximum number of bytes packet size to advertise via EDNS.
+# Set to "none" to disable EDNS large packet support.
+#
+# For legacy reasons DNS UDP replies will default to 512 bytes which
+# is too small for many responses. EDNS provides a means for Squid to
+# negotiate receiving larger responses back immediately without having
+# to failover with repeat requests. Responses larger than this limit
+# will retain the old behaviour of failover to TCP DNS.
+#
+# Squid has no real fixed limit internally, but allowing packet sizes
+# over 1500 bytes requires network jumbogram support and is usually not
+# necessary.
+#
+# WARNING: The RFC also indicates that some older resolvers will reply
+# with failure of the whole request if the extension is added. Some
+# resolvers have already been identified which will reply with mangled
+# EDNS response on occasion. Usually in response to many-KB jumbogram
+# sizes being advertised by Squid.
+# Squid will currently treat these both as an unable-to-resolve domain
+# even if it would be resolvable without EDNS.
+#Default:
+# EDNS disabled
+
+# TAG: dns_defnames on|off
+# Normally the RES_DEFNAMES resolver option is disabled
+# (see res_init(3)). This prevents caches in a hierarchy
+# from interpreting single-component hostnames locally. To allow
+# Squid to handle single-component names, enable this option.
+#Default:
+# Search for single-label domain names is disabled.
+
+# TAG: dns_multicast_local on|off
+# When set to on, Squid sends multicast DNS lookups on the local
+# network for domains ending in .local and .arpa.
+# This enables local servers and devices to be contacted in an
+# ad-hoc or zero-configuration network environment.
+#Default:
+# Search for .local and .arpa names is disabled.
+
+# TAG: dns_nameservers
+# Use this if you want to specify a list of DNS name servers
+# (IP addresses) to use instead of those given in your
+# /etc/resolv.conf file.
+#
+# On Windows platforms, if no value is specified here or in
+# the /etc/resolv.conf file, the list of DNS name servers are
+# taken from the Windows registry, both static and dynamic DHCP
+# configurations are supported.
+#
+# Example: dns_nameservers 10.0.0.1 192.172.0.4
+#Default:
+# Use operating system definitions
+
+# TAG: hosts_file
+# Location of the host-local IP name-address associations
+# database. Most Operating Systems have such a file on different
+# default locations:
+# - Un*X & Linux: /etc/hosts
+# - Windows NT/2000: %SystemRoot%\system32\drivers\etc\hosts
+# (%SystemRoot% value install default is c:\winnt)
+# - Windows XP/2003: %SystemRoot%\system32\drivers\etc\hosts
+# (%SystemRoot% value install default is c:\windows)
+# - Windows 9x/Me: %windir%\hosts
+# (%windir% value is usually c:\windows)
+# - Cygwin: /etc/hosts
+#
+# The file contains newline-separated definitions, in the
+# form ip_address_in_dotted_form name [name ...]; names are
+# whitespace-separated. Lines beginning with a hash (#)
+# character are comments.
+#
+# The file is checked at startup and upon configuration.
+# If set to 'none', it won't be checked.
+# If append_domain is used, that domain will be added to
+# domain-local (i.e. not containing any dot character) host
+# definitions.
+#Default:
+# hosts_file /etc/hosts
+
+# TAG: append_domain
+# Appends local domain name to hostnames without any dots in
+# them. append_domain must begin with a period.
+#
+# Be warned there are now Internet names with no dots in
+# them using only top-domain names, so setting this may
+# cause some Internet sites to become unavailable.
+#
+#Example:
+# append_domain .yourdomain.com
+#Default:
+# Use operating system definitions
+
+# TAG: ignore_unknown_nameservers
+# By default Squid checks that DNS responses are received
+# from the same IP addresses they are sent to. If they
+# don't match, Squid ignores the response and writes a warning
+# message to cache.log. You can allow responses from unknown
+# nameservers by setting this option to 'off'.
+#Default:
+# ignore_unknown_nameservers on
+
+# TAG: dns_v4_first
+# With the IPv6 Internet being as fast or faster than IPv4 Internet
+# for most networks Squid prefers to contact websites over IPv6.
+#
+# This option reverses the order of preference to make Squid contact
+# dual-stack websites over IPv4 first. Squid will still perform both
+# IPv6 and IPv4 DNS lookups before connecting.
+#
+# WARNING:
+# This option will restrict the situations under which IPv6
+# connectivity is used (and tested), potentially hiding network
+# problems which would otherwise be detected and warned about.
+#Default:
+# dns_v4_first off
+
+# TAG: ipcache_size (number of entries)
+# Maximum number of DNS IP cache entries.
+#Default:
+# ipcache_size 1024
+
+# TAG: ipcache_low (percent)
+#Default:
+# ipcache_low 90
+
+# TAG: ipcache_high (percent)
+# The size, low-, and high-water marks for the IP cache.
+#Default:
+# ipcache_high 95
+
+# TAG: fqdncache_size (number of entries)
+# Maximum number of FQDN cache entries.
+#Default:
+# fqdncache_size 1024
+
+# MISCELLANEOUS
+# -----------------------------------------------------------------------------
+
+# TAG: configuration_includes_quoted_values on|off
+# If set, Squid will recognize each "quoted string" after a configuration
+# directive as a single parameter. The quotes are stripped before the
+# parameter value is interpreted or used.
+# See "Values with spaces, quotes, and other special characters"
+# section for more details.
+#Default:
+# configuration_includes_quoted_values off
+
+# TAG: memory_pools on|off
+# If set, Squid will keep pools of allocated (but unused) memory
+# available for future use. If memory is a premium on your
+# system and you believe your malloc library outperforms Squid
+# routines, disable this.
+#Default:
+# memory_pools on
+
+# TAG: memory_pools_limit (bytes)
+# Used only with memory_pools on:
+# memory_pools_limit 50 MB
+#
+# If set to a non-zero value, Squid will keep at most the specified
+# limit of allocated (but unused) memory in memory pools. All free()
+# requests that exceed this limit will be handled by your malloc
+# library. Squid does not pre-allocate any memory, just safe-keeps
+# objects that otherwise would be free()d. Thus, it is safe to set
+# memory_pools_limit to a reasonably high value even if your
+# configuration will use less memory.
+#
+# If set to none, Squid will keep all memory it can. That is, there
+# will be no limit on the total amount of memory used for safe-keeping.
+#
+# To disable memory allocation optimization, do not set
+# memory_pools_limit to 0 or none. Set memory_pools to "off" instead.
+#
+# An overhead for maintaining memory pools is not taken into account
+# when the limit is checked. This overhead is close to four bytes per
+# object kept. However, pools may actually _save_ memory because of
+# reduced memory thrashing in your malloc library.
+#Default:
+# memory_pools_limit 5 MB
+
+# TAG: forwarded_for on|off|transparent|truncate|delete
+# If set to "on", Squid will append your client's IP address
+# in the HTTP requests it forwards. By default it looks like:
+#
+# X-Forwarded-For: 192.1.2.3
+#
+# If set to "off", it will appear as
+#
+# X-Forwarded-For: unknown
+#
+# If set to "transparent", Squid will not alter the
+# X-Forwarded-For header in any way.
+#
+# If set to "delete", Squid will delete the entire
+# X-Forwarded-For header.
+#
+# If set to "truncate", Squid will remove all existing
+# X-Forwarded-For entries, and place the client IP as the sole entry.
+#Default:
+# forwarded_for on
+
+# TAG: cachemgr_passwd
+# Specify passwords for cachemgr operations.
+#
+# Usage: cachemgr_passwd password action action ...
+#
+# Some valid actions are (see cache manager menu for a full list):
+# 5min
+# 60min
+# asndb
+# authenticator
+# cbdata
+# client_list
+# comm_incoming
+# config *
+# counters
+# delay
+# digest_stats
+# dns
+# events
+# filedescriptors
+# fqdncache
+# histograms
+# http_headers
+# info
+# io
+# ipcache
+# mem
+# menu
+# netdb
+# non_peers
+# objects
+# offline_toggle *
+# pconn
+# peer_select
+# reconfigure *
+# redirector
+# refresh
+# server_list
+# shutdown *
+# store_digest
+# storedir
+# utilization
+# via_headers
+# vm_objects
+#
+# * Indicates actions which will not be performed without a
+# valid password, others can be performed if not listed here.
+#
+# To disable an action, set the password to "disable".
+# To allow performing an action without a password, set the
+# password to "none".
+#
+# Use the keyword "all" to set the same password for all actions.
+#
+#Example:
+# cachemgr_passwd secret shutdown
+# cachemgr_passwd lesssssssecret info stats/objects
+# cachemgr_passwd disable all
+#Default:
+# No password. Actions which require password are denied.
+
+# TAG: client_db on|off
+# If you want to disable collecting per-client statistics,
+# turn off client_db here.
+#Default:
+# client_db on
+
+# TAG: refresh_all_ims on|off
+# When you enable this option, squid will always check
+# the origin server for an update when a client sends an
+# If-Modified-Since request. Many browsers use IMS
+# requests when the user requests a reload, and this
+# ensures those clients receive the latest version.
+#
+# By default (off), squid may return a Not Modified response
+# based on the age of the cached version.
+#Default:
+# refresh_all_ims off
+
+# TAG: reload_into_ims on|off
+# When you enable this option, client no-cache or ``reload''
+# requests will be changed to If-Modified-Since requests.
+# Doing this VIOLATES the HTTP standard. Enabling this
+# feature could make you liable for problems which it
+# causes.
+#
+# see also refresh_pattern for a more selective approach.
+#Default:
+# reload_into_ims off
+
+# TAG: connect_retries
+# This sets the maximum number of connection attempts made for each
+# TCP connection. The connect_retries attempts must all still
+# complete within the connection timeout period.
+#
+# The default is not to re-try if the first connection attempt fails.
+# The (not recommended) maximum is 10 tries.
+#
+# A warning message will be generated if it is set to a too-high
+# value and the configured value will be over-ridden.
+#
+# Note: These re-tries are in addition to forward_max_tries
+#	which limits how many different addresses may be tried to find
+# a useful server.
+#Default:
+# Do not retry failed connections.
+
+# TAG: retry_on_error
+# If set to ON Squid will automatically retry requests when
+# receiving an error response with status 403 (Forbidden),
+# 500 (Internal Error), 501 or 503 (Service not available).
+# Status 502 and 504 (Gateway errors) are always retried.
+#
+# This is mainly useful if you are in a complex cache hierarchy to
+# work around access control errors.
+#
+# NOTE: This retry will attempt to find another working destination.
+# Which is different from the server which just failed.
+#Default:
+# retry_on_error off
+
+# TAG: as_whois_server
+# WHOIS server to query for AS numbers. NOTE: AS numbers are
+# queried only when Squid starts up, not for every request.
+#Default:
+# as_whois_server whois.ra.net
+
+# TAG: offline_mode
+# Enable this option and Squid will never try to validate cached
+# objects.
+#Default:
+# offline_mode off
+
+# TAG: uri_whitespace
+# What to do with requests that have whitespace characters in the
+# URI. Options:
+#
+# strip: The whitespace characters are stripped out of the URL.
+# This is the behavior recommended by RFC2396 and RFC3986
+# for tolerant handling of generic URI.
+# NOTE: This is one difference between generic URI and HTTP URLs.
+#
+# deny: The request is denied. The user receives an "Invalid
+# Request" message.
+# This is the behaviour recommended by RFC2616 for safe
+# handling of HTTP request URL.
+#
+# allow: The request is allowed and the URI is not changed. The
+# whitespace characters remain in the URI. Note the
+# whitespace is passed to redirector processes if they
+# are in use.
+# Note this may be considered a violation of RFC2616
+# request parsing where whitespace is prohibited in the
+# URL field.
+#
+# encode: The request is allowed and the whitespace characters are
+# encoded according to RFC1738.
+#
+# chop: The request is allowed and the URI is chopped at the
+# first whitespace.
+#
+#
+# NOTE the current Squid implementation of encode and chop violates
+# RFC2616 by not using a 301 redirect after altering the URL.
+#Default:
+# uri_whitespace strip
+
+# TAG: chroot
+# Specifies a directory where Squid should do a chroot() while
+# initializing. This also causes Squid to fully drop root
+# privileges after initializing. This means, for example, if you
+# use a HTTP port less than 1024 and try to reconfigure, you may
+# get an error saying that Squid can not open the port.
+#Default:
+# none
+
+# TAG: balance_on_multiple_ip
+# Modern IP resolvers in squid sort lookup results by preferred access.
+# By default squid will use these IP in order and only rotates to
+#	the next listed when the most preferred fails.
+#
+# Some load balancing servers based on round robin DNS have been
+# found not to preserve user session state across requests
+# to different IP addresses.
+#
+#	Enabling this directive makes Squid rotate IPs per request.
+#Default:
+# balance_on_multiple_ip off
+
+# TAG: pipeline_prefetch
+# HTTP clients may send a pipeline of 1+N requests to Squid using a
+# single connection, without waiting for Squid to respond to the first
+# of those requests. This option limits the number of concurrent
+# requests Squid will try to handle in parallel. If set to N, Squid
+# will try to receive and process up to 1+N requests on the same
+# connection concurrently.
+#
+# Defaults to 0 (off) for bandwidth management and access logging
+# reasons.
+#
+# NOTE: pipelining requires persistent connections to clients.
+#
+# WARNING: pipelining breaks NTLM and Negotiate/Kerberos authentication.
+#Default:
+# Do not pre-parse pipelined requests.
+
+# TAG: high_response_time_warning (msec)
+# If the one-minute median response time exceeds this value,
+# Squid prints a WARNING with debug level 0 to get the
+#	administrator's attention. The value is in milliseconds.
+#Default:
+# disabled.
+
+# TAG: high_page_fault_warning
+# If the one-minute average page fault rate exceeds this
+# value, Squid prints a WARNING with debug level 0 to get
+#	the administrator's attention. The value is in page faults
+# per second.
+#Default:
+# disabled.
+
+# TAG: high_memory_warning
+# Note: This option is only available if Squid is rebuilt with the
+# GNU Malloc with mstats()
+#
+# If the memory usage (as determined by gnumalloc, if available and used)
+# exceeds this amount, Squid prints a WARNING with debug level 0 to get
+#	the administrator's attention.
+#Default:
+# disabled.
+
+# TAG: sleep_after_fork (microseconds)
+# When this is set to a non-zero value, the main Squid process
+# sleeps the specified number of microseconds after a fork()
+# system call. This sleep may help the situation where your
+# system reports fork() failures due to lack of (virtual)
+# memory. Note, however, if you have a lot of child
+# processes, these sleep delays will add up and your
+# Squid will not service requests for some amount of time
+# until all the child processes have been started.
+#	On Windows, values less than 1000 (1 millisecond) are
+#	rounded up to 1000.
+#Default:
+# sleep_after_fork 0
+
+# TAG: windows_ipaddrchangemonitor on|off
+# Note: This option is only available if Squid is rebuilt with the
+# MS Windows
+#
+# On Windows Squid by default will monitor IP address changes and will
+# reconfigure itself after any detected event. This is very useful for
+# proxies connected to internet with dial-up interfaces.
+# In some cases (a Proxy server acting as VPN gateway is one) it could be
+#	desirable to disable this behaviour by setting this to 'off'.
+# Note: after changing this, Squid service must be restarted.
+#Default:
+# windows_ipaddrchangemonitor on
+
+# TAG: eui_lookup
+# Whether to lookup the EUI or MAC address of a connected client.
+#Default:
+# eui_lookup on
+
+# TAG: max_filedescriptors
+# Reduce the maximum number of filedescriptors supported below
+# the usual operating system defaults.
+#
+# Remove from squid.conf to inherit the current ulimit setting.
+#
+# Note: Changing this requires a restart of Squid. Also
+#	not all I/O types support large values (eg on Windows).
+#Default:
+# Use operating system limits set by ulimit.