From c97d877af2a9e70bc5757c762d69a59eb5f99d89 Mon Sep 17 00:00:00 2001
From: GI_Jack
Date: Fri, 15 Apr 2022 18:46:11 -0700
Subject: [PATCH] * added missing agent conf files with user parameters

* fixed templates with inappropriate use of $1 and $2 variables for 5.4
  and 6.0; someone else can wrangle with the XML for 5.0, I am not
---
 .../5.0/ZoL_with_sudo.conf                    | 41 ++++++++++++++++
 .../5.0/userparams_zol_without_sudo.conf      | 41 ++++++++++++++++
 .../5.4/template_zfs_on_linux.yaml            | 48 +++++++++----------
 .../5.4/userparams_zol_with_sudo.conf         | 41 ++++++++++++++++
 .../5.4/userparams_zol_without_sudo.conf      | 41 ++++++++++++++++
 .../6.0/ZoL_with_sudo.conf                    | 41 ++++++++++++++++
 .../6.0/template_zfs_on_linux.yaml            | 48 +++++++++----------
 .../6.0/userparams_zol_without_sudo.conf     | 41 ++++++++++++++++
 8 files changed, 294 insertions(+), 48 deletions(-)
 create mode 100644 Operating_Systems/Linux/template_zfs_on_linux/5.0/ZoL_with_sudo.conf
 create mode 100644 Operating_Systems/Linux/template_zfs_on_linux/5.0/userparams_zol_without_sudo.conf
 create mode 100644 Operating_Systems/Linux/template_zfs_on_linux/5.4/userparams_zol_with_sudo.conf
 create mode 100644 Operating_Systems/Linux/template_zfs_on_linux/5.4/userparams_zol_without_sudo.conf
 create mode 100644 Operating_Systems/Linux/template_zfs_on_linux/6.0/ZoL_with_sudo.conf
 create mode 100644 Operating_Systems/Linux/template_zfs_on_linux/6.0/userparams_zol_without_sudo.conf

diff --git a/Operating_Systems/Linux/template_zfs_on_linux/5.0/ZoL_with_sudo.conf b/Operating_Systems/Linux/template_zfs_on_linux/5.0/ZoL_with_sudo.conf
new file mode 100644
index 000000000..9cd46ecbe
--- /dev/null
+++ b/Operating_Systems/Linux/template_zfs_on_linux/5.0/ZoL_with_sudo.conf
@@ -0,0 +1,41 @@
+# ZFS discovery and configuration
+# original template from pbergbolt (source = https://www.zabbix.com/forum/showthread.php?t=43347), modified by Slash
+
+
+# pool discovery
+UserParameter=zfs.pool.discovery,/usr/bin/sudo /sbin/zpool list -H -o name | sed -e '$ ! s/\(.*\)/{"{#POOLNAME}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#POOLNAME}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+# dataset discovery, called "fileset" in the zabbix template for legacy reasons
+UserParameter=zfs.fileset.discovery,/usr/bin/sudo /sbin/zfs list -H -o name | sed -e '$ ! s/\(.*\)/{"{#FILESETNAME}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#FILESETNAME}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+# vdev discovery
+UserParameter=zfs.vdev.discovery,/usr/bin/sudo /sbin/zpool list -Hv | grep '^[[:blank:]]' | egrep -v 'mirror|raidz' | awk '{print $1}' | sed -e '$ ! s/\(.*\)/{"{#VDEV}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#VDEV}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+
+# pool health
+UserParameter=zfs.zpool.health[*],/usr/bin/sudo /sbin/zpool list -H -o health $1
+
+# get any fs option
+UserParameter=zfs.get.fsinfo[*],/usr/bin/sudo /sbin/zfs get -o value -Hp $2 $1
+
+# compressratio need special treatment because of the "x" at the end of the number
+UserParameter=zfs.get.compressratio[*],/usr/bin/sudo /sbin/zfs get -o value -Hp compressratio $1 | sed "s/x//"
+
+# memory used by ZFS: sum of the SPL slab allocator's statistics
+# "There are a few things not included in that, like the page cache used by mmap(). But you can expect it to be relatively accurate."
+UserParameter=zfs.memory.used,echo $(( `cat /proc/spl/kmem/slab | tail -n +3 | awk '{ print $3 }' | tr "\n" "+" | sed "s/$/0/"` ))
+
+# get any global zfs parameters
+UserParameter=zfs.get.param[*],cat /sys/module/zfs/parameters/$1
+
+# ARC stats from /proc/spl/kstat/zfs/arcstats
+UserParameter=zfs.arcstats[*],awk '/^$1/ {printf $$3;}' /proc/spl/kstat/zfs/arcstats
+
+# detect if a scrub is in progress, 0 = in progress, 1 = not in progress
+UserParameter=zfs.zpool.scrub[*],/usr/bin/sudo /sbin/zpool status $1 | grep "scrub in progress" > /dev/null ; echo $?
+
+# vdev state
+UserParameter=zfs.vdev.state[*],/usr/bin/sudo /sbin/zpool status | grep "$1" | awk '{ print $$2 }'
+# vdev READ error counter
+UserParameter=zfs.vdev.error_counter.read[*],/usr/bin/sudo /sbin/zpool status | grep "$1" | awk '{ print $$3 }' | numfmt --from=si
+# vdev WRITE error counter
+UserParameter=zfs.vdev.error_counter.write[*],/usr/bin/sudo /sbin/zpool status | grep "$1" | awk '{ print $$4 }' | numfmt --from=si
+# vdev CHECKSUM error counter
+UserParameter=zfs.vdev.error_counter.cksum[*],/usr/bin/sudo /sbin/zpool status | grep "$1" | awk '{ print $$5 }' | numfmt --from=si
diff --git a/Operating_Systems/Linux/template_zfs_on_linux/5.0/userparams_zol_without_sudo.conf b/Operating_Systems/Linux/template_zfs_on_linux/5.0/userparams_zol_without_sudo.conf
new file mode 100644
index 000000000..561178982
--- /dev/null
+++ b/Operating_Systems/Linux/template_zfs_on_linux/5.0/userparams_zol_without_sudo.conf
@@ -0,0 +1,41 @@
+# ZFS discovery and configuration
+# original template from pbergbolt (source = https://www.zabbix.com/forum/showthread.php?t=43347), modified by Slash
+
+
+# pool discovery
+UserParameter=zfs.pool.discovery,/sbin/zpool list -H -o name | sed -e '$ ! s/\(.*\)/{"{#POOLNAME}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#POOLNAME}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+# dataset discovery, called "fileset" in the zabbix template for legacy reasons
+UserParameter=zfs.fileset.discovery,/sbin/zfs list -H -o name | sed -e '$ ! s/\(.*\)/{"{#FILESETNAME}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#FILESETNAME}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+# vdev discovery
+UserParameter=zfs.vdev.discovery,/sbin/zpool list -Hv | grep '^[[:blank:]]' | egrep -v 'mirror|raidz' | awk '{print $1}' | sed -e '$ ! s/\(.*\)/{"{#VDEV}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#VDEV}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+
+# pool health
+UserParameter=zfs.zpool.health[*],/sbin/zpool list -H -o health $1
+
+# get any fs option
+UserParameter=zfs.get.fsinfo[*],/sbin/zfs get -o value -Hp $2 $1
+
+# compressratio need special treatment because of the "x" at the end of the number
+UserParameter=zfs.get.compressratio[*],/sbin/zfs get -o value -Hp compressratio $1 | sed "s/x//"
+
+# memory used by ZFS: sum of the SPL slab allocator's statistics
+# "There are a few things not included in that, like the page cache used by mmap(). But you can expect it to be relatively accurate."
+UserParameter=zfs.memory.used,echo $(( `cat /proc/spl/kmem/slab | tail -n +3 | awk '{ print $3 }' | tr "\n" "+" | sed "s/$/0/"` ))
+
+# get any global zfs parameters
+UserParameter=zfs.get.param[*],cat /sys/module/zfs/parameters/$1
+
+# ARC stats from /proc/spl/kstat/zfs/arcstats
+UserParameter=zfs.arcstats[*],awk '/^$1/ {printf $$3;}' /proc/spl/kstat/zfs/arcstats
+
+# detect if a scrub is in progress, 0 = in progress, 1 = not in progress
+UserParameter=zfs.zpool.scrub[*],/sbin/zpool status $1 | grep "scrub in progress" > /dev/null ; echo $?
+
+# vdev state
+UserParameter=zfs.vdev.state[*],/sbin/zpool status | grep "$1" | awk '{ print $$2 }'
+# vdev READ error counter
+UserParameter=zfs.vdev.error_counter.read[*],/sbin/zpool status | grep "$1" | awk '{ print $$3 }' | numfmt --from=si
+# vdev WRITE error counter
+UserParameter=zfs.vdev.error_counter.write[*],/sbin/zpool status | grep "$1" | awk '{ print $$4 }' | numfmt --from=si
+# vdev CHECKSUM error counter
+UserParameter=zfs.vdev.error_counter.cksum[*],/sbin/zpool status | grep "$1" | awk '{ print $$5 }' | numfmt --from=si
diff --git a/Operating_Systems/Linux/template_zfs_on_linux/5.4/template_zfs_on_linux.yaml b/Operating_Systems/Linux/template_zfs_on_linux/5.4/template_zfs_on_linux.yaml
index 630a48cdb..b8ff3ad30 100644
--- a/Operating_Systems/Linux/template_zfs_on_linux/5.4/template_zfs_on_linux.yaml
+++ b/Operating_Systems/Linux/template_zfs_on_linux/5.4/template_zfs_on_linux.yaml
@@ -35,7 +35,7 @@ zabbix_export:
               priority: INFO
         - uuid: 6b5fc935fe194d30badea64eaf3f317f
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "arc_dnode_limit"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[arc_dnode_limit]'
           history: 30d
@@ -49,7 +49,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 0b7d673688e3429d92aa349762729f83
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "arc_meta_limit"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[arc_meta_limit]'
           history: 30d
@@ -63,7 +63,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: b0b5004458494182bf874545f8eb4e41
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "arc_meta_used"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[arc_meta_used]'
           history: 30d
@@ -78,7 +78,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 795ab079ba13461c872ee1d5c0295704
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "bonus_size"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[bonus_size]'
           history: 30d
@@ -120,7 +120,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 5e12dd98f1644f5a87cc5ded5d2e55d8
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "data_size"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[data_size]'
           history: 30d
@@ -134,7 +134,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 522a0f33c90047bab4f55b7214f51dea
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "dbuf_size"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[dbuf_size]'
           history: 30d
@@ -148,7 +148,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: a3d10ebb57984a829f780a229fc9617c
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "dnode_size"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[dnode_size]'
           history: 30d
@@ -162,7 +162,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 184eef57aa034cf8acaf6a8f0e02395b
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "hdr_size"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[hdr_size]'
           history: 30d
@@ -176,7 +176,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: cb7bcc02dfc14329a361e194145871c0
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "hits"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[hits]'
           history: 30d
@@ -194,7 +194,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 8df273b6e0904c9ab140f8f13f6ca973
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "metadata_size"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[metadata_size]'
           history: 30d
@@ -208,7 +208,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: dcd96743ed984018bff5d16105693606
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "mfu_hits"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[mfu_hits]'
           history: 30d
@@ -226,7 +226,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 1015ebe8ef6f4626ae7967bf6358f1b3
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "mfu_size"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[mfu_size]'
           history: 30d
@@ -240,7 +240,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 1298a265a6784e63a166b768e1faf67e
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "misses"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[misses]'
           history: 30d
@@ -258,7 +258,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: c85d0e9e1b464748a20148e2f2507609
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "mru_hits"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[mru_hits]'
           history: 30d
@@ -276,7 +276,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 50954c7b43d745d09990011df4d7448c
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "mru_size"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[mru_size]'
           history: 30d
@@ -335,7 +335,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: ebfb742fb123451c9632d12bde0957c4
-          name: 'ZFS parameter $1'
+          name: 'ZFS parameter zfs_arc_dnode_limit_percent'
           type: ZABBIX_ACTIVE
           key: 'zfs.get.param[zfs_arc_dnode_limit_percent]'
           delay: 1h
@@ -350,7 +350,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 18d8b817852848929f4e0b421cb21532
-          name: 'ZFS parameter $1'
+          name: 'ZFS parameter zfs_arc_meta_limit_percent'
           type: ZABBIX_ACTIVE
           key: 'zfs.get.param[zfs_arc_meta_limit_percent]'
           delay: 1h
@@ -386,7 +386,7 @@ zabbix_export:
           item_prototypes:
             - uuid: 4d7c96bd10b44754b2c8790b90c12046
-              name: 'Zfs dataset $1 compressratio'
+              name: 'Zfs dataset {#FILESETNAME} compressratio'
               type: ZABBIX_ACTIVE
               key: 'zfs.get.compressratio[{#FILESETNAME}]'
               delay: 30m
@@ -407,7 +407,7 @@ zabbix_export:
                   value: 'ZFS dataset'
             - uuid: e9df401ae71e45c8a3fdbbd146cdd57b
-              name: 'Zfs dataset $1 $2'
+              name: 'Zfs dataset {#FILESETNAME} available'
               type: ZABBIX_ACTIVE
               key: 'zfs.get.fsinfo[{#FILESETNAME},available]'
               delay: 5m
@@ -422,7 +422,7 @@ zabbix_export:
                   value: 'ZFS dataset'
             - uuid: ed63bb6942364281bcea80c54b6f8fcc
-              name: 'Zfs dataset $1 $2'
+              name: 'Zfs dataset {#FILESETNAME} referenced'
               type: ZABBIX_ACTIVE
               key: 'zfs.get.fsinfo[{#FILESETNAME},referenced]'
               delay: 5m
@@ -437,7 +437,7 @@ zabbix_export:
                   value: 'ZFS dataset'
             - uuid: 7ef4530ddf464defb2a64ce674a82c8c
-              name: 'Zfs dataset $1 $2'
+              name: 'Zfs dataset {#FILESETNAME} usedbychildren'
               type: ZABBIX_ACTIVE
               key: 'zfs.get.fsinfo[{#FILESETNAME},usedbychildren]'
               delay: 5m
@@ -452,7 +452,7 @@ zabbix_export:
                   value: 'ZFS dataset'
             - uuid: 3c7f982147be49629c78aa67a1d8d56e
-              name: 'Zfs dataset $1 $2'
+              name: 'Zfs dataset {#FILESETNAME} usedbydataset'
               type: ZABBIX_ACTIVE
               key: 'zfs.get.fsinfo[{#FILESETNAME},usedbydataset]'
               delay: 1h
@@ -467,7 +467,7 @@ zabbix_export:
                   value: 'ZFS dataset'
             - uuid: cc0e02c58b28443eb78eeacc81095966
-              name: 'Zfs dataset $1 $2'
+              name: 'Zfs dataset {#FILESETNAME} usedbysnapshots'
               type: ZABBIX_ACTIVE
               key: 'zfs.get.fsinfo[{#FILESETNAME},usedbysnapshots]'
               delay: 5m
@@ -482,7 +482,7 @@ zabbix_export:
                   value: 'ZFS dataset'
             - uuid: a54feffafdb34ba08f1474ab4710088d
-              name: 'Zfs dataset $1 $2'
+              name: 'Zfs dataset {#FILESETNAME} used'
               type: ZABBIX_ACTIVE
               key: 'zfs.get.fsinfo[{#FILESETNAME},used]'
               delay: 5m
diff --git a/Operating_Systems/Linux/template_zfs_on_linux/5.4/userparams_zol_with_sudo.conf b/Operating_Systems/Linux/template_zfs_on_linux/5.4/userparams_zol_with_sudo.conf
new file mode 100644
index 000000000..9cd46ecbe
--- /dev/null
+++ b/Operating_Systems/Linux/template_zfs_on_linux/5.4/userparams_zol_with_sudo.conf
@@ -0,0 +1,41 @@
+# ZFS discovery and configuration
+# original template from pbergbolt (source = https://www.zabbix.com/forum/showthread.php?t=43347), modified by Slash
+
+
+# pool discovery
+UserParameter=zfs.pool.discovery,/usr/bin/sudo /sbin/zpool list -H -o name | sed -e '$ ! s/\(.*\)/{"{#POOLNAME}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#POOLNAME}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+# dataset discovery, called "fileset" in the zabbix template for legacy reasons
+UserParameter=zfs.fileset.discovery,/usr/bin/sudo /sbin/zfs list -H -o name | sed -e '$ ! s/\(.*\)/{"{#FILESETNAME}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#FILESETNAME}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+# vdev discovery
+UserParameter=zfs.vdev.discovery,/usr/bin/sudo /sbin/zpool list -Hv | grep '^[[:blank:]]' | egrep -v 'mirror|raidz' | awk '{print $1}' | sed -e '$ ! s/\(.*\)/{"{#VDEV}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#VDEV}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+
+# pool health
+UserParameter=zfs.zpool.health[*],/usr/bin/sudo /sbin/zpool list -H -o health $1
+
+# get any fs option
+UserParameter=zfs.get.fsinfo[*],/usr/bin/sudo /sbin/zfs get -o value -Hp $2 $1
+
+# compressratio need special treatment because of the "x" at the end of the number
+UserParameter=zfs.get.compressratio[*],/usr/bin/sudo /sbin/zfs get -o value -Hp compressratio $1 | sed "s/x//"
+
+# memory used by ZFS: sum of the SPL slab allocator's statistics
+# "There are a few things not included in that, like the page cache used by mmap(). But you can expect it to be relatively accurate."
+UserParameter=zfs.memory.used,echo $(( `cat /proc/spl/kmem/slab | tail -n +3 | awk '{ print $3 }' | tr "\n" "+" | sed "s/$/0/"` ))
+
+# get any global zfs parameters
+UserParameter=zfs.get.param[*],cat /sys/module/zfs/parameters/$1
+
+# ARC stats from /proc/spl/kstat/zfs/arcstats
+UserParameter=zfs.arcstats[*],awk '/^$1/ {printf $$3;}' /proc/spl/kstat/zfs/arcstats
+
+# detect if a scrub is in progress, 0 = in progress, 1 = not in progress
+UserParameter=zfs.zpool.scrub[*],/usr/bin/sudo /sbin/zpool status $1 | grep "scrub in progress" > /dev/null ; echo $?
+
+# vdev state
+UserParameter=zfs.vdev.state[*],/usr/bin/sudo /sbin/zpool status | grep "$1" | awk '{ print $$2 }'
+# vdev READ error counter
+UserParameter=zfs.vdev.error_counter.read[*],/usr/bin/sudo /sbin/zpool status | grep "$1" | awk '{ print $$3 }' | numfmt --from=si
+# vdev WRITE error counter
+UserParameter=zfs.vdev.error_counter.write[*],/usr/bin/sudo /sbin/zpool status | grep "$1" | awk '{ print $$4 }' | numfmt --from=si
+# vdev CHECKSUM error counter
+UserParameter=zfs.vdev.error_counter.cksum[*],/usr/bin/sudo /sbin/zpool status | grep "$1" | awk '{ print $$5 }' | numfmt --from=si
diff --git a/Operating_Systems/Linux/template_zfs_on_linux/5.4/userparams_zol_without_sudo.conf b/Operating_Systems/Linux/template_zfs_on_linux/5.4/userparams_zol_without_sudo.conf
new file mode 100644
index 000000000..561178982
--- /dev/null
+++ b/Operating_Systems/Linux/template_zfs_on_linux/5.4/userparams_zol_without_sudo.conf
@@ -0,0 +1,41 @@
+# ZFS discovery and configuration
+# original template from pbergbolt (source = https://www.zabbix.com/forum/showthread.php?t=43347), modified by Slash
+
+
+# pool discovery
+UserParameter=zfs.pool.discovery,/sbin/zpool list -H -o name | sed -e '$ ! s/\(.*\)/{"{#POOLNAME}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#POOLNAME}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+# dataset discovery, called "fileset" in the zabbix template for legacy reasons
+UserParameter=zfs.fileset.discovery,/sbin/zfs list -H -o name | sed -e '$ ! s/\(.*\)/{"{#FILESETNAME}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#FILESETNAME}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+# vdev discovery
+UserParameter=zfs.vdev.discovery,/sbin/zpool list -Hv | grep '^[[:blank:]]' | egrep -v 'mirror|raidz' | awk '{print $1}' | sed -e '$ ! s/\(.*\)/{"{#VDEV}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#VDEV}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+
+# pool health
+UserParameter=zfs.zpool.health[*],/sbin/zpool list -H -o health $1
+
+# get any fs option
+UserParameter=zfs.get.fsinfo[*],/sbin/zfs get -o value -Hp $2 $1
+
+# compressratio need special treatment because of the "x" at the end of the number
+UserParameter=zfs.get.compressratio[*],/sbin/zfs get -o value -Hp compressratio $1 | sed "s/x//"
+
+# memory used by ZFS: sum of the SPL slab allocator's statistics
+# "There are a few things not included in that, like the page cache used by mmap(). But you can expect it to be relatively accurate."
+UserParameter=zfs.memory.used,echo $(( `cat /proc/spl/kmem/slab | tail -n +3 | awk '{ print $3 }' | tr "\n" "+" | sed "s/$/0/"` ))
+
+# get any global zfs parameters
+UserParameter=zfs.get.param[*],cat /sys/module/zfs/parameters/$1
+
+# ARC stats from /proc/spl/kstat/zfs/arcstats
+UserParameter=zfs.arcstats[*],awk '/^$1/ {printf $$3;}' /proc/spl/kstat/zfs/arcstats
+
+# detect if a scrub is in progress, 0 = in progress, 1 = not in progress
+UserParameter=zfs.zpool.scrub[*],/sbin/zpool status $1 | grep "scrub in progress" > /dev/null ; echo $?
+
+# vdev state
+UserParameter=zfs.vdev.state[*],/sbin/zpool status | grep "$1" | awk '{ print $$2 }'
+# vdev READ error counter
+UserParameter=zfs.vdev.error_counter.read[*],/sbin/zpool status | grep "$1" | awk '{ print $$3 }' | numfmt --from=si
+# vdev WRITE error counter
+UserParameter=zfs.vdev.error_counter.write[*],/sbin/zpool status | grep "$1" | awk '{ print $$4 }' | numfmt --from=si
+# vdev CHECKSUM error counter
+UserParameter=zfs.vdev.error_counter.cksum[*],/sbin/zpool status | grep "$1" | awk '{ print $$5 }' | numfmt --from=si
diff --git a/Operating_Systems/Linux/template_zfs_on_linux/6.0/ZoL_with_sudo.conf b/Operating_Systems/Linux/template_zfs_on_linux/6.0/ZoL_with_sudo.conf
new file mode 100644
index 000000000..9cd46ecbe
--- /dev/null
+++ b/Operating_Systems/Linux/template_zfs_on_linux/6.0/ZoL_with_sudo.conf
@@ -0,0 +1,41 @@
+# ZFS discovery and configuration
+# original template from pbergbolt (source = https://www.zabbix.com/forum/showthread.php?t=43347), modified by Slash
+
+
+# pool discovery
+UserParameter=zfs.pool.discovery,/usr/bin/sudo /sbin/zpool list -H -o name | sed -e '$ ! s/\(.*\)/{"{#POOLNAME}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#POOLNAME}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+# dataset discovery, called "fileset" in the zabbix template for legacy reasons
+UserParameter=zfs.fileset.discovery,/usr/bin/sudo /sbin/zfs list -H -o name | sed -e '$ ! s/\(.*\)/{"{#FILESETNAME}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#FILESETNAME}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+# vdev discovery
+UserParameter=zfs.vdev.discovery,/usr/bin/sudo /sbin/zpool list -Hv | grep '^[[:blank:]]' | egrep -v 'mirror|raidz' | awk '{print $1}' | sed -e '$ ! s/\(.*\)/{"{#VDEV}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#VDEV}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+
+# pool health
+UserParameter=zfs.zpool.health[*],/usr/bin/sudo /sbin/zpool list -H -o health $1
+
+# get any fs option
+UserParameter=zfs.get.fsinfo[*],/usr/bin/sudo /sbin/zfs get -o value -Hp $2 $1
+
+# compressratio need special treatment because of the "x" at the end of the number
+UserParameter=zfs.get.compressratio[*],/usr/bin/sudo /sbin/zfs get -o value -Hp compressratio $1 | sed "s/x//"
+
+# memory used by ZFS: sum of the SPL slab allocator's statistics
+# "There are a few things not included in that, like the page cache used by mmap(). But you can expect it to be relatively accurate."
+UserParameter=zfs.memory.used,echo $(( `cat /proc/spl/kmem/slab | tail -n +3 | awk '{ print $3 }' | tr "\n" "+" | sed "s/$/0/"` ))
+
+# get any global zfs parameters
+UserParameter=zfs.get.param[*],cat /sys/module/zfs/parameters/$1
+
+# ARC stats from /proc/spl/kstat/zfs/arcstats
+UserParameter=zfs.arcstats[*],awk '/^$1/ {printf $$3;}' /proc/spl/kstat/zfs/arcstats
+
+# detect if a scrub is in progress, 0 = in progress, 1 = not in progress
+UserParameter=zfs.zpool.scrub[*],/usr/bin/sudo /sbin/zpool status $1 | grep "scrub in progress" > /dev/null ; echo $?
+
+# vdev state
+UserParameter=zfs.vdev.state[*],/usr/bin/sudo /sbin/zpool status | grep "$1" | awk '{ print $$2 }'
+# vdev READ error counter
+UserParameter=zfs.vdev.error_counter.read[*],/usr/bin/sudo /sbin/zpool status | grep "$1" | awk '{ print $$3 }' | numfmt --from=si
+# vdev WRITE error counter
+UserParameter=zfs.vdev.error_counter.write[*],/usr/bin/sudo /sbin/zpool status | grep "$1" | awk '{ print $$4 }' | numfmt --from=si
+# vdev CHECKSUM error counter
+UserParameter=zfs.vdev.error_counter.cksum[*],/usr/bin/sudo /sbin/zpool status | grep "$1" | awk '{ print $$5 }' | numfmt --from=si
diff --git a/Operating_Systems/Linux/template_zfs_on_linux/6.0/template_zfs_on_linux.yaml b/Operating_Systems/Linux/template_zfs_on_linux/6.0/template_zfs_on_linux.yaml
index f38113944..98439beb9 100644
--- a/Operating_Systems/Linux/template_zfs_on_linux/6.0/template_zfs_on_linux.yaml
+++ b/Operating_Systems/Linux/template_zfs_on_linux/6.0/template_zfs_on_linux.yaml
@@ -35,7 +35,7 @@ zabbix_export:
               priority: INFO
         - uuid: 6b5fc935fe194d30badea64eaf3f317f
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "arc_dnode_limit"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[arc_dnode_limit]'
           history: 30d
@@ -49,7 +49,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 0b7d673688e3429d92aa349762729f83
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "arc_meta_limit"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[arc_meta_limit]'
           history: 30d
@@ -63,7 +63,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: b0b5004458494182bf874545f8eb4e41
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "arc_meta_used"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[arc_meta_used]'
           history: 30d
@@ -78,7 +78,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 795ab079ba13461c872ee1d5c0295704
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "bonus_size"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[bonus_size]'
           history: 30d
@@ -120,7 +120,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 5e12dd98f1644f5a87cc5ded5d2e55d8
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "data_size"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[data_size]'
           history: 30d
@@ -134,7 +134,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 522a0f33c90047bab4f55b7214f51dea
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "dbuf_size"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[dbuf_size]'
           history: 30d
@@ -148,7 +148,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: a3d10ebb57984a829f780a229fc9617c
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "dnode_size"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[dnode_size]'
           history: 30d
@@ -162,7 +162,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 184eef57aa034cf8acaf6a8f0e02395b
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "hdr_size"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[hdr_size]'
           history: 30d
@@ -176,7 +176,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: cb7bcc02dfc14329a361e194145871c0
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "hits"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[hits]'
           history: 30d
@@ -194,7 +194,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 8df273b6e0904c9ab140f8f13f6ca973
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "metadata_size"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[metadata_size]'
           history: 30d
@@ -208,7 +208,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: dcd96743ed984018bff5d16105693606
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "mfu_hits"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[mfu_hits]'
           history: 30d
@@ -226,7 +226,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 1015ebe8ef6f4626ae7967bf6358f1b3
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "mfu_size"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[mfu_size]'
           history: 30d
@@ -240,7 +240,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 1298a265a6784e63a166b768e1faf67e
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "misses"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[misses]'
           history: 30d
@@ -258,7 +258,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: c85d0e9e1b464748a20148e2f2507609
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "mru_hits"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[mru_hits]'
           history: 30d
@@ -276,7 +276,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 50954c7b43d745d09990011df4d7448c
-          name: 'ZFS ARC stat "$1"'
+          name: 'ZFS ARC stat "mru_size"'
           type: ZABBIX_ACTIVE
           key: 'zfs.arcstats[mru_size]'
           history: 30d
@@ -335,7 +335,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: ebfb742fb123451c9632d12bde0957c4
-          name: 'ZFS parameter $1'
+          name: 'ZFS parameter zfs_arc_dnode_limit_percent'
           type: ZABBIX_ACTIVE
           key: 'zfs.get.param[zfs_arc_dnode_limit_percent]'
           delay: 1h
@@ -350,7 +350,7 @@ zabbix_export:
               value: 'ZFS ARC'
         - uuid: 18d8b817852848929f4e0b421cb21532
-          name: 'ZFS parameter $1'
+          name: 'ZFS parameter zfs_arc_meta_limit_percent'
           type: ZABBIX_ACTIVE
           key: 'zfs.get.param[zfs_arc_meta_limit_percent]'
           delay: 1h
@@ -386,7 +386,7 @@ zabbix_export:
           item_prototypes:
             - uuid: 4d7c96bd10b44754b2c8790b90c12046
-              name: 'Zfs dataset $1 compressratio'
+              name: 'Zfs dataset {#FILESETNAME} compressratio'
               type: ZABBIX_ACTIVE
               key: 'zfs.get.compressratio[{#FILESETNAME}]'
               delay: 30m
@@ -407,7 +407,7 @@ zabbix_export:
                   value: 'ZFS dataset'
             - uuid: e9df401ae71e45c8a3fdbbd146cdd57b
-              name: 'Zfs dataset $1 $2'
+              name: 'Zfs dataset {#FILESETNAME} available'
               type: ZABBIX_ACTIVE
               key: 'zfs.get.fsinfo[{#FILESETNAME},available]'
               delay: 5m
@@ -422,7 +422,7 @@ zabbix_export:
                   value: 'ZFS dataset'
             - uuid: ed63bb6942364281bcea80c54b6f8fcc
-              name: 'Zfs dataset $1 $2'
+              name: 'Zfs dataset {#FILESETNAME} referenced'
               type: ZABBIX_ACTIVE
               key: 'zfs.get.fsinfo[{#FILESETNAME},referenced]'
               delay: 5m
@@ -437,7 +437,7 @@ zabbix_export:
                   value: 'ZFS dataset'
             - uuid: 7ef4530ddf464defb2a64ce674a82c8c
-              name: 'Zfs dataset $1 $2'
+              name: 'Zfs dataset {#FILESETNAME} usedbychildren'
               type: ZABBIX_ACTIVE
               key: 'zfs.get.fsinfo[{#FILESETNAME},usedbychildren]'
               delay: 5m
@@ -452,7 +452,7 @@ zabbix_export:
                   value: 'ZFS dataset'
             - uuid: 3c7f982147be49629c78aa67a1d8d56e
-              name: 'Zfs dataset $1 $2'
+              name: 'Zfs dataset {#FILESETNAME} usedbydataset'
               type: ZABBIX_ACTIVE
               key: 'zfs.get.fsinfo[{#FILESETNAME},usedbydataset]'
               delay: 1h
@@ -467,7 +467,7 @@ zabbix_export:
                   value: 'ZFS dataset'
             - uuid: cc0e02c58b28443eb78eeacc81095966
-              name: 'Zfs dataset $1 $2'
+              name: 'Zfs dataset {#FILESETNAME} usedbysnapshots'
               type: ZABBIX_ACTIVE
               key: 'zfs.get.fsinfo[{#FILESETNAME},usedbysnapshots]'
               delay: 5m
@@ -482,7 +482,7 @@ zabbix_export:
                   value: 'ZFS dataset'
             - uuid: a54feffafdb34ba08f1474ab4710088d
-              name: 'Zfs dataset $1 $2'
+              name: 'Zfs dataset {#FILESETNAME} used'
               type: ZABBIX_ACTIVE
               key: 'zfs.get.fsinfo[{#FILESETNAME},used]'
               delay: 5m
diff --git a/Operating_Systems/Linux/template_zfs_on_linux/6.0/userparams_zol_without_sudo.conf b/Operating_Systems/Linux/template_zfs_on_linux/6.0/userparams_zol_without_sudo.conf
new file mode 100644
index 000000000..561178982
--- /dev/null
+++ b/Operating_Systems/Linux/template_zfs_on_linux/6.0/userparams_zol_without_sudo.conf
@@ -0,0 +1,41 @@
+# ZFS discovery and configuration
+# original template from pbergbolt (source = https://www.zabbix.com/forum/showthread.php?t=43347), modified by Slash
+
+
+# pool discovery
+UserParameter=zfs.pool.discovery,/sbin/zpool list -H -o name | sed -e '$ ! s/\(.*\)/{"{#POOLNAME}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#POOLNAME}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+# dataset discovery, called "fileset" in the zabbix template for legacy reasons
+UserParameter=zfs.fileset.discovery,/sbin/zfs list -H -o name | sed -e '$ ! s/\(.*\)/{"{#FILESETNAME}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#FILESETNAME}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+# vdev discovery
+UserParameter=zfs.vdev.discovery,/sbin/zpool list -Hv | grep '^[[:blank:]]' | egrep -v 'mirror|raidz' | awk '{print $1}' | sed -e '$ ! s/\(.*\)/{"{#VDEV}":"\1"},/' | sed -e '$ s/\(.*\)/{"{#VDEV}":"\1"}]}/' | sed -e '1 s/\(.*\)/{ \"data\":[\1/'
+
+# pool health
+UserParameter=zfs.zpool.health[*],/sbin/zpool list -H -o health $1
+
+# get any fs option
+UserParameter=zfs.get.fsinfo[*],/sbin/zfs get -o value -Hp $2 $1
+
+# compressratio need special treatment because of the "x" at the end of the number
+UserParameter=zfs.get.compressratio[*],/sbin/zfs get -o value -Hp compressratio $1 | sed "s/x//"
+
+# memory used by ZFS: sum of the SPL slab allocator's statistics
+# "There are a few things not included in that, like the page cache used by mmap(). But you can expect it to be relatively accurate."
+UserParameter=zfs.memory.used,echo $(( `cat /proc/spl/kmem/slab | tail -n +3 | awk '{ print $3 }' | tr "\n" "+" | sed "s/$/0/"` ))
+
+# get any global zfs parameters
+UserParameter=zfs.get.param[*],cat /sys/module/zfs/parameters/$1
+
+# ARC stats from /proc/spl/kstat/zfs/arcstats
+UserParameter=zfs.arcstats[*],awk '/^$1/ {printf $$3;}' /proc/spl/kstat/zfs/arcstats
+
+# detect if a scrub is in progress, 0 = in progress, 1 = not in progress
+UserParameter=zfs.zpool.scrub[*],/sbin/zpool status $1 | grep "scrub in progress" > /dev/null ; echo $?
+
+# vdev state
+UserParameter=zfs.vdev.state[*],/sbin/zpool status | grep "$1" | awk '{ print $$2 }'
+# vdev READ error counter
+UserParameter=zfs.vdev.error_counter.read[*],/sbin/zpool status | grep "$1" | awk '{ print $$3 }' | numfmt --from=si
+# vdev WRITE error counter
+UserParameter=zfs.vdev.error_counter.write[*],/sbin/zpool status | grep "$1" | awk '{ print $$4 }' | numfmt --from=si
+# vdev CHECKSUM error counter
+UserParameter=zfs.vdev.error_counter.cksum[*],/sbin/zpool status | grep "$1" | awk '{ print $$5 }' | numfmt --from=si
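
Reviewer note: a quick way to sanity-check these user parameters is to load one
of the conf files into the agent (for example by dropping it into the directory
your zabbix_agentd.conf Include= points at) and query the keys in test mode. A
minimal sketch, assuming a pool named "tank" and a dataset "tank/home" --
substitute your own names, and adjust paths for your distro:

    # discovery keys should print the Zabbix LLD JSON built by the sed pipelines,
    # e.g. { "data":[{"{#POOLNAME}":"tank"}]}
    zabbix_agentd -t 'zfs.pool.discovery'

    # item keys take the discovered names as $1/$2
    zabbix_agentd -t 'zfs.zpool.health[tank]'                # ONLINE on a healthy pool
    zabbix_agentd -t 'zfs.get.fsinfo[tank/home,available]'   # raw bytes, via zfs get -Hp

The *_with_sudo variants additionally assume a sudoers rule that lets the
zabbix user run /sbin/zpool and /sbin/zfs without a password; without one,
every sudo-prefixed key will fail on a non-interactive agent.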