diff --git a/Operating_Systems/Linux/template_zfs_on_linux/6.0/README.md b/Operating_Systems/Linux/template_zfs_on_linux/6.0/README.md
index 86e897c96..aaeb128e5 100644
--- a/Operating_Systems/Linux/template_zfs_on_linux/6.0/README.md
+++ b/Operating_Systems/Linux/template_zfs_on_linux/6.0/README.md
@@ -11,6 +11,8 @@
 |{$ZPOOL_AVERAGE_ALERT}|-|`85`|Text macro|
 |{$ZPOOL_DISASTER_ALERT}|-|`99`|Text macro|
 |{$ZPOOL_HIGH_ALERT}|-|`90`|Text macro|
+|{$ZFS_FSNAME_MATCHES}|Determine datasets to discover|`/`|Text macro|
+|{$ZFS_FSNAME_NOTMATCHES}|Determine datasets to ignore|`([a-z-0-9]{64}$\|[a-z-0-9]{64}-init$)`|Text macro|
 
 ## Template links
@@ -60,7 +62,7 @@ There are no template links in this template.
 |Zfs dataset $1 $2|-|`Zabbix agent (active)`|zfs.get.fsinfo[{#FILESETNAME},usedbydataset]<br>Update: 1h<br>LLD|
 |Zfs dataset $1 $2|-|`Zabbix agent (active)`|zfs.get.fsinfo[{#FILESETNAME},usedbysnapshots]<br>Update: 5m<br>LLD|
 |Zfs dataset $1 $2|-|`Zabbix agent (active)`|zfs.get.fsinfo[{#FILESETNAME},used]<br>Update: 5m<br>LLD|
-|Zpool {#POOLNAME}: Get iostats|-|`Zabbix agent (active)`|vfs.file.contents[/proc/spl/kstat/zfs/{#POOLNAME}/io]<br>Update: 1m<br>LLD|
+|Zpool {#POOLNAME}: Get iostats|-|`Zabbix agent (active)`|zfs.zpool.iostat[{#POOLNAME}]<br>Update: 1m<br>LLD|
 |Zpool {#POOLNAME} available|-|`Zabbix agent (active)`|zfs.get.fsinfo[{#POOLNAME},available]<br>Update: 5m<br>LLD|
 |Zpool {#POOLNAME} used|-|`Zabbix agent (active)`|zfs.get.fsinfo[{#POOLNAME},used]<br>Update: 5m<br>LLD|
 |Zpool {#POOLNAME} Health|-|`Zabbix agent (active)`|zfs.zpool.health[{#POOLNAME}]<br>Update: 5m<br>LLD|

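Note on the two functional changes in the template YAML that follows: the `Zfs Dataset discovery` filter swaps the hard-coded `@ZFS fileset` / `@not docker ZFS dataset` global regexps for the `{$ZFS_FSNAME_MATCHES}` / `{$ZFS_FSNAME_NOTMATCHES}` macros (the default NOT_MATCHES_REGEX pattern skips Docker's 64-hex-character layer datasets and their `-init` companions), and the `Zpool {#POOLNAME}: Get iostats` master item moves from reading `/proc/spl/kstat/zfs/{#POOLNAME}/io`, which newer OpenZFS releases no longer expose, to the new `zfs.zpool.iostat[*]` UserParameter. The dependent items accordingly replace JSONPATH + CHANGE_PER_SECOND with a JAVASCRIPT step that picks one column out of the one-line `zpool iostat` output. Below is a minimal standalone sketch of that extraction; the sample line and pool name `tank` are invented, and it splits on `/\s+/` as a defensive assumption — the shipped steps split on a single space (`" "`), which is worth double-checking since `zpool iostat -H` separates fields with tabs.

```javascript
// Standalone re-creation of the dependent items' field extraction.
// Zabbix runs a preprocessing body as function (value) { ... };
// it is wrapped explicitly here so the sketch runs on its own.
function extractField(value, index) {
    // /\s+/ tolerates tabs as well as spaces.
    return value.split(/\s+/)[index];
}

// Invented sample in the shape of:
//   zpool iostat tank -H -p 1 2 | tail -n 1
// Columns: [0] name, [1] alloc, [2] free, [3] read ops, [4] write ops,
//          [5] read bytes, [6] write bytes. The ops/bytes columns are
//          already per-second rates (tail keeps the second 1-second
//          sample), which is why the CHANGE_PER_SECOND step is gone.
var line = 'tank\t419430400\t67108864\t12\t34\t1048576\t2097152';

extractField(line, 3); // '12'      -> zfs.zpool.iostat.reads
extractField(line, 4); // '34'      -> zfs.zpool.iostat.writes
extractField(line, 5); // '1048576' -> zfs.zpool.iostat.nread
extractField(line, 6); // '2097152' -> zfs.zpool.iostat.nwritten
```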
| diff --git a/Operating_Systems/Linux/template_zfs_on_linux/6.0/template_zfs_on_linux.yaml b/Operating_Systems/Linux/template_zfs_on_linux/6.0/template_zfs_on_linux.yaml index 98439beb9..984ee7253 100644 --- a/Operating_Systems/Linux/template_zfs_on_linux/6.0/template_zfs_on_linux.yaml +++ b/Operating_Systems/Linux/template_zfs_on_linux/6.0/template_zfs_on_linux.yaml @@ -1,7 +1,7 @@ zabbix_export: version: '6.0' - date: '2021-11-21T21:15:34Z' - groups: + date: '2022-11-16T18:44:30Z' + template_groups: - uuid: 7df96b18c230490a9a0a9e2307226338 name: Templates @@ -17,7 +17,6 @@ zabbix_export: - uuid: 4ecabdcb2104460f83c2ad5f18fd98f9 name: 'ZFS on Linux version' - type: ZABBIX_ACTIVE key: 'vfs.file.contents[/sys/module/zfs/version]' delay: 1h history: 30d @@ -36,7 +35,6 @@ zabbix_export: - uuid: 6b5fc935fe194d30badea64eaf3f317f name: 'ZFS ARC stat "arc_dnode_limit"' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[arc_dnode_limit]' history: 30d units: B @@ -50,7 +48,6 @@ zabbix_export: - uuid: 0b7d673688e3429d92aa349762729f83 name: 'ZFS ARC stat "arc_meta_limit"' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[arc_meta_limit]' history: 30d units: B @@ -64,7 +61,6 @@ zabbix_export: - uuid: b0b5004458494182bf874545f8eb4e41 name: 'ZFS ARC stat "arc_meta_used"' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[arc_meta_used]' history: 30d units: B @@ -79,7 +75,6 @@ zabbix_export: - uuid: 795ab079ba13461c872ee1d5c0295704 name: 'ZFS ARC stat "bonus_size"' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[bonus_size]' history: 30d units: B @@ -93,7 +88,6 @@ zabbix_export: - uuid: 34a1fb79b2b64ce08ec5b377211372d7 name: 'ZFS ARC max size' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[c_max]' history: 30d units: B @@ -107,7 +101,6 @@ zabbix_export: - uuid: d60b8e4f7a3d4bea972e7fe04c3bb5ca name: 'ZFS ARC minimum size' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[c_min]' history: 30d units: B @@ -121,7 +114,6 @@ zabbix_export: - uuid: 5e12dd98f1644f5a87cc5ded5d2e55d8 name: 'ZFS ARC stat "data_size"' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[data_size]' history: 30d units: B @@ -135,7 +127,6 @@ zabbix_export: - uuid: 522a0f33c90047bab4f55b7214f51dea name: 'ZFS ARC stat "dbuf_size"' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[dbuf_size]' history: 30d units: B @@ -149,7 +140,6 @@ zabbix_export: - uuid: a3d10ebb57984a829f780a229fc9617c name: 'ZFS ARC stat "dnode_size"' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[dnode_size]' history: 30d units: B @@ -163,7 +153,6 @@ zabbix_export: - uuid: 184eef57aa034cf8acaf6a8f0e02395b name: 'ZFS ARC stat "hdr_size"' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[hdr_size]' history: 30d units: B @@ -177,7 +166,6 @@ zabbix_export: - uuid: cb7bcc02dfc14329a361e194145871c0 name: 'ZFS ARC stat "hits"' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[hits]' history: 30d preprocessing: @@ -195,7 +183,6 @@ zabbix_export: - uuid: 8df273b6e0904c9ab140f8f13f6ca973 name: 'ZFS ARC stat "metadata_size"' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[metadata_size]' history: 30d units: B @@ -209,7 +196,6 @@ zabbix_export: - uuid: dcd96743ed984018bff5d16105693606 name: 'ZFS ARC stat "mfu_hits"' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[mfu_hits]' history: 30d preprocessing: @@ -227,7 +213,6 @@ zabbix_export: - uuid: 1015ebe8ef6f4626ae7967bf6358f1b3 name: 'ZFS ARC stat "mfu_size"' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[mfu_size]' history: 30d units: B @@ -241,7 +226,6 @@ zabbix_export: - uuid: 1298a265a6784e63a166b768e1faf67e name: 'ZFS ARC stat "misses"' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[misses]' history: 30d preprocessing: @@ -259,7 +243,6 @@ 
zabbix_export: - uuid: c85d0e9e1b464748a20148e2f2507609 name: 'ZFS ARC stat "mru_hits"' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[mru_hits]' history: 30d preprocessing: @@ -277,7 +260,6 @@ zabbix_export: - uuid: 50954c7b43d745d09990011df4d7448c name: 'ZFS ARC stat "mru_size"' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[mru_size]' history: 30d units: B @@ -291,7 +273,6 @@ zabbix_export: - uuid: cd225da5a02346a58dbe0c9808a628eb name: 'ZFS ARC current size' - type: ZABBIX_ACTIVE key: 'zfs.arcstats[size]' history: 30d units: B @@ -336,7 +317,6 @@ zabbix_export: - uuid: ebfb742fb123451c9632d12bde0957c4 name: 'ZFS parameter zfs_arc_dnode_limit_percent' - type: ZABBIX_ACTIVE key: 'zfs.get.param[zfs_arc_dnode_limit_percent]' delay: 1h history: 30d @@ -351,7 +331,6 @@ zabbix_export: - uuid: 18d8b817852848929f4e0b421cb21532 name: 'ZFS parameter zfs_arc_meta_limit_percent' - type: ZABBIX_ACTIVE key: 'zfs.get.param[zfs_arc_meta_limit_percent]' delay: 1h history: 30d @@ -367,7 +346,6 @@ zabbix_export: - uuid: a82a1b7067904fecb06bcf5b88457192 name: 'Zfs Dataset discovery' - type: ZABBIX_ACTIVE key: zfs.fileset.discovery delay: 30m filter: @@ -375,11 +353,12 @@ zabbix_export: conditions: - macro: '{#FILESETNAME}' - value: '@ZFS fileset' + value: '{$ZFS_FSNAME_MATCHES}' formulaid: A - macro: '{#FILESETNAME}' - value: '@not docker ZFS dataset' + value: '{$ZFS_FSNAME_NOTMATCHES}' + operator: NOT_MATCHES_REGEX formulaid: B lifetime: 2d description: 'Discover ZFS dataset. Dataset names must contain a "/" else it''s a zpool.' @@ -387,7 +366,6 @@ zabbix_export: - uuid: 4d7c96bd10b44754b2c8790b90c12046 name: 'Zfs dataset {#FILESETNAME} compressratio' - type: ZABBIX_ACTIVE key: 'zfs.get.compressratio[{#FILESETNAME}]' delay: 30m history: 30d @@ -408,7 +386,6 @@ zabbix_export: - uuid: e9df401ae71e45c8a3fdbbd146cdd57b name: 'Zfs dataset {#FILESETNAME} available' - type: ZABBIX_ACTIVE key: 'zfs.get.fsinfo[{#FILESETNAME},available]' delay: 5m history: 30d @@ -423,7 +400,6 @@ zabbix_export: - uuid: ed63bb6942364281bcea80c54b6f8fcc name: 'Zfs dataset {#FILESETNAME} referenced' - type: ZABBIX_ACTIVE key: 'zfs.get.fsinfo[{#FILESETNAME},referenced]' delay: 5m history: 30d @@ -438,7 +414,6 @@ zabbix_export: - uuid: 7ef4530ddf464defb2a64ce674a82c8c name: 'Zfs dataset {#FILESETNAME} usedbychildren' - type: ZABBIX_ACTIVE key: 'zfs.get.fsinfo[{#FILESETNAME},usedbychildren]' delay: 5m history: 30d @@ -453,7 +428,6 @@ zabbix_export: - uuid: 3c7f982147be49629c78aa67a1d8d56e name: 'Zfs dataset {#FILESETNAME} usedbydataset' - type: ZABBIX_ACTIVE key: 'zfs.get.fsinfo[{#FILESETNAME},usedbydataset]' delay: 1h history: 30d @@ -468,7 +442,6 @@ zabbix_export: - uuid: cc0e02c58b28443eb78eeacc81095966 name: 'Zfs dataset {#FILESETNAME} usedbysnapshots' - type: ZABBIX_ACTIVE key: 'zfs.get.fsinfo[{#FILESETNAME},usedbysnapshots]' delay: 5m history: 30d @@ -482,8 +455,7 @@ zabbix_export: value: 'ZFS dataset' - uuid: a54feffafdb34ba08f1474ab4710088d - name: 'Zfs dataset #FILESETNAME} used' - type: ZABBIX_ACTIVE + name: 'Zfs dataset {#FILESETNAME} used' key: 'zfs.get.fsinfo[{#FILESETNAME},used]' delay: 5m history: 30d @@ -553,36 +525,13 @@ zabbix_export: - uuid: 08039e570bd7417294d043f4f7bf960f name: 'Zfs Pool discovery' - type: ZABBIX_ACTIVE key: zfs.pool.discovery delay: 1h lifetime: 3d item_prototypes: - - - uuid: 7142e6f1eceb4dc29e3a03d494230564 - name: 'Zpool {#POOLNAME}: Get iostats' - type: ZABBIX_ACTIVE - key: 'vfs.file.contents[/proc/spl/kstat/zfs/{#POOLNAME}/io]' - history: '0' - trends: '0' - value_type: TEXT - preprocessing: - - - type: 
REGEX - parameters: - - '([0-9]+)\s+([0-9]+)\s+([0-9]+)\s+([0-9]+)\s+([0-9]+).*$' - - '["\1", "\2", "\3", "\4"]' - tags: - - - tag: Application - value: ZFS - - - tag: Application - value: 'ZFS zpool' - uuid: 9f889e9529934fdfbf47a29de32468f0 name: 'Zpool {#POOLNAME} available' - type: ZABBIX_ACTIVE key: 'zfs.get.fsinfo[{#POOLNAME},available]' delay: 5m history: 30d @@ -597,7 +546,6 @@ zabbix_export: - uuid: 1993c04b00bc428bbdf43c909967afd2 name: 'Zpool {#POOLNAME} used' - type: ZABBIX_ACTIVE key: 'zfs.get.fsinfo[{#POOLNAME},used]' delay: 5m history: 30d @@ -612,7 +560,6 @@ zabbix_export: - uuid: 472e21c79759476984cbf4ce9f12580a name: 'Zpool {#POOLNAME} Health' - type: ZABBIX_ACTIVE key: 'zfs.zpool.health[{#POOLNAME}]' delay: 5m history: 30d @@ -642,15 +589,15 @@ zabbix_export: units: Bps preprocessing: - - type: JSONPATH - parameters: - - '$[0]' - - - type: CHANGE_PER_SECOND + type: JAVASCRIPT parameters: - - '' + - | + 'use strict'; + var text = value; + const myArray = text.split(" "); + return myArray[5]; master_item: - key: 'vfs.file.contents[/proc/spl/kstat/zfs/{#POOLNAME}/io]' + key: 'zfs.zpool.iostat[{#POOLNAME}]' tags: - tag: Application @@ -669,15 +616,15 @@ zabbix_export: units: Bps preprocessing: - - type: JSONPATH + type: JAVASCRIPT parameters: - - '$[1]' - - - type: CHANGE_PER_SECOND - parameters: - - '' + - | + 'use strict'; + var text = value; + const myArray = text.split(" "); + return myArray[6]; master_item: - key: 'vfs.file.contents[/proc/spl/kstat/zfs/{#POOLNAME}/io]' + key: 'zfs.zpool.iostat[{#POOLNAME}]' tags: - tag: Application @@ -696,15 +643,15 @@ zabbix_export: units: iops preprocessing: - - type: JSONPATH + type: JAVASCRIPT parameters: - - '$[2]' - - - type: CHANGE_PER_SECOND - parameters: - - '' + - | + 'use strict'; + var text = value; + const myArray = text.split(" "); + return myArray[3]; master_item: - key: 'vfs.file.contents[/proc/spl/kstat/zfs/{#POOLNAME}/io]' + key: 'zfs.zpool.iostat[{#POOLNAME}]' tags: - tag: Application @@ -723,15 +670,29 @@ zabbix_export: units: iops preprocessing: - - type: JSONPATH - parameters: - - '$[3]' - - - type: CHANGE_PER_SECOND + type: JAVASCRIPT parameters: - - '' + - | + 'use strict'; + var text = value; + const myArray = text.split(" "); + return myArray[4]; master_item: - key: 'vfs.file.contents[/proc/spl/kstat/zfs/{#POOLNAME}/io]' + key: 'zfs.zpool.iostat[{#POOLNAME}]' + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS zpool' + - + uuid: 7142e6f1eceb4dc29e3a03d494230564 + name: 'Zpool {#POOLNAME}: Get iostats' + key: 'zfs.zpool.iostat[{#POOLNAME}]' + history: '0' + trends: '0' + value_type: TEXT tags: - tag: Application @@ -742,7 +703,6 @@ zabbix_export: - uuid: 867075d6eb1743069be868007472192b name: 'Zpool {#POOLNAME} scrub status' - type: ZABBIX_ACTIVE key: 'zfs.zpool.scrub[{#POOLNAME}]' delay: 5m history: 30d @@ -854,7 +814,6 @@ zabbix_export: - uuid: 6c96e092f08f4b98af9a377782180689 name: 'Zfs vdev discovery' - type: ZABBIX_ACTIVE key: zfs.vdev.discovery delay: 1h lifetime: 3d @@ -862,7 +821,6 @@ zabbix_export: - uuid: 9f63161726774a28905c87aac92cf1e9 name: 'vdev {#VDEV}: CHECKSUM error counter' - type: ZABBIX_ACTIVE key: 'zfs.vdev.error_counter.cksum[{#VDEV}]' delay: 5m history: 30d @@ -882,7 +840,6 @@ zabbix_export: - uuid: 48a02eb060fd4b73bdde08a2795c4717 name: 'vdev {#VDEV}: READ error counter' - type: ZABBIX_ACTIVE key: 'zfs.vdev.error_counter.read[{#VDEV}]' delay: 5m history: 30d @@ -902,7 +859,6 @@ zabbix_export: - uuid: 15953ba38fde4b8c8681955a27d9204a name: 'vdev {#VDEV}: WRITE 
error counter' - type: ZABBIX_ACTIVE key: 'zfs.vdev.error_counter.write[{#VDEV}]' delay: 5m history: 30d @@ -987,6 +943,14 @@ zabbix_export: - macro: '{$ZFS_DISASTER_ALERT}' value: '99' + - + macro: '{$ZFS_FSNAME_MATCHES}' + value: / + description: 'Use this to determine the datasets to autodiscover defaults to all datasets with a ''/'' in the name' + - + macro: '{$ZFS_FSNAME_NOTMATCHES}' + value: '([a-z-0-9]{64}$|[a-z-0-9]{64}-init$)' + description: 'Use this to determine the datasets to not autodiscover. Ignore docker created datasets by default' - macro: '{$ZFS_HIGH_ALERT}' value: '95' @@ -1019,8 +983,8 @@ zabbix_export: type: GRAPH name: graphid value: - name: 'ZFS ARC memory usage' host: 'ZFS on Linux' + name: 'ZFS ARC memory usage' - type: GRAPH_CLASSIC 'y': '5' @@ -1035,8 +999,8 @@ zabbix_export: type: GRAPH name: graphid value: - name: 'ZFS ARC Cache Hit Ratio' host: 'ZFS on Linux' + name: 'ZFS ARC Cache Hit Ratio' - type: GRAPH_CLASSIC 'y': '10' @@ -1051,8 +1015,8 @@ zabbix_export: type: GRAPH name: graphid value: - name: 'ZFS ARC breakdown' host: 'ZFS on Linux' + name: 'ZFS ARC breakdown' - type: GRAPH_CLASSIC 'y': '15' @@ -1067,8 +1031,8 @@ zabbix_export: type: GRAPH name: graphid value: - name: 'ZFS ARC arc_meta_used breakdown' host: 'ZFS on Linux' + name: 'ZFS ARC arc_meta_used breakdown' - uuid: 442dda5c36c04fc78c3a73eacf26bc7f name: 'ZFS zpools' @@ -1096,8 +1060,8 @@ zabbix_export: type: GRAPH_PROTOTYPE name: graphid value: - name: 'ZFS zpool {#POOLNAME} IOPS' host: 'ZFS on Linux' + name: 'ZFS zpool {#POOLNAME} IOPS' - type: GRAPH_PROTOTYPE x: '8' @@ -1120,8 +1084,8 @@ zabbix_export: type: GRAPH_PROTOTYPE name: graphid value: - name: 'ZFS zpool {#POOLNAME} throughput' host: 'ZFS on Linux' + name: 'ZFS zpool {#POOLNAME} throughput' - type: GRAPH_PROTOTYPE x: '16' @@ -1144,8 +1108,8 @@ zabbix_export: type: GRAPH_PROTOTYPE name: graphid value: - name: 'ZFS zpool {#POOLNAME} space usage' host: 'ZFS on Linux' + name: 'ZFS zpool {#POOLNAME} space usage' valuemaps: - uuid: d1d7b0898d06481dbcec8b02d915fb1c diff --git a/Operating_Systems/Linux/template_zfs_on_linux/6.0/template_zfs_on_linux_active.yaml b/Operating_Systems/Linux/template_zfs_on_linux/6.0/template_zfs_on_linux_active.yaml new file mode 100644 index 000000000..134964fe4 --- /dev/null +++ b/Operating_Systems/Linux/template_zfs_on_linux/6.0/template_zfs_on_linux_active.yaml @@ -0,0 +1,1289 @@ +zabbix_export: + version: '6.2' + date: '2022-11-16T19:00:14Z' + template_groups: + - + uuid: 7df96b18c230490a9a0a9e2307226338 + name: Templates + templates: + - + uuid: 47d3c2ff933947368d4bee4b1184d69b + template: 'ZFS on Linux' + name: 'ZFS on Linux Active' + groups: + - + name: Templates + items: + - + uuid: 4ecabdcb2104460f83c2ad5f18fd98f9 + name: 'ZFS on Linux version' + type: ZABBIX_ACTIVE + key: 'vfs.file.contents[/sys/module/zfs/version]' + delay: 1h + history: 30d + trends: '0' + value_type: TEXT + tags: + - + tag: Application + value: ZFS + triggers: + - + uuid: 879881de9f8b4b97b5270df192d86850 + expression: '(last(/ZFS on Linux/vfs.file.contents[/sys/module/zfs/version],#1)<>last(/ZFS on Linux/vfs.file.contents[/sys/module/zfs/version],#2))>0' + name: 'Version of ZoL is now {ITEM.VALUE} on {HOST.NAME}' + priority: INFO + - + uuid: 6b5fc935fe194d30badea64eaf3f317f + name: 'ZFS ARC stat "arc_dnode_limit"' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[arc_dnode_limit]' + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: 0b7d673688e3429d92aa349762729f83 + 
name: 'ZFS ARC stat "arc_meta_limit"' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[arc_meta_limit]' + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: b0b5004458494182bf874545f8eb4e41 + name: 'ZFS ARC stat "arc_meta_used"' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[arc_meta_used]' + history: 30d + units: B + description: 'arc_meta_used = hdr_size + metadata_size + dbuf_size + dnode_size + bonus_size' + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: 795ab079ba13461c872ee1d5c0295704 + name: 'ZFS ARC stat "bonus_size"' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[bonus_size]' + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: 34a1fb79b2b64ce08ec5b377211372d7 + name: 'ZFS ARC max size' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[c_max]' + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: d60b8e4f7a3d4bea972e7fe04c3bb5ca + name: 'ZFS ARC minimum size' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[c_min]' + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: 5e12dd98f1644f5a87cc5ded5d2e55d8 + name: 'ZFS ARC stat "data_size"' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[data_size]' + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: 522a0f33c90047bab4f55b7214f51dea + name: 'ZFS ARC stat "dbuf_size"' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[dbuf_size]' + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: a3d10ebb57984a829f780a229fc9617c + name: 'ZFS ARC stat "dnode_size"' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[dnode_size]' + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: 184eef57aa034cf8acaf6a8f0e02395b + name: 'ZFS ARC stat "hdr_size"' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[hdr_size]' + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: cb7bcc02dfc14329a361e194145871c0 + name: 'ZFS ARC stat "hits"' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[hits]' + history: 30d + preprocessing: + - + type: CHANGE_PER_SECOND + parameters: + - '' + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: 8df273b6e0904c9ab140f8f13f6ca973 + name: 'ZFS ARC stat "metadata_size"' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[metadata_size]' + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: dcd96743ed984018bff5d16105693606 + name: 'ZFS ARC stat "mfu_hits"' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[mfu_hits]' + history: 30d + preprocessing: + - + type: CHANGE_PER_SECOND + parameters: + - '' + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: 1015ebe8ef6f4626ae7967bf6358f1b3 + name: 'ZFS ARC stat "mfu_size"' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[mfu_size]' + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: 1298a265a6784e63a166b768e1faf67e + name: 'ZFS ARC stat "misses"' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[misses]' + history: 30d + preprocessing: + - + type: 
CHANGE_PER_SECOND + parameters: + - '' + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: c85d0e9e1b464748a20148e2f2507609 + name: 'ZFS ARC stat "mru_hits"' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[mru_hits]' + history: 30d + preprocessing: + - + type: CHANGE_PER_SECOND + parameters: + - '' + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: 50954c7b43d745d09990011df4d7448c + name: 'ZFS ARC stat "mru_size"' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[mru_size]' + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: cd225da5a02346a58dbe0c9808a628eb + name: 'ZFS ARC current size' + type: ZABBIX_ACTIVE + key: 'zfs.arcstats[size]' + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: 8c8129f814fe47ae9c71e636599acd90 + name: 'ZFS ARC Cache Hit Ratio' + type: CALCULATED + key: zfs.arcstats_hit_ratio + history: 30d + value_type: FLOAT + units: '%' + params: '100*(last(//zfs.arcstats[hits])/(last(//zfs.arcstats[hits])+count(//zfs.arcstats[hits],#1,,"0")+last(//zfs.arcstats[misses])))' + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: e644390a9c9743f2844dbc9ef8806a8f + name: 'ZFS ARC total read' + type: CALCULATED + key: zfs.arcstats_total_read + history: 30d + units: B + params: 'last(//zfs.arcstats[hits])+last(//zfs.arcstats[misses])' + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: ebfb742fb123451c9632d12bde0957c4 + name: 'ZFS parameter zfs_arc_dnode_limit_percent' + type: ZABBIX_ACTIVE + key: 'zfs.get.param[zfs_arc_dnode_limit_percent]' + delay: 1h + history: 30d + units: '%' + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + - + uuid: 18d8b817852848929f4e0b421cb21532 + name: 'ZFS parameter zfs_arc_meta_limit_percent' + type: ZABBIX_ACTIVE + key: 'zfs.get.param[zfs_arc_meta_limit_percent]' + delay: 1h + history: 30d + units: '%' + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS ARC' + discovery_rules: + - + uuid: a82a1b7067904fecb06bcf5b88457192 + name: 'Zfs Dataset discovery' + type: ZABBIX_ACTIVE + key: zfs.fileset.discovery + delay: 30m + filter: + evaltype: AND + conditions: + - + macro: '{#FILESETNAME}' + value: '{$ZFS_FSNAME_MATCHES}' + formulaid: A + - + macro: '{#FILESETNAME}' + value: '{$ZFS_FSNAME_NOTMATCHES}' + operator: NOT_MATCHES_REGEX + formulaid: B + lifetime: 2d + description: 'Discover ZFS dataset. Dataset names must contain a "/" else it''s a zpool.' 
+ item_prototypes: + - + uuid: 4d7c96bd10b44754b2c8790b90c12046 + name: 'Zfs dataset {#FILESETNAME} compressratio' + type: ZABBIX_ACTIVE + key: 'zfs.get.compressratio[{#FILESETNAME}]' + delay: 30m + history: 30d + value_type: FLOAT + units: '%' + preprocessing: + - + type: MULTIPLIER + parameters: + - '100' + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS dataset' + - + uuid: e9df401ae71e45c8a3fdbbd146cdd57b + name: 'Zfs dataset {#FILESETNAME} available' + type: ZABBIX_ACTIVE + key: 'zfs.get.fsinfo[{#FILESETNAME},available]' + delay: 5m + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS dataset' + - + uuid: ed63bb6942364281bcea80c54b6f8fcc + name: 'Zfs dataset {#FILESETNAME} referenced' + type: ZABBIX_ACTIVE + key: 'zfs.get.fsinfo[{#FILESETNAME},referenced]' + delay: 5m + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS dataset' + - + uuid: 7ef4530ddf464defb2a64ce674a82c8c + name: 'Zfs dataset {#FILESETNAME} usedbychildren' + type: ZABBIX_ACTIVE + key: 'zfs.get.fsinfo[{#FILESETNAME},usedbychildren]' + delay: 5m + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS dataset' + - + uuid: 3c7f982147be49629c78aa67a1d8d56e + name: 'Zfs dataset {#FILESETNAME} usedbydataset' + type: ZABBIX_ACTIVE + key: 'zfs.get.fsinfo[{#FILESETNAME},usedbydataset]' + delay: 1h + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS dataset' + - + uuid: cc0e02c58b28443eb78eeacc81095966 + name: 'Zfs dataset {#FILESETNAME} usedbysnapshots' + type: ZABBIX_ACTIVE + key: 'zfs.get.fsinfo[{#FILESETNAME},usedbysnapshots]' + delay: 5m + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS dataset' + - + uuid: a54feffafdb34ba08f1474ab4710088d + name: 'Zfs dataset {#FILESETNAME} used' + type: ZABBIX_ACTIVE + key: 'zfs.get.fsinfo[{#FILESETNAME},used]' + delay: 5m + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS dataset' + trigger_prototypes: + - + uuid: cc0b0756d2fe42779b62adf63e38681d + expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) ) ) > ({$ZFS_AVERAGE_ALERT}/100)' + name: 'More than {$ZFS_AVERAGE_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}' + priority: AVERAGE + dependencies: + - + name: 'More than {$ZFS_HIGH_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}' + expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) ) ) > ({$ZFS_HIGH_ALERT}/100)' + - + uuid: 8bfb157ac42845c0b340e28ae510833c + expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) ) ) > ({$ZFS_DISASTER_ALERT}/100)' + name: 'More than {$ZFS_DISASTER_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}' + priority: DISASTER + - + uuid: 9b592a2cba084bec9ceb4f82367e758b + expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) ) ) > 
({$ZFS_HIGH_ALERT}/100)' + name: 'More than {$ZFS_HIGH_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}' + priority: HIGH + dependencies: + - + name: 'More than {$ZFS_DISASTER_ALERT}% used on dataset {#FILESETNAME} on {HOST.NAME}' + expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#FILESETNAME},used]) ) ) > ({$ZFS_DISASTER_ALERT}/100)' + graph_prototypes: + - + uuid: 5213684719404718b8956d6faf0e6b71 + name: 'ZFS dataset {#FILESETNAME} usage' + type: STACKED + ymin_type_1: FIXED + graph_items: + - + sortorder: '1' + color: 3333FF + item: + host: 'ZFS on Linux' + key: 'zfs.get.fsinfo[{#FILESETNAME},usedbydataset]' + - + sortorder: '2' + color: FF33FF + item: + host: 'ZFS on Linux' + key: 'zfs.get.fsinfo[{#FILESETNAME},usedbysnapshots]' + - + sortorder: '3' + color: FF3333 + item: + host: 'ZFS on Linux' + key: 'zfs.get.fsinfo[{#FILESETNAME},usedbychildren]' + - + sortorder: '4' + color: 33FF33 + item: + host: 'ZFS on Linux' + key: 'zfs.get.fsinfo[{#FILESETNAME},available]' + - + uuid: 08039e570bd7417294d043f4f7bf960f + name: 'Zfs Pool discovery' + type: ZABBIX_ACTIVE + key: zfs.pool.discovery + delay: 1h + lifetime: 3d + item_prototypes: + - + uuid: 9f889e9529934fdfbf47a29de32468f0 + name: 'Zpool {#POOLNAME} available' + type: ZABBIX_ACTIVE + key: 'zfs.get.fsinfo[{#POOLNAME},available]' + delay: 5m + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS zpool' + - + uuid: 1993c04b00bc428bbdf43c909967afd2 + name: 'Zpool {#POOLNAME} used' + type: ZABBIX_ACTIVE + key: 'zfs.get.fsinfo[{#POOLNAME},used]' + delay: 5m + history: 30d + units: B + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS zpool' + - + uuid: 472e21c79759476984cbf4ce9f12580a + name: 'Zpool {#POOLNAME} Health' + type: ZABBIX_ACTIVE + key: 'zfs.zpool.health[{#POOLNAME}]' + delay: 5m + history: 30d + trends: '0' + value_type: TEXT + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS zpool' + trigger_prototypes: + - + uuid: 4855fe0ed61b444daad73aa6090b46af + expression: 'find(/ZFS on Linux/zfs.zpool.health[{#POOLNAME}],,"like","ONLINE")=0' + name: 'Zpool {#POOLNAME} is {ITEM.VALUE} on {HOST.NAME}' + priority: HIGH + - + uuid: 3207e6ffd0fa40c4a1d6e607e4e12375 + name: 'Zpool {#POOLNAME} read throughput' + type: DEPENDENT + key: 'zfs.zpool.iostat.nread[{#POOLNAME}]' + delay: '0' + history: 30d + value_type: FLOAT + units: Bps + preprocessing: + - + type: JAVASCRIPT + parameters: + - | + 'use strict'; + var text = value; + const myArray = text.split(" "); + return myArray[5]; + master_item: + key: 'zfs.zpool.iostat[{#POOLNAME}]' + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS zpool' + - + uuid: 78b418605f9b45b29bbd33b93a6b2e82 + name: 'Zpool {#POOLNAME} write throughput' + type: DEPENDENT + key: 'zfs.zpool.iostat.nwritten[{#POOLNAME}]' + delay: '0' + history: 30d + value_type: FLOAT + units: Bps + preprocessing: + - + type: JAVASCRIPT + parameters: + - | + 'use strict'; + var text = value; + const myArray = text.split(" "); + return myArray[6]; + master_item: + key: 'zfs.zpool.iostat[{#POOLNAME}]' + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS zpool' + - + uuid: 6b35bf06bf4542318a7999ac4d7952f7 + name: 'Zpool {#POOLNAME} IOPS: reads' + type: DEPENDENT + key: 'zfs.zpool.iostat.reads[{#POOLNAME}]' + delay: '0' + history: 30d + value_type: 
FLOAT + units: iops + preprocessing: + - + type: JAVASCRIPT + parameters: + - | + 'use strict'; + var text = value; + const myArray = text.split(" "); + return myArray[3]; + master_item: + key: 'zfs.zpool.iostat[{#POOLNAME}]' + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS zpool' + - + uuid: b99d5ab922324536bc2e013ac1fca306 + name: 'Zpool {#POOLNAME} IOPS: writes' + type: DEPENDENT + key: 'zfs.zpool.iostat.writes[{#POOLNAME}]' + delay: '0' + history: 30d + value_type: FLOAT + units: iops + preprocessing: + - + type: JAVASCRIPT + parameters: + - | + 'use strict'; + var text = value; + const myArray = text.split(" "); + return myArray[4]; + master_item: + key: 'zfs.zpool.iostat[{#POOLNAME}]' + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS zpool' + - + uuid: 7142e6f1eceb4dc29e3a03d494230564 + name: 'Zpool {#POOLNAME}: Get iostats' + type: ZABBIX_ACTIVE + key: 'zfs.zpool.iostat[{#POOLNAME}]' + history: '0' + trends: '0' + value_type: TEXT + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS zpool' + - + uuid: 867075d6eb1743069be868007472192b + name: 'Zpool {#POOLNAME} scrub status' + type: ZABBIX_ACTIVE + key: 'zfs.zpool.scrub[{#POOLNAME}]' + delay: 5m + history: 30d + description: | + Detect if the pool is currently scrubbing itself. + + This is not a bad thing itself, but it slows down the entire pool and should be terminated when on production server during business hours if it causes a noticeable slowdown. + valuemap: + name: 'ZFS zpool scrub status' + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS zpool' + trigger_prototypes: + - + uuid: 792be07c555c4ae6a9819d69d332357b + expression: 'max(/ZFS on Linux/zfs.zpool.scrub[{#POOLNAME}],12h)=0' + name: 'Zpool {#POOLNAME} is scrubbing for more than 12h on {HOST.NAME}' + priority: AVERAGE + dependencies: + - + name: 'Zpool {#POOLNAME} is scrubbing for more than 24h on {HOST.NAME}' + expression: 'max(/ZFS on Linux/zfs.zpool.scrub[{#POOLNAME}],24h)=0' + - + uuid: 04cac9633f164227b1f9b2fe26923609 + expression: 'max(/ZFS on Linux/zfs.zpool.scrub[{#POOLNAME}],24h)=0' + name: 'Zpool {#POOLNAME} is scrubbing for more than 24h on {HOST.NAME}' + priority: HIGH + trigger_prototypes: + - + uuid: 82fce07b30114c7e8645689317e2c1b4 + expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) ) ) > ({$ZPOOL_AVERAGE_ALERT}/100)' + name: 'More than {$ZPOOL_AVERAGE_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}' + priority: AVERAGE + dependencies: + - + name: 'More than {$ZPOOL_HIGH_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}' + expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) ) ) > ({$ZPOOL_HIGH_ALERT}/100)' + - + uuid: ab56a2a8eb3d4b4294707e2a8aa94e22 + expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) ) ) > ({$ZPOOL_DISASTER_ALERT}/100)' + name: 'More than {$ZPOOL_DISASTER_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}' + priority: DISASTER + - + uuid: c9c22e6617af4ad09970d2988c4a7fe7 + expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},available]) + last(/ZFS on 
Linux/zfs.get.fsinfo[{#POOLNAME},used]) ) ) > ({$ZPOOL_HIGH_ALERT}/100)' + name: 'More than {$ZPOOL_HIGH_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}' + priority: HIGH + dependencies: + - + name: 'More than {$ZPOOL_DISASTER_ALERT}% used on zpool {#POOLNAME} on {HOST.NAME}' + expression: '( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) / ( last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},available]) + last(/ZFS on Linux/zfs.get.fsinfo[{#POOLNAME},used]) ) ) > ({$ZPOOL_DISASTER_ALERT}/100)' + graph_prototypes: + - + uuid: 926abae3e18144f0899711fdfd16e808 + name: 'ZFS zpool {#POOLNAME} IOPS' + ymin_type_1: FIXED + graph_items: + - + sortorder: '1' + color: 5C6BC0 + item: + host: 'ZFS on Linux' + key: 'zfs.zpool.iostat.reads[{#POOLNAME}]' + - + sortorder: '2' + color: EF5350 + item: + host: 'ZFS on Linux' + key: 'zfs.zpool.iostat.writes[{#POOLNAME}]' + - + uuid: 63ae2d7acd4d4d15b4c5e7a5a90a063a + name: 'ZFS zpool {#POOLNAME} space usage' + type: STACKED + graph_items: + - + sortorder: '1' + color: 00EE00 + item: + host: 'ZFS on Linux' + key: 'zfs.get.fsinfo[{#POOLNAME},available]' + - + sortorder: '2' + color: EE0000 + item: + host: 'ZFS on Linux' + key: 'zfs.get.fsinfo[{#POOLNAME},used]' + - + uuid: aa35d164bacd45c5983fd2856781da88 + name: 'ZFS zpool {#POOLNAME} throughput' + ymin_type_1: FIXED + graph_items: + - + sortorder: '1' + color: 5C6BC0 + item: + host: 'ZFS on Linux' + key: 'zfs.zpool.iostat.nread[{#POOLNAME}]' + - + sortorder: '2' + drawtype: BOLD_LINE + color: EF5350 + item: + host: 'ZFS on Linux' + key: 'zfs.zpool.iostat.nwritten[{#POOLNAME}]' + - + uuid: 6c96e092f08f4b98af9a377782180689 + name: 'Zfs vdev discovery' + type: ZABBIX_ACTIVE + key: zfs.vdev.discovery + delay: 1h + lifetime: 3d + item_prototypes: + - + uuid: 9f63161726774a28905c87aac92cf1e9 + name: 'vdev {#VDEV}: CHECKSUM error counter' + type: ZABBIX_ACTIVE + key: 'zfs.vdev.error_counter.cksum[{#VDEV}]' + delay: 5m + history: 30d + description: | + This device has experienced an unrecoverable error. Determine if the device needs to be replaced. + + If yes, use 'zpool replace' to replace the device. + + If not, clear the error with 'zpool clear'. + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS vdev' + - + uuid: 48a02eb060fd4b73bdde08a2795c4717 + name: 'vdev {#VDEV}: READ error counter' + type: ZABBIX_ACTIVE + key: 'zfs.vdev.error_counter.read[{#VDEV}]' + delay: 5m + history: 30d + description: | + This device has experienced an unrecoverable error. Determine if the device needs to be replaced. + + If yes, use 'zpool replace' to replace the device. + + If not, clear the error with 'zpool clear'. + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS vdev' + - + uuid: 15953ba38fde4b8c8681955a27d9204a + name: 'vdev {#VDEV}: WRITE error counter' + type: ZABBIX_ACTIVE + key: 'zfs.vdev.error_counter.write[{#VDEV}]' + delay: 5m + history: 30d + description: | + This device has experienced an unrecoverable error. Determine if the device needs to be replaced. + + If yes, use 'zpool replace' to replace the device. + + If not, clear the error with 'zpool clear'. 
+ tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS vdev' + - + uuid: 3e64a59d2a154a89a3bc43483942302d + name: 'vdev {#VDEV}: total number of errors' + type: CALCULATED + key: 'zfs.vdev.error_total[{#VDEV}]' + delay: 5m + history: 30d + params: 'last(//zfs.vdev.error_counter.cksum[{#VDEV}])+last(//zfs.vdev.error_counter.read[{#VDEV}])+last(//zfs.vdev.error_counter.write[{#VDEV}])' + description: | + This device has experienced an unrecoverable error. Determine if the device needs to be replaced. + + If yes, use 'zpool replace' to replace the device. + + If not, clear the error with 'zpool clear'. + tags: + - + tag: Application + value: ZFS + - + tag: Application + value: 'ZFS vdev' + trigger_prototypes: + - + uuid: 44f7667c275d4a04891bc4f1d00e668b + expression: 'last(/ZFS on Linux/zfs.vdev.error_total[{#VDEV}])>0' + name: 'vdev {#VDEV} has encountered {ITEM.VALUE} errors on {HOST.NAME}' + priority: HIGH + description: | + This device has experienced an unrecoverable error. Determine if the device needs to be replaced. + + If yes, use 'zpool replace' to replace the device. + + If not, clear the error with 'zpool clear'. + + You may also run a zpool scrub to check if some other undetected errors are present on this vdev. + graph_prototypes: + - + uuid: ab78dba991ba4311a04740fc69b30381 + name: 'ZFS vdev {#VDEV} errors' + ymin_type_1: FIXED + graph_items: + - + color: CC00CC + item: + host: 'ZFS on Linux' + key: 'zfs.vdev.error_counter.cksum[{#VDEV}]' + - + sortorder: '1' + color: F63100 + item: + host: 'ZFS on Linux' + key: 'zfs.vdev.error_counter.read[{#VDEV}]' + - + sortorder: '2' + color: BBBB00 + item: + host: 'ZFS on Linux' + key: 'zfs.vdev.error_counter.write[{#VDEV}]' + macros: + - + macro: '{$ZFS_ARC_META_ALERT}' + value: '90' + - + macro: '{$ZFS_AVERAGE_ALERT}' + value: '90' + - + macro: '{$ZFS_DISASTER_ALERT}' + value: '99' + - + macro: '{$ZFS_FSNAME_MATCHES}' + value: / + description: 'Use this to determine the datasets to autodiscover defaults to all datasets with a ''/'' in the name' + - + macro: '{$ZFS_FSNAME_NOTMATCHES}' + value: '([a-z-0-9]{64}$|[a-z-0-9]{64}-init$)' + description: 'Use this to determine the datasets to not autodiscover. 
Ignore docker created datasets by default' + - + macro: '{$ZFS_HIGH_ALERT}' + value: '95' + - + macro: '{$ZPOOL_AVERAGE_ALERT}' + value: '85' + - + macro: '{$ZPOOL_DISASTER_ALERT}' + value: '99' + - + macro: '{$ZPOOL_HIGH_ALERT}' + value: '90' + dashboards: + - + uuid: 180e8c0dc05946e4b8552e3a01df347f + name: 'ZFS ARC' + pages: + - + widgets: + - + type: GRAPH_CLASSIC + width: '24' + height: '5' + fields: + - + type: INTEGER + name: source_type + value: '0' + - + type: GRAPH + name: graphid + value: + host: 'ZFS on Linux' + name: 'ZFS ARC memory usage' + - + type: GRAPH_CLASSIC + 'y': '5' + width: '24' + height: '5' + fields: + - + type: INTEGER + name: source_type + value: '0' + - + type: GRAPH + name: graphid + value: + host: 'ZFS on Linux' + name: 'ZFS ARC Cache Hit Ratio' + - + type: GRAPH_CLASSIC + 'y': '10' + width: '24' + height: '5' + fields: + - + type: INTEGER + name: source_type + value: '0' + - + type: GRAPH + name: graphid + value: + host: 'ZFS on Linux' + name: 'ZFS ARC breakdown' + - + type: GRAPH_CLASSIC + 'y': '15' + width: '24' + height: '5' + fields: + - + type: INTEGER + name: source_type + value: '0' + - + type: GRAPH + name: graphid + value: + host: 'ZFS on Linux' + name: 'ZFS ARC arc_meta_used breakdown' + - + uuid: 442dda5c36c04fc78c3a73eacf26bc7f + name: 'ZFS zpools' + pages: + - + widgets: + - + type: GRAPH_PROTOTYPE + width: '8' + height: '5' + fields: + - + type: INTEGER + name: source_type + value: '2' + - + type: INTEGER + name: columns + value: '1' + - + type: INTEGER + name: rows + value: '1' + - + type: GRAPH_PROTOTYPE + name: graphid + value: + host: 'ZFS on Linux' + name: 'ZFS zpool {#POOLNAME} IOPS' + - + type: GRAPH_PROTOTYPE + x: '8' + width: '8' + height: '5' + fields: + - + type: INTEGER + name: source_type + value: '2' + - + type: INTEGER + name: columns + value: '1' + - + type: INTEGER + name: rows + value: '1' + - + type: GRAPH_PROTOTYPE + name: graphid + value: + host: 'ZFS on Linux' + name: 'ZFS zpool {#POOLNAME} throughput' + - + type: GRAPH_PROTOTYPE + x: '16' + width: '8' + height: '5' + fields: + - + type: INTEGER + name: source_type + value: '2' + - + type: INTEGER + name: columns + value: '1' + - + type: INTEGER + name: rows + value: '1' + - + type: GRAPH_PROTOTYPE + name: graphid + value: + host: 'ZFS on Linux' + name: 'ZFS zpool {#POOLNAME} space usage' + valuemaps: + - + uuid: d1d7b0898d06481dbcec8b02d915fb1c + name: 'ZFS zpool scrub status' + mappings: + - + value: '0' + newvalue: 'Scrub in progress' + - + value: '1' + newvalue: 'No scrub in progress' + triggers: + - + uuid: 1daac44b853b4b6da767c9c3af96b774 + expression: 'last(/ZFS on Linux/zfs.arcstats[dnode_size])>(last(/ZFS on Linux/zfs.arcstats[arc_dnode_limit])*0.9)' + name: 'ZFS ARC dnode size > 90% dnode max size on {HOST.NAME}' + priority: HIGH + - + uuid: 69c18b7ceb3d4da2bda0e05f9a12453f + expression: 'last(/ZFS on Linux/zfs.arcstats[arc_meta_used])>(last(/ZFS on Linux/zfs.arcstats[arc_meta_limit])*0.01*{$ZFS_ARC_META_ALERT})' + name: 'ZFS ARC meta size > {$ZFS_ARC_META_ALERT}% meta max size on {HOST.NAME}' + priority: HIGH + graphs: + - + uuid: 1510111dc5414e6d80a5230ce6a81f1d + name: 'ZFS ARC arc_meta_used breakdown' + type: STACKED + ymin_type_1: FIXED + graph_items: + - + color: 3333FF + item: + host: 'ZFS on Linux' + key: 'zfs.arcstats[metadata_size]' + - + sortorder: '1' + color: 00EE00 + item: + host: 'ZFS on Linux' + key: 'zfs.arcstats[dnode_size]' + - + sortorder: '2' + color: EE0000 + item: + host: 'ZFS on Linux' + key: 'zfs.arcstats[hdr_size]' + - + sortorder: '3' + 
color: EEEE00 + item: + host: 'ZFS on Linux' + key: 'zfs.arcstats[dbuf_size]' + - + sortorder: '4' + color: EE00EE + item: + host: 'ZFS on Linux' + key: 'zfs.arcstats[bonus_size]' + - + uuid: 203eeeaadc9444ccbbc31cf043e836cb + name: 'ZFS ARC breakdown' + type: STACKED + ymin_type_1: FIXED + graph_items: + - + color: 3333FF + item: + host: 'ZFS on Linux' + key: 'zfs.arcstats[data_size]' + - + sortorder: '1' + color: 00AA00 + item: + host: 'ZFS on Linux' + key: 'zfs.arcstats[metadata_size]' + - + sortorder: '2' + color: EE0000 + item: + host: 'ZFS on Linux' + key: 'zfs.arcstats[dnode_size]' + - + sortorder: '3' + color: CCCC00 + item: + host: 'ZFS on Linux' + key: 'zfs.arcstats[hdr_size]' + - + sortorder: '4' + color: A54F10 + item: + host: 'ZFS on Linux' + key: 'zfs.arcstats[dbuf_size]' + - + sortorder: '5' + color: '888888' + item: + host: 'ZFS on Linux' + key: 'zfs.arcstats[bonus_size]' + - + uuid: 4c493303be4a45a7a96d3ef7246843c0 + name: 'ZFS ARC Cache Hit Ratio' + ymin_type_1: FIXED + ymax_type_1: FIXED + graph_items: + - + color: 00CC00 + item: + host: 'ZFS on Linux' + key: zfs.arcstats_hit_ratio + - + uuid: b2fce9515a7d4218a5e9015f212c2a60 + name: 'ZFS ARC memory usage' + ymin_type_1: FIXED + ymax_type_1: ITEM + ymax_item_1: + host: 'ZFS on Linux' + key: 'zfs.arcstats[c_max]' + graph_items: + - + drawtype: GRADIENT_LINE + color: 0000EE + item: + host: 'ZFS on Linux' + key: 'zfs.arcstats[size]' + - + sortorder: '1' + drawtype: BOLD_LINE + color: DD0000 + item: + host: 'ZFS on Linux' + key: 'zfs.arcstats[c_max]' + - + sortorder: '2' + color: 00BB00 + item: + host: 'ZFS on Linux' + key: 'zfs.arcstats[c_min]' diff --git a/Operating_Systems/Linux/template_zfs_on_linux/6.0/ZoL_with_sudo.conf b/Operating_Systems/Linux/template_zfs_on_linux/6.0/userparams_zol_with_sudo.conf similarity index 86% rename from Operating_Systems/Linux/template_zfs_on_linux/6.0/ZoL_with_sudo.conf rename to Operating_Systems/Linux/template_zfs_on_linux/6.0/userparams_zol_with_sudo.conf index 9cd46ecbe..480ad1a08 100644 --- a/Operating_Systems/Linux/template_zfs_on_linux/6.0/ZoL_with_sudo.conf +++ b/Operating_Systems/Linux/template_zfs_on_linux/6.0/userparams_zol_with_sudo.conf @@ -32,10 +32,13 @@ UserParameter=zfs.arcstats[*],awk '/^$1/ {printf $$3;}' /proc/spl/kstat/zfs/arcs UserParameter=zfs.zpool.scrub[*],/usr/bin/sudo /sbin/zpool status $1 | grep "scrub in progress" > /dev/null ; echo $? 
# vdev state -UserParameter=zfs.vdev.state[*],/usr/bin/sudo /sbin/zpool status | grep "$1" | awk '{ print $$2 }' +UserParameter=zfs.vdev.state[*],/usr/bin/sudo /sbin/zpool status | grep -m 1 "$1" | awk '{ print $$2 }' # vdev READ error counter -UserParameter=zfs.vdev.error_counter.read[*],/usr/bin/sudo /sbin/zpool status | grep "$1" | awk '{ print $$3 }' | numfmt --from=si +UserParameter=zfs.vdev.error_counter.read[*],/usr/bin/sudo /sbin/zpool status | grep -m 1 "$1" | awk '{ print $$3 }' | numfmt --from=si # vdev WRITE error counter -UserParameter=zfs.vdev.error_counter.write[*],/usr/bin/sudo /sbin/zpool status | grep "$1" | awk '{ print $$4 }' | numfmt --from=si +UserParameter=zfs.vdev.error_counter.write[*],/usr/bin/sudo /sbin/zpool status | grep -m 1 "$1" | awk '{ print $$4 }' | numfmt --from=si # vdev CHECKSUM error counter -UserParameter=zfs.vdev.error_counter.cksum[*],/usr/bin/sudo /sbin/zpool status | grep "$1" | awk '{ print $$5 }' | numfmt --from=si +UserParameter=zfs.vdev.error_counter.cksum[*],/usr/bin/sudo /sbin/zpool status | grep -m 1 "$1" | awk '{ print $$5 }' | numfmt --from=si + +# Get zpool iostats +UserParameter=zfs.zpool.iostat[*],/usr/bin/sudo /sbin/zpool iostat $1 -H -p 1 2 | tail -n 1 diff --git a/Operating_Systems/Linux/template_zfs_on_linux/6.0/userparams_zol_without_sudo.conf b/Operating_Systems/Linux/template_zfs_on_linux/6.0/userparams_zol_without_sudo.conf index 561178982..920b7aee3 100644 --- a/Operating_Systems/Linux/template_zfs_on_linux/6.0/userparams_zol_without_sudo.conf +++ b/Operating_Systems/Linux/template_zfs_on_linux/6.0/userparams_zol_without_sudo.conf @@ -32,10 +32,13 @@ UserParameter=zfs.arcstats[*],awk '/^$1/ {printf $$3;}' /proc/spl/kstat/zfs/arcs UserParameter=zfs.zpool.scrub[*],/sbin/zpool status $1 | grep "scrub in progress" > /dev/null ; echo $? # vdev state -UserParameter=zfs.vdev.state[*],/sbin/zpool status | grep "$1" | awk '{ print $$2 }' +UserParameter=zfs.vdev.state[*],/sbin/zpool status | grep -m 1 "$1" | awk '{ print $$2 }' # vdev READ error counter -UserParameter=zfs.vdev.error_counter.read[*],/sbin/zpool status | grep "$1" | awk '{ print $$3 }' | numfmt --from=si +UserParameter=zfs.vdev.error_counter.read[*],/sbin/zpool status | grep -m 1 "$1" | awk '{ print $$3 }' | numfmt --from=si # vdev WRITE error counter -UserParameter=zfs.vdev.error_counter.write[*],/sbin/zpool status | grep "$1" | awk '{ print $$4 }' | numfmt --from=si +UserParameter=zfs.vdev.error_counter.write[*],/sbin/zpool status | grep -m 1 "$1" | awk '{ print $$4 }' | numfmt --from=si # vdev CHECKSUM error counter -UserParameter=zfs.vdev.error_counter.cksum[*],/sbin/zpool status | grep "$1" | awk '{ print $$5 }' | numfmt --from=si +UserParameter=zfs.vdev.error_counter.cksum[*],/sbin/zpool status | grep -m 1 "$1" | awk '{ print $$5 }' | numfmt --from=si + +# Get zpool iostats +UserParameter=zfs.zpool.iostat[*],/sbin/zpool iostat $1 -H -p 1 2 | tail -n 1
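
On the UserParameter side, a hypothetical session (pool, vdev, and figures invented) showing why the two changes matter — `grep -m 1` guarantees the `zfs.vdev.*` items a single line even when a vdev name such as `mirror-0` occurs in several pools (it still reports only the first pool's counters, but the item no longer receives unparseable multi-line output), and the new `zfs.zpool.iostat[*]` key produces the one-line sample consumed by the master item above:

```sh
# Plain grep returns one line per pool containing "mirror-0":
$ /sbin/zpool status | grep "mirror-0" | awk '{ print $2 }'
ONLINE
DEGRADED
# -m 1 stops after the first match, yielding a single value:
$ /sbin/zpool status | grep -m 1 "mirror-0" | awk '{ print $2 }'
ONLINE

# -H = scripted mode (no header, tab separators), -p = exact values;
# "1 2" samples twice one second apart, and tail keeps the second
# sample, i.e. a true per-second rate.
$ /sbin/zpool iostat tank -H -p 1 2 | tail -n 1
tank	419430400	67108864	12	34	1048576	2097152
```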