Execution
Date 11 Sep 2025 11:14:57 +0100
Duration 00:00:06.13
Controller ssh-gw-4.layershift.com
User root
Versions
Ansible 2.16.11
ara 1.7.3 / 1.7.3
Python 3.10.10
Summary
1 Hosts
10 Tasks
10 Results
1 Plays
1 Files
0 Records

File: /home/ssh-gateway/ansible/kuly/validate_provider_metrics_output.yaml

---
# Validates the JSON emitted by `agent360 test provider_metrics`: the filtered
# output must parse as JSON, and every key listed in expected_types must carry
# the declared numeric type.
- name: Validate provider_metrics output from agent360 (using clean shell filter)
  hosts: all
  gather_facts: false
  vars:
    # Map of metric key -> expected JSON type ('int' or 'float'), compared
    # against Jinja's type_debug in the check task below. Keys absent from the
    # agent output are skipped there (not treated as errors), and 'float'
    # expectations also accept int values (JSON renders whole-number floats
    # without a decimal point).
    expected_types:
      firewall_up: int
      http_response_code: int
      http_response_time_ms: float
      imunify360_and_fail2ban: int
      imunify360_license: int
      imunify360_services: int
      imunify360_underlicensed: int
      iptables_entries: int
      kcare_status: int
      litespeed_nginx_parallel: int
      litespeed_running_ok: int
      monitoring360_ip_dropped: int
      mysql_free_connections: int
      mysql_iptables_drop: int
      mysql_response_time_ms: float
      oom_kills_since_reboot: int
      queue_size: int
      ram_upgrade_required: int
      recent_oom_kills: int
      smtp_response_time_ms: float
      ssh_port_checksum_ok: int
      time_sync_enabled: int

  tasks:
    # Collect raw metrics from agent360 and strip the "provider_metrics:"
    # header plus any comment lines so only the JSON payload remains.
    # `egrep` is deprecated in GNU grep; use `grep -E` instead.
    # NOTE: `grep -v` exits 1 when nothing survives the filter (e.g. agent360
    # produced no output), which would abort the play right here and skip the
    # friendly diagnostics below — `failed_when: false` defers failure handling
    # to the explicit empty-output and JSON-parse checks that follow.
    - name: Run agent360 test provider_metrics and clean output
      ansible.builtin.shell: |
        /usr/local/bin/agent360 test provider_metrics 2>/dev/null | grep -E -v "provider_metrics:|#"
      register: metrics_output
      changed_when: false
      failed_when: false

    # Surface the filtered output for troubleshooting before validation runs.
    - name: DEBUG - Show cleaned output
      ansible.builtin.debug:
        var: metrics_output.stdout

    # Abort early with an actionable message when filtering left nothing to
    # parse. The suggested manual command must mirror the task above exactly:
    # the original message pointed at /usr/bin/agent360 while the play actually
    # runs /usr/local/bin/agent360 — fixed here, and egrep modernized to grep -E.
    - name: Fail if stdout is empty after filtering
      ansible.builtin.fail:
        msg: |
          ❌ No JSON output after filtering. Command may have failed or output format changed.
          Try running manually on host:
          /usr/local/bin/agent360 test provider_metrics 2>/dev/null | grep -E -v "provider_metrics:|#"
      when: metrics_output.stdout | trim == ""

    # Parse the filtered stdout as JSON. Parse failures are captured rather
    # than fatal (ignore_errors + register) so the next task can report them
    # with the offending output attached.
    # Canonical boolean `true` replaces non-standard `yes` (yamllint truthy
    # rule; Ansible style guidance).
    - name: Parse JSON output
      ansible.builtin.set_fact:
        metrics_json: "{{ metrics_output.stdout | trim | from_json }}"
      ignore_errors: true
      register: parse_result

    # Report a parse failure with the raw (trimmed) output attached so the
    # operator can see exactly what agent360 emitted.
    - name: Fail if JSON parsing failed
      ansible.builtin.fail:
        msg: |
          ❌ Output is not valid JSON:
          {{ metrics_output.stdout | trim }}
      when: parse_result is failed

    # Start with an empty accumulator; the check task appends one record per
    # type mismatch it finds.
    - name: Initialize list for type mismatches
      ansible.builtin.set_fact:
        type_mismatches: []

    # Compare the actual Jinja type (type_debug) of every expected key against
    # expected_types, appending a mismatch record for each offender.
    # Keys missing from metrics_json are silently skipped (first `when` item) —
    # absence is not treated as an error here.
    # A 'float' expectation also accepts 'int', because JSON emits whole-number
    # floats without a decimal point and they parse as ints.
    - name: Check each key against expected type
      ansible.builtin.set_fact:
        type_mismatches: >-
          {{
            type_mismatches + [{
              'key': item,
              'expected': expected_types[item],
              'actual_type': metrics_json[item] | type_debug,
              'actual_value': metrics_json[item]
            }]
          }}
      when:
        - metrics_json[item] is defined
        - (expected_types[item] == 'int' and metrics_json[item] | type_debug != 'int') or
          (expected_types[item] == 'float' and metrics_json[item] | type_debug not in ['float', 'int'])
      loop: "{{ expected_types.keys() | list }}"

    # Print one structured message per recorded mismatch; loop_control.label
    # keeps the per-iteration header terse (just the key name).
    # NOTE(review): the `when` guard is redundant — looping over an empty list
    # already yields no iterations — but it is harmless and documents intent.
    - name: Display mismatches if any
      ansible.builtin.debug:
        msg: |
          ⚠️ Type mismatch found:
          Key: {{ item.key }}
          Expected: {{ item.expected }}
          Actual Type: {{ item.actual_type }}
          Actual Value: {{ item.actual_value | to_nice_json }}
      loop: "{{ type_mismatches }}"
      when: type_mismatches | length > 0
      loop_control:
        label: "{{ item.key }}"

    # Hard-fail the play when any type mismatch was accumulated above.
    - name: Fail if any mismatches found
      ansible.builtin.fail:
        msg: "❌ Found {{ type_mismatches | length }} type mismatch(es) in provider_metrics output."
      when: type_mismatches | length > 0

    # Positive confirmation when every checked key matched its expected type.
    - name: Success message if all OK
      ansible.builtin.debug:
        msg: "✅ All metrics have correct types. Output is valid."
      when: type_mismatches | length == 0