diff --git a/inv-end-7.3/group_vars/all.yml b/inv-end-7.3/group_vars/all.yml index a2ddf94..86a0c51 100644 --- a/inv-end-7.3/group_vars/all.yml +++ b/inv-end-7.3/group_vars/all.yml @@ -60,3 +60,10 @@ ip_master_2: '{{ cluster["nodes"]["control-2"]["ip"] }}' ip_worker_0: '{{ cluster["nodes"]["compute-0"]["ip"] }}' ip_worker_1: '{{ cluster["nodes"]["compute-1"]["ip"] }}' ip_worker_2: '{{ cluster["nodes"]["compute-2"]["ip"] }}' +# +mqttgate_id: mqttgate +mqttgate_pass: mqttgate +mqtt_promtail_id: grafanaloki01 +mqtt_promtail_pass: grafanaloki01 +mqtt_consend_id: consend01 +mqtt_consend_pass: consend01 diff --git a/inv-end-7.3/host_vars/lxocpb01-7.3.yml b/inv-end-7.3/host_vars/lxocpb01-7.3.yml index d1083d3..c96a4cf 100644 --- a/inv-end-7.3/host_vars/lxocpb01-7.3.yml +++ b/inv-end-7.3/host_vars/lxocpb01-7.3.yml @@ -6,11 +6,11 @@ guest_name: "LXOCPB01" guest_pwd: "LBYONLY" guest_dasd_grp_linux: "LINUX" guest_install_hostname: "lxocpb01" -guest_temp_ipaddr: "9.60.86.74" -guest_install_ipaddr: "9.60.86.74" -guest_install_netmask: "255.255.254.0" -guest_install_gateway: "9.60.86.1" -guest_install_nameserver: "9.60.70.82" +guest_temp_ipaddr: "9.33.124.31" +guest_install_ipaddr: "9.33.124.31" +guest_install_netmask: "255.255.255.0" +guest_install_gateway: "9.33.124.1" +guest_install_nameserver: "9.0.0.1" guest_install_znet: "qeth,0.0.0ad0,0.0.0ad1,0.0.0ad2,layer2=1,portname=none,portno=0" guest_install_dasd: "0.0.0200" guest_install_nicid: "encad0" @@ -28,7 +28,7 @@ guest_install_repo_appstream: "AppStream" # https_proxy: http://{{ upstream_proxy_ip }}:{{ upstream_proxy_port }} # # smapi parameters -smapi_host: "9.60.86.73" +smapi_host: "9.33.124.30" smapi_user: "IBMAUTO" smapi_password: "jTghTGinJupD63yh" # diff --git a/inv-end-7.3/host_vars/lxocpb01-fba-7.3.yml b/inv-end-7.3/host_vars/lxocpb01-fba-7.3.yml index e028cfd..a9f2e28 100644 --- a/inv-end-7.3/host_vars/lxocpb01-fba-7.3.yml +++ b/inv-end-7.3/host_vars/lxocpb01-fba-7.3.yml @@ -6,11 +6,11 @@ guest_name: "LXOCPB01" guest_pwd: "LBYONLY" guest_dasd_grp_linux: "LINUX" guest_install_hostname: "lxocpb01-fba" -guest_temp_ipaddr: "9.60.87.254" -guest_install_ipaddr: "9.60.87.254" -guest_install_netmask: "255.255.254.0" -guest_install_gateway: "9.60.86.1" -guest_install_nameserver: "9.60.70.82" +guest_temp_ipaddr: "9.33.124.33" +guest_install_ipaddr: "9.33.124.33" +guest_install_netmask: "255.255.255.0" +guest_install_gateway: "9.33.124.1" +guest_install_nameserver: "9.0.0.1" guest_install_znet: "qeth,0.0.0ad0,0.0.0ad1,0.0.0ad2,layer2=1,portname=none,portno=0" guest_install_dasd: "0.0.0200" guest_install_nicid: "encad0" @@ -28,7 +28,7 @@ guest_install_repo_appstream: "AppStream" # https_proxy: http://{{ upstream_proxy_ip }}:{{ upstream_proxy_port }} # # smapi parameters -smapi_host: "9.60.87.253" +smapi_host: "9.33.124.32" smapi_user: "IBMAUTO" smapi_password: "jTghTGinJupD63yh" # diff --git a/local-playbooks/build-a-bastion.yml b/local-playbooks/build-a-bastion.yml index 29b8695..063ca04 100644 --- a/local-playbooks/build-a-bastion.yml +++ b/local-playbooks/build-a-bastion.yml @@ -2,12 +2,12 @@ - name: Create a CA for the Bastion hosts: localhost roles: - - create-local-ca + - { role: 'create-local-ca', tags: 'local-ca' } - name: Copy Intermediate CA cert to the ESI ELAN system hosts: s390x_bastion_workstation roles: - - copy-ca-to-bastion + - { role: 'copy-ca-to-bastion', tags: 'copy-ca-to-bastion' } vars: root_ca_key_path: "{{ hostvars['localhost']['ca_key_path'] }}" root_ca_csr_path: "{{ hostvars['localhost']['ca_csr_path'] }}" @@ -16,18 +16,20 
@@ - name: Configure services on the ESI ELAN system hosts: s390x_bastion_workstation roles: - - configure-internal-net - - almalinux-gpg - - install-base-packages - - configure-squid - - setup-firstboot-ipconf - - configure-dns - - configure-nfs - - configure-apache - - setup-web-resources - - configure-haproxy - - configure-cockpit + - { role: 'configure-internal-net', tags: 'internal-net' } + - { role: 'almalinux-gpg', tags: 'almalinux-gpg' } + - { role: 'install-base-packages', tags: 'base-packages' } + - { role: 'configure-squid', tags: 'squid' } + - { role: 'setup-firstboot-ipconf', tags: 'firstboot' } + - { role: 'configure-dns', tags: 'dns' } + - { role: 'configure-nfs', tags: 'nfs' } + - { role: 'configure-apache', tags: 'apache' } + - { role: 'setup-web-resources', tags: 'web-resources' } + - { role: 'configure-haproxy', tags: 'haproxy' } + - { role: 'configure-cockpit', tags: 'cockpit' } # - configure-ignition - - setup-ocp-deployer - - setup-icic-deployer - - setup-finna-response + - { role: 'setup-grafana-log-viewer', tags: 'grafana' } + - { role: 'setup-ocp-deployer', tags: 'ocp-deployer' } + - { role: 'setup-icic-deployer', tags: 'icic-deployer' } + - { role: 'setup-podman-and-registry', tags: 'podman-registry' } + - { role: 'setup-finna-response', tags: 'finna-response' } diff --git a/local-playbooks/prepare-elans-for-dump.yaml b/local-playbooks/prepare-elans-for-dump.yaml new file mode 100644 index 0000000..a8ba3f9 --- /dev/null +++ b/local-playbooks/prepare-elans-for-dump.yaml @@ -0,0 +1,48 @@ +--- +- name: Clean up the ELAN disks + hosts: s390x_bastion_workstation + tasks: + - name: Find rotated log files + find: + paths: /var/log + recurse: true + patterns: + - '*-2*' + - 'dnf*.log.*' + - 'audit.log.*' + - 'grafana.log.*' + register: find_results + - name: Clean the found logs + file: + path: "{{ item['path'] }}" + state: absent + with_items: "{{ find_results['files'] }}" + - name: Zero the empty space + shell: + cmd: dd if=/dev/zero of=/zerofile bs=1k || rm -f /zerofile + +- name: Shut down the ELANs to prepare for dump + hosts: localhost + vars: + smapi_ckd_ip: "{{ hostvars['lxocpb01-7.3'].smapi_host }}" + smapi_fba_ip: "{{ hostvars['lxocpb01-fba-7.3'].smapi_host }}" + smapi_ckd_user: "{{ hostvars['lxocpb01-7.3'].smapi_user }}" + smapi_fba_user: "{{ hostvars['lxocpb01-fba-7.3'].smapi_user }}" + smapi_ckd_pass: "{{ hostvars['lxocpb01-7.3'].smapi_password }}" + smapi_fba_pass: "{{ hostvars['lxocpb01-fba-7.3'].smapi_password }}" + tasks: + - name: Shut down the ELANS + shell: + cmd: smcli id -T LXOCPB01 -H {{ item.ip }}/44444 -U {{ item.user }} -P {{ item.password }} + loop: + - { ip: "{{ smapi_ckd_ip }}", user: "{{ smapi_ckd_user }}", password: "{{ smapi_ckd_pass }}" } + - { ip: "{{ smapi_fba_ip }}", user: "{{ smapi_fba_user }}", password: "{{ smapi_fba_pass }}" } + - name: Prompt to continue + pause: + prompt: Perform the dump, then resume + - name: Start up the ELANs + shell: + cmd: smcli ia -T LXOCPB01 -H {{ item.ip }}/44444 -U {{ item.user }} -P {{ item.password }} + loop: + - { ip: "{{ smapi_ckd_ip }}", user: "{{ smapi_ckd_user }}", password: "{{ smapi_ckd_pass }}" } + - { ip: "{{ smapi_fba_ip }}", user: "{{ smapi_fba_user }}", password: "{{ smapi_fba_pass }}" } diff --git a/local-playbooks/roles/almalinux-gpg/tasks/main.yml b/local-playbooks/roles/almalinux-gpg/tasks/main.yml index e0022d6..35a7d5a 100644 --- a/local-playbooks/roles/almalinux-gpg/tasks/main.yml +++ b/local-playbooks/roles/almalinux-gpg/tasks/main.yml @@ -2,8 +2,8 @@ - name: Set up AlmaLinux GPG 
key rpm_key: state: present - key: https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux + key: "https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux{{ '-9' if ansible_distribution_major_version == '9' else '' }}" - name: Set up EPEL GPG key rpm_key: state: present - key: https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-8 + key: "https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{{ ansible_distribution_major_version }}" diff --git a/local-playbooks/roles/configure-apache/tasks/main.yml b/local-playbooks/roles/configure-apache/tasks/main.yml index 72856c3..4eae90b 100644 --- a/local-playbooks/roles/configure-apache/tasks/main.yml +++ b/local-playbooks/roles/configure-apache/tasks/main.yml @@ -86,7 +86,6 @@ - internal - public notify: - - Restart firewalld - Restart httpd - name: Allow traffic at 8443 for apache @@ -101,5 +100,4 @@ - internal - public notify: - - Restart firewalld - Restart httpd diff --git a/local-playbooks/roles/configure-dns/tasks/main.yml b/local-playbooks/roles/configure-dns/tasks/main.yml index 129abf9..22bd370 100644 --- a/local-playbooks/roles/configure-dns/tasks/main.yml +++ b/local-playbooks/roles/configure-dns/tasks/main.yml @@ -23,48 +23,48 @@ # notify: # - Restart dns -#- name: Create DNS Forward zone (internal) -# template: -# src: var/named/ibmpoc_internal.zone.j2 -# dest: /var/named/ibmpoc_internal.zone -# owner: named -# group: named -# mode: 0640 +- name: Create DNS Forward zone (internal) + template: + src: var/named/ibmpoc_internal.zone.j2 + dest: /var/named/ibmpoc_internal.zone + owner: named + group: named + mode: 0640 -#- name: Unmanage resolve.conf in NetworkManager -# lineinfile: -# line: dns=none -# dest: /etc/NetworkManager/NetworkManager.conf -# insertafter: "\\[main\\].*" -# regexp: "^dns=.*" +- name: Unmanage resolve.conf in NetworkManager + lineinfile: + line: dns=none + dest: /etc/NetworkManager/NetworkManager.conf + insertafter: "\\[main\\].*" + regexp: "^dns=.*" -#- name: Restart NetworkManager to have DNS change take effect -# systemd: -# name: NetworkManager -# state: restarted +- name: Restart NetworkManager to have DNS change take effect + systemd: + name: NetworkManager + state: restarted -#- name: Use local dns in resolv.conf -# blockinfile: -# dest: /etc/resolv.conf -# insertbefore: BOF -# block: | -# search {{ cluster_domain_name }} -# nameserver {{ bastion_private_ip_address }} +- name: Use local dns in resolv.conf + blockinfile: + dest: /etc/resolv.conf + insertbefore: BOF + block: | + search {{ cluster_domain_name }} + nameserver {{ bastion_private_ip_address }} -#- name: Create DNS Reverse zone -# template: -# src: var/named/subnet.in-addr.arpa.zone.j2 -# dest: /var/named/{{ subnet_in_addr_name }}.in-addr.arpa.zone -# owner: named -# group: named -# mode: 0640 +- name: Create DNS Reverse zone + template: + src: var/named/subnet.in-addr.arpa.zone.j2 + dest: /var/named/{{ subnet_in_addr_name }}.in-addr.arpa.zone + owner: named + group: named + mode: 0640 -#- name: Create /etc/named.conf -# template: -# src: etc/named.conf.j2 -# dest: /etc/named.conf -# group: named -# mode: 0640 +- name: Create /etc/named.conf + template: + src: etc/named.conf.j2 + dest: /etc/named.conf + group: named + mode: 0640 #- name: Create /etc/named.conf.source # template: @@ -89,20 +89,19 @@ - name: Add dns to firewall firewalld: permanent: true + immediate: true service: dns state: enabled zone: "{{ item }}" with_items: - internal - public - notify: - - Restart firewalld -#- name: Restart named-chroot.service -# service: -# name:
named-chroot.service -# state: restarted -# enabled: true +- name: Restart named-chroot.service + service: + name: named-chroot.service + state: restarted + enabled: true #- name: Restart firewalld.service # service: diff --git a/local-playbooks/roles/configure-dns/templates/var/named/subnet.in-addr.arpa.zone.j2 b/local-playbooks/roles/configure-dns/templates/var/named/subnet.in-addr.arpa.zone.j2 index 1c01e65..eca00c0 100644 --- a/local-playbooks/roles/configure-dns/templates/var/named/subnet.in-addr.arpa.zone.j2 +++ b/local-playbooks/roles/configure-dns/templates/var/named/subnet.in-addr.arpa.zone.j2 @@ -1,9 +1,3 @@ -{% set bootstrap = cluster_nodes['bootstrap'] %} -{% set masters = cluster_nodes['masters'] %} -{% set workers = cluster_nodes['workers'] %} -{% if cluster_nodes['bootworker'] is defined %} -{% set bootworker = cluster_nodes['bootworker'] %} -{% endif %} $TTL 900 @ IN SOA bastion-int.{{ cluster_domain_name }} hostmaster.{{ cluster_domain_name }}. ( @@ -14,22 +8,3 @@ $TTL 900 {{ zvm_internal_ip_address.split('.')[3] }} IN PTR zVM.ibmpoc.internal. {{ zvm_internal_ip_address.split('.')[3] }} IN PTR LDAPSRV.ibmpoc.internal. {{ bastion_private_ip_address.split('.')[3] }} IN PTR bastion-int.{{ cluster_domain_name }}. - -{% for item in masters.keys() %} -{{ masters[item].ip.split('.')[3] }} IN PTR {{ item }}.{{ cluster_domain_name }}. -{% endfor %} - -{% if workers is defined %} -{% for item in workers.keys() %} -{{ workers[item].ip.split('.')[3] }} IN PTR {{ item }}.{{ cluster_domain_name }}. -{% endfor %} -{% endif %} -{% if bootworker is defined %} -{% for item in bootworker.keys() %} -{{ bootworker[item].ip.split('.')[3] }} IN PTR {{ item }}.{{ cluster_domain_name }}. -{% endfor %} -{% endif %} - -{% for item in bootstrap.keys() %} -{{ bootstrap[item].ip.split('.')[3] }} IN PTR {{ item }}.{{ cluster_domain_name }}. 
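A note on the firewalld changes running through these roles (configure-apache, configure-dns, and later configure-squid and configure-haproxy): the dropped "Restart firewalld" notifications are replaced by immediate: true on the firewalld module, which applies a permanent rule to the running firewall in the same task, so no reload handler is needed. A minimal sketch of the pattern, with illustrative service and zone names:

- name: Allow a service through firewalld (sketch)
  firewalld:
    service: dns      # illustrative
    zone: internal    # illustrative
    state: enabled
    permanent: true   # persist across reloads and reboots
    immediate: true   # apply to the running firewall now, replacing the reload handler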
-{% endfor %} diff --git a/local-playbooks/roles/configure-haproxy/tasks/main.yml b/local-playbooks/roles/configure-haproxy/tasks/main.yml index 2980aad..58d333a 100644 --- a/local-playbooks/roles/configure-haproxy/tasks/main.yml +++ b/local-playbooks/roles/configure-haproxy/tasks/main.yml @@ -7,11 +7,11 @@ - python3-libsemanage state: present -- name: Patch HAProxy service script - lineinfile: - dest: /usr/lib/systemd/system/haproxy.service - regex: ^ExecStartPre= - line: ExecStartPre=/bin/bash -c 'until host api.{{ cluster_domain_name }}; do sleep 1; done' && /usr/sbin/haproxy -f $CONFIG -c -q +#- name: Patch HAProxy service script +# lineinfile: +# dest: /usr/lib/systemd/system/haproxy.service +# regex: ^ExecStartPre= +# line: ExecStartPre=/bin/bash -c 'until host api.{{ cluster_domain_name }}; do sleep 1; done' && /usr/sbin/haproxy -f $CONFIG -c -q - name: Create certificate for Stats page block: @@ -68,7 +68,8 @@ - name: Allow http traffic firewalld: service: http - permanent: yes + permanent: true + immediate: true zone: "{{ item }}" state: enabled with_items: @@ -78,7 +79,8 @@ - name: Allow https traffic firewalld: service: https - permanent: yes + permanent: true + immediate: true zone: "{{ item }}" state: enabled with_items: @@ -88,7 +90,8 @@ - name: Allow traffic at port 6443 firewalld: port: 6443/tcp - permanent: yes + permanent: true + immediate: true zone: "{{ item }}" state: enabled with_items: @@ -98,7 +101,8 @@ - name: Allow traffic at port 22623 firewalld: port: 22623/tcp - permanent: yes + permanent: true + immediate: true zone: "{{ item }}" state: enabled with_items: @@ -118,11 +122,15 @@ dest: /etc/haproxy/haproxy.cfg group: haproxy mode: 0644 - notify: - - Restart firewalld - name: Turn on haproxy_connect_any seboolean: name: haproxy_connect_any state: yes persistent: yes + +- name: Start haproxy.service + systemd: + name: haproxy + state: restarted + enabled: true diff --git a/local-playbooks/roles/configure-haproxy/templates/etc/haproxy/haproxy.cfg.j2 b/local-playbooks/roles/configure-haproxy/templates/etc/haproxy/haproxy.cfg.j2 index 3f759d5..c30b262 100644 --- a/local-playbooks/roles/configure-haproxy/templates/etc/haproxy/haproxy.cfg.j2 +++ b/local-playbooks/roles/configure-haproxy/templates/etc/haproxy/haproxy.cfg.j2 @@ -1,7 +1,4 @@ -#jinja2:block_start_string:'[%', block_end_string:'%]', variable_start_string:'[[', variable_end_string:']]' -{# ------------------------------------ CRITICAL -------------------------------------- #} -{# This file must be synchronised between configure-haproxy and setup-firstboot-ipconf! 
#} -{# ------------------------------------ CRITICAL -------------------------------------- #} +# HAProxy default configuration for ELAN global log 127.0.0.1 local2 @@ -38,28 +35,21 @@ frontend elan-router-http mode http option tcplog bind {{ bastion_public_ip_address }}:80 - tcp-request inspect-delay 5s - acl elanweb hdr(host) -i {{ elan_host_name }}.{{ zvmesi_domain_name }} - acl elanweb hdr(host) -i {{ elan_host_name }}.{{ cluster_base_domain }} - use_backend elan-http if elanweb + default_backend elan-http frontend elan-router-https mode tcp option tcplog bind {{ bastion_public_ip_address }}:443 - tcp-request inspect-delay 5s - tcp-request content accept if { req_ssl_hello_type 1 } - acl elanweb req_ssl_sni -i {{ elan_host_name }}.{{ zvmesi_domain_name }} - acl elanweb req_ssl_sni -i {{ elan_host_name }}.{{ cluster_base_domain }} - use_backend elan-https if elanweb + default_backend elan-https backend elan-http mode http - server elan {{ elan_host_name }}.{{ zvmesi_domain_name }}:8080 check + server elan 172.24.26.1:8080 check backend elan-https mode tcp - server elan {{ elan_host_name }}.{{ zvmesi_domain_name }}:8443 check + server elan 172.24.26.1:8443 check frontend stats bind *:8404 ssl crt /etc/haproxy/haproxy-combined.pem diff --git a/local-playbooks/roles/configure-squid/tasks/main.yml b/local-playbooks/roles/configure-squid/tasks/main.yml index dc12172..73bdf40 100644 --- a/local-playbooks/roles/configure-squid/tasks/main.yml +++ b/local-playbooks/roles/configure-squid/tasks/main.yml @@ -26,7 +26,8 @@ - name: Allow traffic at port 3128 firewalld: port: 3128/tcp - permanent: yes + permanent: true + immediate: true zone: "{{ item }}" state: enabled with_items: diff --git a/local-playbooks/roles/copy-pkcs12-to-zvm/tasks/main.yml b/local-playbooks/roles/copy-pkcs12-to-zvm/tasks/main.yml index 7132e26..32e7704 100644 --- a/local-playbooks/roles/copy-pkcs12-to-zvm/tasks/main.yml +++ b/local-playbooks/roles/copy-pkcs12-to-zvm/tasks/main.yml @@ -25,3 +25,8 @@ - name: Stop the FTP server on z/VM # noqa command-instead-of-shell no-changed-when shell: /usr/local/bin/smcli id -T FTPSERVE -H {{ smapi_host | quote }}/44444 -U {{ smapi_user | quote }} -P {{ smapi_password | quote }} + +- name: Run the certificate import task # noqa command-instead-of-shell no-changed-when + shell: + chdir: /usr/local/bin + cmd: LDAPSRVcert.expect Y:{{ zvm_internal_ip_address }} "GSKADMIN BY {{ smapi_user }}" {{ smapi_password }} diff --git a/local-playbooks/roles/create-guest-for-linux/tasks/main.yml b/local-playbooks/roles/create-guest-for-linux/tasks/main.yml index c4ff86e..d4106f7 100644 --- a/local-playbooks/roles/create-guest-for-linux/tasks/main.yml +++ b/local-playbooks/roles/create-guest-for-linux/tasks/main.yml @@ -18,7 +18,7 @@ dest: "/srv/install/rhelks/{{ guest_install_hostname }}.ks" mode: 0644 -- name: Write the parmfile to the reader # noqa no-changed-when command-instead-of-shell +- name: Write the parmfile to the reader # noqa no-changed-when command-instead-of-shell command-instead-of-module shell: curl -Q "SITE FIX 132" -T {{ znetboot_cfg_path }}/{{ guest_name }}.znetboot -B ftp://MAINT.BY.{{ smapi_user | quote }}:{{ smapi_password | quote }}@{{ smapi_host | quote }}/MAINT.200/ # noqa 204 args: warn: false diff --git a/local-playbooks/roles/elanhandlers/handlers/main.yml b/local-playbooks/roles/elanhandlers/handlers/main.yml index 2ddf8c1..dc889c7 100644 --- a/local-playbooks/roles/elanhandlers/handlers/main.yml +++ b/local-playbooks/roles/elanhandlers/handlers/main.yml @@ -32,7 +32,7 @@ 
enabled: yes listen: "Restart httpd" -- name: Restart haproxy +- name: Restart haproxy.service service: name: haproxy.service state: restarted @@ -44,7 +44,7 @@ state: restarted listen: "Restart chronyd" -- name: Enable incrond service +- name: Enable incrond.service systemd: name: incrond state: stopped @@ -56,3 +56,13 @@ cmd: update-ca-trust listen: "Update ca trust" +- name: Restart rsyslog.service + systemd: + name: rsyslog + state: restarted + listen: "Restart syslog" + +- name: Reload systemd + systemd: + daemon_reload: true + listen: "Reload systemd" diff --git a/local-playbooks/roles/install-base-packages/tasks/main.yml b/local-playbooks/roles/install-base-packages/tasks/main.yml index e90584f..2e1161a 100644 --- a/local-playbooks/roles/install-base-packages/tasks/main.yml +++ b/local-playbooks/roles/install-base-packages/tasks/main.yml @@ -8,6 +8,14 @@ mode: 0755 when: ansible_architecture != "s390x" +- name: Add or modify the internal YUM repo + yum_repository: + name: zvmesi + description: z/VM ESI Support Code + baseurl: "{{ local_repo_uri }}" + gpgcheck: no + sslverify: no + - name: Install EPEL repository block: - name: Install EPEL RPM @@ -57,14 +65,6 @@ # dest: /etc/dnf/plugins/local.conf # regexp: "repodir" -- name: Add the internal YUM repo - yum_repository: - name: zvmesi - description: z/VM ESI Support Code - baseurl: "{{ local_repo_uri }}" - gpgcheck: no - sslverify: no - - name: Install base prerequisites yum: name: diff --git a/local-playbooks/roles/setup-finna-response/templates/finna-add-host.yml.j2 b/local-playbooks/roles/setup-finna-response/templates/finna-add-host.yml.j2 index e5514ec..67c2080 100644 --- a/local-playbooks/roles/setup-finna-response/templates/finna-add-host.yml.j2 +++ b/local-playbooks/roles/setup-finna-response/templates/finna-add-host.yml.j2 @@ -1,13 +1,22 @@ #jinja2:variable_start_string:'[%', variable_end_string:'%]', trim_blocks: False --- -- name: Create configuration to update DNS for finna host +- name: Create configuration to update DNS and NFS for finna host hosts: s390x_bastion_workstation vars: zvmesi_domain: "{{zvmesi_domain_name }}." group_domain: "{{ esigroup | lower }}.{{cluster_base_domain }}." - ns_zvmesi: "{{ lookup('dig', lookup('vars', 'zvmesi_domain'), qtype='NS', wantlist=true) }}" + ns_zvmesi: "{{ lookup('dig', lookup('vars', 'zvmesi_domain'), qtype='NS', wantlist=true) }}" ns_group: "{{ lookup('dig', lookup('vars', 'group_domain'), qtype='NS', wantlist=true) }}" - ns_hostname: "{{ sys['hostname'] }}." + ns_hostname: "{{ sys['hostname'] }}." 
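The ns_zvmesi and ns_group variables above use the dig lookup (from the community.general collection, which requires dnspython on the control node); wantlist=true returns the NS records as a list rather than a comma-joined string. A minimal sketch of the same lookup, with an illustrative zone name:

- name: Show the NS records returned by the dig lookup (sketch)
  debug:
    msg: "{{ lookup('dig', 'ibmpoc.internal.', qtype='NS', wantlist=true) }}"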
+ nfs_network: "{{ ansible_default_ipv4['network'] }}/{{ ansible_default_ipv4['netmask'] }}" + + handlers: + - name: Restart nfs-server + service: + name: nfs-server.service + state: restarted + enabled: yes + listen: "Restart nfs" tasks: - name: Read in the variable file @@ -15,6 +24,12 @@ file: /var/spool/finnad/{{ sysname }} name: sys + - name: Add NFS share of the content directory + lineinfile: + line: /opt/content {{ nfs_network }}(rw,sync,no_root_squash,no_subtree_check) + path: /etc/exports + notify: "Restart nfs" + - name: Add or modify TXT for system release nsupdate: key_name: "{{ esigroup | lower }}-key" @@ -115,4 +130,3 @@ line: "{{ sys['hostname'].split('.')[0] }} ansible_user=root ansible_ssh_common_args='-o StrictHostKeyChecking=no'" when: zvm_host_name != sys['sysname'] | lower - diff --git a/local-playbooks/roles/setup-finna-response/templates/finna-host-add.sh.j2 b/local-playbooks/roles/setup-finna-response/templates/finna-host-add.sh.j2 index 6cddcf1..5a994da 100644 --- a/local-playbooks/roles/setup-finna-response/templates/finna-host-add.sh.j2 +++ b/local-playbooks/roles/setup-finna-response/templates/finna-host-add.sh.j2 @@ -9,8 +9,8 @@ SYSNAME=$1; pushd /opt/ansible && ansible-playbook -i inventory -e sysname=${SYSNAME} finna-add-host.yml && popd # Is this a remote ELAN? Push BIND updates if yes -LOCALSYS=$(finna self name) +LOCALSYS=$(/usr/local/bin/finna self name) if [ ${SYSNAME} != ${LOCALSYS} ]; then - MYIP=$(finna self ip) + MYIP=$(/usr/local/bin/finna self ip) pushd /opt/ansible && ansible-playbook -i inventory -e masterip=${MYIP} finna-remote-dns.yml && popd fi diff --git a/local-playbooks/roles/setup-finna-response/templates/finna-remote-dns.yml.j2 b/local-playbooks/roles/setup-finna-response/templates/finna-remote-dns.yml.j2 index 5ab532b..840fc74 100644 --- a/local-playbooks/roles/setup-finna-response/templates/finna-remote-dns.yml.j2 +++ b/local-playbooks/roles/setup-finna-response/templates/finna-remote-dns.yml.j2 @@ -1,5 +1,5 @@ --- -- name: Playbook to push DNS reconfiguration to remote ELANs +- name: Playbook to push DNS and NFS reconfiguration to remote ELANs hosts: s390x_remote_elans handlers: - name: Restart BIND named @@ -7,6 +7,11 @@ name: named-chroot.service state: restarted listen: "Restart named" + - name: Restart Apache + systemd: + name: httpd.service + state: restarted + listen: "Restart httpd" tasks: - name: Update the BIND config fragments @@ -22,3 +27,20 @@ - "{% raw %}{{ esigroup | lower }}{% endraw %}.zvmesi" notify: - "Restart named" + - name: Get the coordinator IP address + shell: + cmd: finna coord ip + register: coord_ip + - name: Add the NFS mount for ESI content + mount: + src: "{% raw %}{{ coord_ip.stdout }}{% endraw %}:/opt/content" + path: /opt/content + opts: rw,sync,hard + state: mounted + fstype: nfs + - name: Permit Apache to read NFS content + seboolean: + name: httpd_use_nfs + state: yes + persistent: yes + notify: Restart httpd \ No newline at end of file diff --git a/local-playbooks/roles/setup-firstboot-ipconf/tasks/main.yml b/local-playbooks/roles/setup-firstboot-ipconf/tasks/main.yml index b4870d2..4c3843c 100644 --- a/local-playbooks/roles/setup-firstboot-ipconf/tasks/main.yml +++ b/local-playbooks/roles/setup-firstboot-ipconf/tasks/main.yml @@ -123,3 +123,5 @@ - { src: "haproxy/haproxy.cfg.j2", dest: "/opt/ansible/templates/haproxy/haproxy.cfg.j2" } - { src: "haproxy/rhocp-front.cfg.j2", dest: "/opt/ansible/templates/haproxy/rhocp-front.cfg.j2" } - { src: "settings.env.j2", dest: "/etc/zvmesi/settings.env" } + - 
{ src: "grafana.ini.j2", dest: "/opt/ansible/templates/grafana.ini.j2" } + - { src: "create-grafana-ini.yml.j2", dest: "/opt/ansible/create-grafana-ini.yml" } diff --git a/local-playbooks/roles/setup-firstboot-ipconf/templates/create-grafana-ini.yml.j2 b/local-playbooks/roles/setup-firstboot-ipconf/templates/create-grafana-ini.yml.j2 new file mode 100644 index 0000000..8041df4 --- /dev/null +++ b/local-playbooks/roles/setup-firstboot-ipconf/templates/create-grafana-ini.yml.j2 @@ -0,0 +1,40 @@ +#jinja2:block_start_string:'[%', block_end_string:'%]', variable_start_string:'[[', variable_end_string:']]' +--- +- name: Create the Grafana INI file + hosts: s390x_bastion_workstation + tasks: + - name: Create grafana.ini + template: + src: grafana.ini.j2 + dest: /etc/grafana/grafana.ini + owner: grafana + group: grafana + mode: 0640 + + - name: Configure Mosquitto passwords + block: + - name: Set password for MQTTGATE + shell: + cmd: mosquitto_passwd -c -b /etc/mosquitto/passwd {{ mqttgate_id }} {{ mqttgate_pass }} + creates: /etc/mosquitto/passwd + - name: Set password for Promtail sub + shell: + cmd: mosquitto_passwd -b /etc/mosquitto/passwd {{ mqtt_promtail_id }} {{ mqtt_promtail_pass }} + - name: Set password for a command sender (WIP) + shell: + cmd: mosquitto_passwd -b /etc/mosquitto/passwd {{ mqtt_consend_id }} {{ mqtt_consend_pass }} + rescue: + - name: Remove a possibly invalid passwd file + file: + path: /etc/mosquitto/passwd + state: absent + + - name: Create environment file for promtail-mqtt + copy: + dest: /etc/zvmesi/promtail-mqtt.env + content: | + MQTTID={{ mqtt_promtail_id }} + MQTTPWD={{ mqtt_promtail_pass }} + mode: 0644 + owner: root + group: root \ No newline at end of file diff --git a/local-playbooks/roles/setup-firstboot-ipconf/templates/grafana.ini.j2 b/local-playbooks/roles/setup-firstboot-ipconf/templates/grafana.ini.j2 new file mode 100644 index 0000000..03521d2 --- /dev/null +++ b/local-playbooks/roles/setup-firstboot-ipconf/templates/grafana.ini.j2 @@ -0,0 +1,25 @@ +#jinja2:block_start_string:'[%', block_end_string:'%]', variable_start_string:'[[', variable_end_string:']]' +[server] +http_addr = +http_port = 3000 +domain = {{ cluster_base_domain }} +root_url = https://{{ elan_host_name }}.{{ esigroup | lower }}.{{ cluster_base_domain | lower }}/grafana +enforce_domain = False +protocol = http +serve_from_sub_path = true + +[paths] +provisioning = /etc/grafana/provisioning + +[security] +allow_embedding = true +csrf_trusted_origins = {{ elan_host_name }}.{{ esigroup | lower }}.{{ cluster_base_domain | lower }} + +[dashboards] +min_refresh_interval = 1s + +[plugins] +allow_loading_unsigned_plugins = performancecopilot-pcp-app,pcp-redis-datasource,pcp-vector-datasource,pcp-bpftrace-datasource,pcp-flamegraph-panel,pcp-breadcrumbs-panel,pcp-troubleshooting-panel,performancecopilot-redis-datasource,performancecopilot-vector-datasource,performancecopilot-bpftrace-datasource,performancecopilot-flamegraph-panel,performancecopilot-breadcrumbs-panel,performancecopilot-troubleshooting-panel + +[feature_toggles] +publicDashboards = true diff --git a/local-playbooks/roles/setup-firstboot-ipconf/templates/ipconf.sh.j2 b/local-playbooks/roles/setup-firstboot-ipconf/templates/ipconf.sh.j2 index 929ec1f..64609e5 100644 --- a/local-playbooks/roles/setup-firstboot-ipconf/templates/ipconf.sh.j2 +++ b/local-playbooks/roles/setup-firstboot-ipconf/templates/ipconf.sh.j2 @@ -135,12 +135,15 @@ EOF # # Update the bastion_public_ip_address in the group_vars # sed -i.bak 
'/bastion_public_ip_address/c \bastion_public_ip_address: '\"${IPADDR}\" /opt/ansible/inventory/group_vars/all.yml # Run Ansible playbook to update the BIND configuration - pushd /opt/ansible && ansible-playbook -i inventory update-dns-domain.yml && popd + (cd /opt/ansible && ansible-playbook -i inventory update-dns-domain.yml) # start named systemctl start named-chroot.service # Regenerate all the certificates - pushd /opt/ansible && ansible-playbook -i inventory regen-certificates.yml && popd + (cd /opt/ansible && ansible-playbook -i inventory regen-certificates.yml) + + # Configure Grafana + (cd /opt/ansible && ansible-playbook -i inventory create-grafana-ini.yml) # update Apache configuration sed -i.bak -e "s/wsc.ibm/${DOMAIN}/g" -e "s/ocp-z-poc/${CLUSTER_NAME}/g" \ @@ -171,6 +174,23 @@ EOF systemctl enable haproxy.service systemctl restart haproxy.service echo "HA-Proxy restarted" + # restart Grafana + systemctl enable grafana-server.service + systemctl restart grafana-server.service + echo "Grafana restarted" + # restart Mosquitto + systemctl enable mosquitto.service + systemctl restart mosquitto.service + echo "Mosquitto restarted" + # restart Loki + systemctl enable loki.service + systemctl restart loki.service + echo "Loki restarted" + # restart Promtail + systemctl enable promtail-mqtt.service + systemctl restart promtail-mqtt.service + echo "Promtail restarted" + ### This is the old DNS change, comment this before removal # # upstream DNS is harder, have to change the bind config... # if [ -v DNS ]; then @@ -188,8 +208,6 @@ EOF # Start incrond systemctl enable --now incrond.service - # Start up the DASD configuration script because we're at a new site - systemctl start ocp-dasd.service # Prevent this service from starting next time systemctl disable zvm-ipconf.service # Announce we've finished diff --git a/local-playbooks/roles/setup-firstboot-ipconf/templates/regen-certificates.yml.j2 b/local-playbooks/roles/setup-firstboot-ipconf/templates/regen-certificates.yml.j2 index d89ea6b..91bbea0 100644 --- a/local-playbooks/roles/setup-firstboot-ipconf/templates/regen-certificates.yml.j2 +++ b/local-playbooks/roles/setup-firstboot-ipconf/templates/regen-certificates.yml.j2 @@ -1,8 +1,8 @@ --- -- name: regenerate certificates for the new domain name +- name: Regenerate certificates for the new domain name hosts: s390x_bastion_workstation tasks: - - name: generate the certificates + - name: Generate the certificates include_tasks: tasks/create-certificate.yml with_items: - { filename: "cockpit", common_name: "{% raw %}{{ elan_host_name }}.{{ zvmesi_domain_name }}{% endraw %}", subject_alt_name: ",DNS:{% raw %}{{ elan_host_name }}.{{ cluster_base_domain }}{% endraw %}" } @@ -12,21 +12,26 @@ - { filename: "zVMLDAP", common_name: "LDAPSRV.ibmpoc.internal", subject_alt_name: ",DNS:{% raw %}{{ zvm_host_name }}.{{ zvmesi_domain_name }},DNS:{{ zvm_host_name }}.{{ cluster_base_domain }},IP:{{ zvm_ip_address }}{% endraw %},IP:{{ zvm_internal_ip_address }}" } - { filename: "registry", common_name: "registry.{% raw %}{{ cluster_base_domain }}{% endraw %}" } - - name: create combined PEM file for Cockpit + - name: Create combined PEM file for Cockpit shell: cmd: cat certs/cockpit.cert certs/oqsCA.cert private/cockpit.pem > /etc/cockpit/ws-certs.d/cockpit-combined.cert chdir: /etc/pki/tls/ - - name: create combined PEM file for HAProxy + - name: Create combined PEM file for HAProxy shell: cmd: cat certs/haproxy.cert certs/oqsCA.cert private/haproxy.pem > /etc/haproxy/haproxy-combined.pem chdir: 
/etc/pki/tls/ - - name: create the bundle for registry + - name: Create the bundle for registry shell: cmd: cat certs/registry.cert certs/oqsCA.cert private/cockpit.pem > /opt/registry/certs/registry.cert chdir: /etc/pki/tls + - name: Create the bundle for Grafana + shell: + cmd: cat certs/httpd.cert certs/oqsCA.cert certs/oqsRootCA.cert > /etc/grafana/grafana.crt + chdir: /etc/pki/tls + - name: start the FTP server on z/VM # noqa no-changed-when shell: /usr/local/bin/smcli ia -T FTPSERVE -H {{ zvm_internal_ip_address|quote }}/44444 -U {{ smapi_user|quote }} -P {{ smapi_password|quote }} && sleep 2 register: ret diff --git a/local-playbooks/roles/setup-grafana-log-viewer/files/dashboard-icic.json b/local-playbooks/roles/setup-grafana-log-viewer/files/dashboard-icic.json new file mode 100644 index 0000000..ea96f73 --- /dev/null +++ b/local-playbooks/roles/setup-grafana-log-viewer/files/dashboard-icic.json @@ -0,0 +1,88 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 3, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "loki", + "uid": "elanLoki" + }, + "description": "Log for the automated deployment of ICIC", + "gridPos": { + "h": 18, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": false, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Ascending", + "wrapLogMessage": false + }, + "targets": [ + { + "datasource": { + "type": "loki", + "uid": "elanLoki" + }, + "editorMode": "builder", + "expr": "{job=\"icicbuild\"} |= ``", + "queryType": "range", + "refId": "A" + } + ], + "title": "ICIC Build Log", + "type": "logs" + } + ], + "refresh": "5s", + "schemaVersion": 37, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "ICIC Build Log", + "uid": "DMWh7XUSk", + "version": 1, + "weekStart": "" + } + \ No newline at end of file diff --git a/local-playbooks/roles/setup-grafana-log-viewer/files/dashboard-logs.json b/local-playbooks/roles/setup-grafana-log-viewer/files/dashboard-logs.json new file mode 100644 index 0000000..843e0f6 --- /dev/null +++ b/local-playbooks/roles/setup-grafana-log-viewer/files/dashboard-logs.json @@ -0,0 +1,213 @@ +{ + "__inputs": [ + { + "name": "elanLoki", + "label": "Loki", + "description": "", + "type": "datasource", + "pluginId": "loki", + "pluginName": "Loki" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "9.2.10" + }, + { + "type": "panel", + "id": "logs", + "name": "Logs", + "version": "" + }, + { + "type": "datasource", + "id": "loki", + "name": "Loki", + "version": "1.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + 
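These dashboard JSON files bind every panel to a Loki datasource through the fixed uid elanLoki, so they can be provisioned verbatim as long as a datasource with that uid exists; the setup-grafana-log-viewer role later in this diff provisions exactly that. A minimal sketch of the matching datasource entry, assuming Loki on its default local port:

# Grafana datasource provisioning (sketch); the uid must match the dashboards
apiVersion: 1
datasources:
  - name: Loki
    uid: elanLoki
    type: loki
    url: http://localhost:3100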
"editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [], + "title": "Log messages", + "type": "row" + }, + { + "datasource": { + "type": "loki", + "uid": "elanLoki" + }, + "description": "Output from consoles of z/VM IDs", + "gridPos": { + "h": 13, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 4, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": false, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Ascending", + "wrapLogMessage": false + }, + "targets": [ + { + "datasource": { + "type": "loki", + "uid": "elanLoki" + }, + "editorMode": "builder", + "expr": "{application=\"VMCONS\", zvm_guest=~\"${guestname}\"} |= `` | line_format \"{{ .zvm_guest }} {{__line__}}\"", + "queryType": "range", + "refId": "A" + } + ], + "title": "Console Messages", + "type": "logs" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 14 + }, + "id": 6, + "panels": [], + "title": "VMEVENT entries", + "type": "row" + }, + { + "datasource": { + "type": "loki", + "uid": "elanLoki" + }, + "description": "Messages from the *VMEVENT service", + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 15 + }, + "id": 8, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": false, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Ascending", + "wrapLogMessage": false + }, + "targets": [ + { + "datasource": { + "type": "loki", + "uid": "elanLoki" + }, + "editorMode": "builder", + "expr": "{application=\"VMEVENT\", zvm_guest=~\"${guestname}\"} |= ``", + "queryType": "range", + "refId": "A" + } + ], + "title": "VMEVENT messages", + "type": "logs" + } + ], + "refresh": "5s", + "schemaVersion": 37, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "loki", + "uid": "elanLoki" + }, + "definition": "", + "description": "Filter on z/VM guest name", + "hide": 0, + "includeAll": true, + "label": "z/VM guest name", + "multi": true, + "name": "guestname", + "options": [], + "query": { + "label": "zvm_guest", + "refId": "LokiVariableQueryEditor-VariableQuery", + "stream": "", + "type": 1 + }, + "refresh": 2, + "regex": "/^(?!(?:localhost)$)(.+)/", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "z/VM ESI System Messages", + "uid": "RfzH9eoSz", + "version": 4, + "weekStart": "" + } \ No newline at end of file diff --git a/local-playbooks/roles/setup-grafana-log-viewer/files/promtail-cfg-journal.yaml b/local-playbooks/roles/setup-grafana-log-viewer/files/promtail-cfg-journal.yaml new file mode 100644 index 0000000..8dcc285 --- /dev/null +++ b/local-playbooks/roles/setup-grafana-log-viewer/files/promtail-cfg-journal.yaml @@ -0,0 +1,18 @@ +server: + http_listen_port: 0 + grpc_listen_port: 0 + +positions: + filename: /tmp/positions_journal.yaml + +clients: + - url: http://localhost:3100/loki/api/v1/push + +scrape_configs: + - job_name: journal + journal: + labels: + job: journal + relabel_configs: + - source_labels: ['__journal__systemd_unit'] + target_label: 'unit' diff --git 
a/local-playbooks/roles/setup-grafana-log-viewer/files/promtail-cfg-syslog.yaml b/local-playbooks/roles/setup-grafana-log-viewer/files/promtail-cfg-syslog.yaml new file mode 100644 index 0000000..76117d3 --- /dev/null +++ b/local-playbooks/roles/setup-grafana-log-viewer/files/promtail-cfg-syslog.yaml @@ -0,0 +1,49 @@ +server: + http_listen_port: 0 + grpc_listen_port: 0 + +positions: + filename: /tmp/positions_syslog.yaml + +clients: + - url: http://localhost:3100/loki/api/v1/push + +scrape_configs: + - job_name: syslog + syslog: + listen_address: 127.0.0.1:20514 + label_structured_data: false + labels: + job: syslog + use_incoming_timestamp: true + idle_timeout: 12h + relabel_configs: + - source_labels: [__syslog_message_hostname] + target_label: hostname + - source_labels: [__syslog_message_severity] + target_label: level + - source_labels: [__syslog_message_app_name] + target_label: application + - source_labels: [__syslog_message_facility] + target_label: facility + pipeline_stages: + - match: + pipeline_name: vmcons + selector: '{application="VMCONS"}' + stages: + - labels: + zvm_guest: hostname + - labeldrop: + - hostname + - level + - facility + - match: + pipeline_name: vmevent + selector: '{application="VMEVENT"}' + stages: + - labels: + zvm_guest: hostname + - labeldrop: + - hostname + - level + - facility diff --git a/local-playbooks/roles/setup-grafana-log-viewer/files/promtail.service b/local-playbooks/roles/setup-grafana-log-viewer/files/promtail.service new file mode 100644 index 0000000..d73b358 --- /dev/null +++ b/local-playbooks/roles/setup-grafana-log-viewer/files/promtail.service @@ -0,0 +1,11 @@ +[Unit] +Description=Promtail service for %I +Requires=network-online.target loki.service +After=network-online.target named-chroot.service loki.service +[Service] +Type=exec +ExecStart=/usr/local/bin/promtail -log.level=error -config.file=/etc/loki/promtail-%i.yaml +RemainAfterExit=false +StandardOutput=journal +[Install] +WantedBy=default.target diff --git a/local-playbooks/roles/setup-grafana-log-viewer/meta/main.yml b/local-playbooks/roles/setup-grafana-log-viewer/meta/main.yml new file mode 100644 index 0000000..09a1bed --- /dev/null +++ b/local-playbooks/roles/setup-grafana-log-viewer/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - elanhandlers diff --git a/local-playbooks/roles/setup-grafana-log-viewer/tasks/main.yml b/local-playbooks/roles/setup-grafana-log-viewer/tasks/main.yml new file mode 100644 index 0000000..4f81f20 --- /dev/null +++ b/local-playbooks/roles/setup-grafana-log-viewer/tasks/main.yml @@ -0,0 +1,218 @@ +--- +- name: Install Grafana and Mosquitto and Loki + dnf: + name: "{{ item }}" + state: present + loop: + - grafana + - mosquitto + - elan-loki + +- name: Create basic grafana.ini + copy: + content: | + [server] + http_addr = + http_port = 3000 + domain = {{ cluster_base_domain }} + root_url = http://{{ bastion_public_ip_address }}:3000/ + enforce_domain = False + protocol = http + ; serve_from_sub_path = true + + [paths] + provisioning = /etc/grafana/provisioning + + [security] + allow_embedding = true + + [dashboards] + min_refresh_interval = 1s + + [plugins] + allow_loading_unsigned_plugins = 
performancecopilot-pcp-app,pcp-redis-datasource,pcp-vector-datasource,pcp-bpftrace-datasource,pcp-flamegraph-panel,pcp-breadcrumbs-panel,pcp-troubleshooting-panel,performancecopilot-redis-datasource,performancecopilot-vector-datasource,performancecopilot-bpftrace-datasource,performancecopilot-flamegraph-panel,performancecopilot-breadcrumbs-panel,performancecopilot-troubleshooting-panel + + [feature_toggles] + publicDashboards = true + dest: /etc/grafana/grafana.ini + owner: grafana + mode: 0644 + +- name: Allow traffic at port 1883 for Mosquitto + firewalld: + port: 1883/tcp + permanent: true + immediate: true + zone: "{{ item }}" + state: enabled + with_items: + - internal + - public + +- name: Configure Mosquitto + copy: + dest: /etc/mosquitto/mosquitto.conf + content: | + # Configuration of Mosquitto + password_file /etc/mosquitto/passwd + listener 1883 + protocol mqtt + listener 9001 + protocol websockets + mode: 0644 + owner: mosquitto + group: mosquitto + backup: yes + +- name: Create provisioning file for Loki datasource + copy: + dest: /etc/grafana/provisioning/datasources/default.yaml + content: | + apiVersion: 1 + + datasources: + - name: Loki + uid: elanLoki + type: loki + url: http://localhost:3100 + mode: 0644 + owner: grafana + group: grafana + +- name: Create provisioning file for Loki dashboard + copy: + dest: /etc/grafana/provisioning/dashboards/default.yaml + content: | + apiVersion: 1 + + providers: + - name: Default + folder: "z/VM Logging dashboards" + type: file + options: + path: + /etc/grafana/dashboards + mode: 0644 + owner: grafana + group: grafana + +- name: Create provisioning files for dashboard + copy: + src: dashboard-{{ item }}.json + dest: /etc/grafana/dashboards/ + mode: 0644 + owner: grafana + group: grafana + loop: + - logs + - icic + +- name: Create the simple Promtail instance configs + template: + src: promtail-cfg.yaml.j2 + dest: /etc/loki/promtail-{{ item.job }}.yaml + mode: 0644 + owner: root + group: root + loop: + - { job: "httpd", path: "/var/log/httpd/*_log" } + - { job: "phpfpm", path: "/var/log/php-fpm/*.log" } + - { job: "squid", path: "/var/log/squid/*.log" } + +- name: Create the sd Promtail instance configs + template: + src: promtail-cfg-sd.yaml.j2 + dest: /etc/loki/promtail-{{ item.job }}.yaml + mode: 0644 + owner: root + group: root + loop: + - { job: "icicbuild" } + - { job: "varlog" } + +- name: Create the YAML files for Promtail sd instances + template: + src: promtail-cfg-sd-multi.yaml.j2 + dest: /etc/loki/promtail-{{ item.file }}-sd.yaml + mode: 0644 + owner: root + group: root + loop: + - file: "icicbuild" + jobs: + - { name: "icicbuild", path: "/var/www/html/iciclog*.txt" } + - file: "varlog" + jobs: + - { name: "dnflogs", path: "/var/log/dnf*log" } + - { name: "hawkeylog", path: "/var/log/hawkey.log" } + - { name: "kdumplog", path: "/var/log/kdump.log" } + - { name: "lastlog", path: "/var/log/lastlog" } + +- name: Create Promtail config for journald and syslog + copy: + src: promtail-cfg-{{ item }}.yaml + dest: /etc/loki/promtail-{{ item }}.yaml + mode: 0644 + owner: root + group: root + loop: + - journal + - syslog + +- name: Create systemd unit template + copy: + src: promtail.service + dest: /usr/local/lib/systemd/system/promtail@.service + mode: 0644 + owner: root + group: root + +- name: Define Loki port with correct SElinux type + seport: + ports: 3100 + proto: tcp + setype: http_port_t + state: present + +- name: Create and enable systemd units for Promtail + systemd: + name: promtail@{{ item }} + state: started + 
enabled: true + daemon_reload: true + loop: + - httpd + - icicbuild + - journal + - phpfpm + - squid + - syslog + - varlog + +- name: Configure firewalld for syslog + firewalld: + service: syslog + state: enabled + permanent: true + immediate: true + zone: "{{ item }}" + loop: + - internal + - public + +- name: Configure local Rsyslog for forwarding + copy: + content: | + # Rsyslog config file + module(load="imudp") + + ruleset(name="promtail"){ + *.* action(type="omfwd" protocol="tcp" target="127.0.0.1" port="20514" Template="RSYSLOG_SyslogProtocol23Format" TCP_Framing="octet-counted" KeepAlive="on") + } + + input(type="imudp" port="514" ruleset="promtail") + dest: /etc/rsyslog.d/promtail.conf + mode: 0644 + owner: root + group: root + notify: Restart syslog diff --git a/local-playbooks/roles/setup-grafana-log-viewer/templates/promtail-cfg-sd-multi.yaml.j2 b/local-playbooks/roles/setup-grafana-log-viewer/templates/promtail-cfg-sd-multi.yaml.j2 new file mode 100644 index 0000000..b8e667c --- /dev/null +++ b/local-playbooks/roles/setup-grafana-log-viewer/templates/promtail-cfg-sd-multi.yaml.j2 @@ -0,0 +1,7 @@ +{% for line in item.jobs %} +- targets: + - localhost + labels: + job: "{{ line.name }}" + __path__: "{{ line.path }}" +{% endfor %} diff --git a/local-playbooks/roles/setup-grafana-log-viewer/templates/promtail-cfg-sd.yaml.j2 b/local-playbooks/roles/setup-grafana-log-viewer/templates/promtail-cfg-sd.yaml.j2 new file mode 100644 index 0000000..38bb598 --- /dev/null +++ b/local-playbooks/roles/setup-grafana-log-viewer/templates/promtail-cfg-sd.yaml.j2 @@ -0,0 +1,15 @@ +server: + http_listen_port: 0 + grpc_listen_port: 0 + +positions: + filename: /tmp/positions_{{ item.job }}.yaml + +clients: + - url: http://localhost:3100/loki/api/v1/push + +scrape_configs: +- job_name: "{{ item.job }}" + file_sd_configs: + - files: + - /etc/loki/promtail-{{ item.job }}-sd.yaml diff --git a/local-playbooks/roles/setup-grafana-log-viewer/templates/promtail-cfg.yaml.j2 b/local-playbooks/roles/setup-grafana-log-viewer/templates/promtail-cfg.yaml.j2 new file mode 100644 index 0000000..de8b9a2 --- /dev/null +++ b/local-playbooks/roles/setup-grafana-log-viewer/templates/promtail-cfg.yaml.j2 @@ -0,0 +1,18 @@ +server: + http_listen_port: 0 + grpc_listen_port: 0 + +positions: + filename: /tmp/positions_{{ item.job }}.yaml + +clients: + - url: http://localhost:3100/loki/api/v1/push + +scrape_configs: +- job_name: "{{ item.job }}logs" + static_configs: + - targets: + - localhost + labels: + job: "{{ item.job }}" + __path__: "{{ item.path }}" diff --git a/local-playbooks/roles/setup-icic-deployer/tasks/main.yml b/local-playbooks/roles/setup-icic-deployer/tasks/main.yml index e4f2875..2bc3d54 100644 --- a/local-playbooks/roles/setup-icic-deployer/tasks/main.yml +++ b/local-playbooks/roles/setup-icic-deployer/tasks/main.yml @@ -20,20 +20,26 @@ - { src: "icic-cert-bundle.j2", dest: "/opt/ansible/templates/icic-cert-bundle.j2"} - { src: "icic-config-properties.j2", dest: "/opt/ansible/templates/icic-config-properties.j2"} - { src: "icic-ldap.exp.j2", dest: "/opt/ansible/files/icic-ldap.exp" } - - { src: "ICICCMP0.direct.j2", dest: "/opt/ansible/files/ICICCMP0.direct" } - { src: "ICICMGT0.direct.j2", dest: "/opt/ansible/files/ICICMGT0.direct" } + - { src: "ICICCMP1.direct.j2", dest: "/opt/ansible/files/ICICCMP1.direct" } - { src: "icicdflt.direct.j2", dest: "/opt/ansible/files/icicdflt.direct" } - { src: "setup-icic-compute.yml.j2", dest: "/opt/ansible/setup-icic-compute.yml" } + - { src: 
"setup-icic-compute-remote.yml.j2", dest: "/opt/ansible/setup-icic-compute-remote.yml" } - { src: "setup-icic-ldap.yml.j2", dest: "/opt/ansible/setup-icic-ldap.yml" } - { src: "setup-icic-management.yml.j2", dest: "/opt/ansible/setup-icic-management.yml" } - name: Set up the build script and incron stuff block: - - name: Copy the script + - name: Copy the main script template: src: icic-setup.sh.j2 dest: /usr/local/bin/icic-setup.sh mode: 0750 + - name: Copy the remote-deployer script + template: + src: icic-setup-remote.sh.j2 + dest: /usr/local/bin/icic-setup-remote.sh + mode: 0750 - name: Copy the incrontab file template: src: incrontab-icic-build.j2 diff --git a/local-playbooks/roles/setup-icic-deployer/templates/ICICCMP0.direct.j2 b/local-playbooks/roles/setup-icic-deployer/templates/ICICCMP1.direct.j2 similarity index 94% rename from local-playbooks/roles/setup-icic-deployer/templates/ICICCMP0.direct.j2 rename to local-playbooks/roles/setup-icic-deployer/templates/ICICCMP1.direct.j2 index 6aac34c..43d0c4a 100644 --- a/local-playbooks/roles/setup-icic-deployer/templates/ICICCMP0.direct.j2 +++ b/local-playbooks/roles/setup-icic-deployer/templates/ICICCMP1.direct.j2 @@ -1,4 +1,4 @@ -USER ICICCMP0 ZVM4DEMO 16G 32G G +USER ICICCMP1 ZVM4DEMO 16G 32G G INCLUDE IBMDFLT ACCOUNT LINUX COMMAND SET RUN ON diff --git a/local-playbooks/roles/setup-icic-deployer/templates/icic-setup-remote.sh.j2 b/local-playbooks/roles/setup-icic-deployer/templates/icic-setup-remote.sh.j2 new file mode 100644 index 0000000..ffd7936 --- /dev/null +++ b/local-playbooks/roles/setup-icic-deployer/templates/icic-setup-remote.sh.j2 @@ -0,0 +1,293 @@ +#!/bin/bash -e +# ================================================================= +# Licensed Materials - Property of IBM +# +# (c) Copyright IBM Corp. 2024 All Rights Reserved +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
+# ================================================================= +# +# icic-setup.sh +# Set up ICIC to the z/VM system - Version 2.0 +# - Script is deployed by Ansible (TBC) +# - Run at boot time if ICIC is requested +# - Updated for modularisation +# + +# SMAPI credentials are the same each time so set once +source /etc/zvmesi/settings.env + +# Setup some vars: +# The version to be installed is an environment variable from the host Ansible +# The code is in a module file in the "content" directory +#icicver=$(cat {{ webroot }}/.secret/.icicver) +modpath="/opt/content/icic-${icicver}.esi" +icicpath="/opt/icic/${icicver}" + +# set the size of the minidisks needed +mdsizeckd=10016 # This is 3390-9 less one cylinder +mdsizefba=20971488 # This is 10GB less 32 blocks (8 pages) + +# Add the ICIC compute node -- the name will come from environment +#iciccmp="ICICCMP1" +icicguests="${iciccmp}" +# set a couple of variables +mgtspaceckd=50000 # This is adding approx 35GB +cmpspaceckd=110000 # This is adding approx 75GB +mgtspacefba=67108864 # This is adding approx 32GB +cmpspacefba=150994944 # This is adding approx 72GB +extnetdev=encad0 + +# pull in the existing IP details +source <(grep ^NETMASK /etc/sysconfig/network-scripts/ifcfg-${extnetdev}) +source <(grep ^PREFIX /etc/sysconfig/network-scripts/ifcfg-${extnetdev}) +source <(grep ^GATEWAY /mnt/znetboot/ZVMIP.CONF) +source <(grep ^DOMAIN /mnt/znetboot/ZVMIP.CONF); DOMAIN=${DOMAIN,,} +source <(grep ^DNS /mnt/znetboot/ZVMIP.CONF) +source <(grep ^VLAN /mnt/znetboot/ZVMIP.CONF) + +nextip() { + IP=$1 + IP_HEX=$(printf '%.2X%.2X%.2X%.2X\n' `echo $IP | sed -e 's/\./ /g'`) + NEXT_IP_HEX=$(printf %.8X `echo $(( 0x$IP_HEX + 1 ))`) + NEXT_IP=$(printf '%d.%d.%d.%d\n' `echo $NEXT_IP_HEX | sed -r 's/(..)/0x\1 /g'`) + echo "$NEXT_IP" +} + +netconf() { + HN=$1 + IP=$2 + echo "Updating network config for ${HN} for IP ${IP}..." + sed -i.bak "/IPADDR/s/^IPADDR=.*$/IPADDR=${IP}/ ; \ + /NETMASK/s/^NETMASK=.*$/NETMASK=${NETMASK}/ ; \ + /PREFIX/s/^PREFIX=.*$/PREFIX=${PREFIX}/ ; \ + /GATEWAY/s/^GATEWAY=.*$/GATEWAY=${GATEWAY}/ " /mnt/sysimage/etc/sysconfig/network-scripts/ifcfg-${extnetdev} + if [ -v DNS1 ]; then + echo "DNS1=${DNS1}" >> /mnt/sysimage/etc/sysconfig/network-scripts/ifcfg-${extnetdev} + if [ -v DNS2 ]; then + echo "DNS2=${DNS2}" >> /mnt/sysimage/etc/sysconfig/network-scripts/ifcfg-${extnetdev} + if [ -v DNS3 ]; then + echo "DNS3=${DNS3}" >> /mnt/sysimage/etc/sysconfig/network-scripts/ifcfg-${extnetdev} + fi + fi + fi + # Write out the new hostname + echo ${HN,,}.${DOMAIN} > /mnt/sysimage/etc/hostname + # Append hostname to /etc/hosts + echo "${cmpip} ${iciccmp,,}.${DOMAIN} ${iciccmp,,}" >> /mnt/sysimage/etc/hosts + # write the LDAP hostname detail to /etc/hosts + echo "172.24.26.10 ldapsrv.ibmpoc.internal" >> /mnt/sysimage/etc/hosts + # copy the CA certificates + cp -a /etc/pki/tls/certs/oqsRootCA.cert /mnt/sysimage/etc/pki/ca-trust/source/anchors/oqsRootCA.pem + cp -a /etc/pki/tls/certs/oqsCA.cert /mnt/sysimage/etc/pki/ca-trust/source/anchors/oqsCA.pem +} + +growguest() { + # Add extra disk space to the guest. + # ICIC requirement is 80GB on the compute node. + # This means an extra 72-75GB on the compute. + # From the available disks, let's find out what space is available and grab + # space until we get what's needed. 
+ + GUEST=$1 + SPACE=$2 + + # list the available volumes, filtering out anything smaller than half the size of the initial disk + mapfile -t dasds < <(smcli ivsqd -T IBMAUTO -q 2 -e 3 ${smapiauth} -n LINUX | awk -v size=$((mdsize / 2)) '{ if ($4 >= size) {print} }') + # run through a loop to accumulate disk space + devnum=201; dasdnum=0; + until [ ${SPACE} -eq 0 ]; do + # grab an available space + IFS=" " read -r -a dasd <<< ${dasds[$((dasdnum++))]} + IFS=" " avail=${dasd[3]} + # if there is no more DASD, avail will either not be a number or be blank + if [ -z "${avail}" ] || [ ! -z "${avail//[0-9]}" ]; then + echo "No DASD volume available, we are ${SPACE} too short" + break + fi + if [ "${avail}" -ge "${SPACE}" ]; then + # This disk has more free space than we need, just grab it + alloc=${SPACE} + SPACE=0 + else + # This disk doesn't have enough, grab all we can and we'll need to come back again + alloc=${avail} + SPACE=$(( SPACE - alloc )) + fi + # issue an smcli idicrd for the dasd + echo "Allocating ${GUEST} mdisk ${devnum} with space ${alloc} onto volume ${dasd[0]}..." + smcli idicrd -T ${GUEST} -a $((devnum++)) -d X -t AUTOV -n ${dasd[0]} -u 1 -s ${alloc} -m MR -f 1 ${smapiauth} + done +} + +# check if SMAPI is active +if ! systemctl is-active --quiet smapi.service ; then + echo "SMAPI is not operating, cannot continue" + exit 5 +else + echo "SMAPI listener active, continuing..." +fi + +# Set up the mount of the ICIC module content +cat <<EOF >"/etc/auto.icic-install-${icicver}" +${icicpath} -fstype=squashfs,exec :${modpath} +EOF +cat <<EOF >"/etc/auto.master.d/icic-install-${icicver}.autofs" +/- /etc/auto.icic-install-${icicver} +EOF +systemctl reload autofs + +if ! ls ${icicpath}/icicimg.txt 2>/dev/null ; then + echo "ICIC content module file could not be mounted!" + exit 6 +fi + +# tmpip was calculated as shown here, now it's passed in environment +# fetch our current IP address, and strip the netmask off the end +#myip=$(ip -br addr show dev encad0 | awk '{print $3}') # the devname in here has to be set using Ansible too +#tmpip=${myip%/*} +IFS=" "; + +# Look for what type of space we have: if we find no "3390-" then it's FBA. +if [ "$(smcli ivsqd -T IBMAUTO -q 3 -e 3 ${smapiauth} -n LINUX | grep 3390- | wc -l)" == "0" ]; then + disktype=FBA + mdsize=${mdsizefba} + mgtspace=${mgtspacefba} + cmpspace=${cmpspacefba} +else + disktype=ECKD + mdsize=${mdsizeckd} + mgtspace=${mgtspaceckd} + cmpspace=${cmpspaceckd} +fi + +# If VLAN is set, set up a VLAN parameter for the NICDEF statements +if [ ! -z ${VLAN} ]; then + vlanparm="VLAN ${VLAN}" +else + vlanparm="" +fi +# list the available volumes with sufficient free space +mapfile -t dasds < <(smcli ivsqd -T IBMAUTO -q 2 -e 3 ${smapiauth} -n LINUX | awk -v size=${mdsize} '{ if ($4 >= size) {print} }') + +# add the compute guest +smcli icd -T ${iciccmp} ${smapiauth} < <(sed -e "s/VLANPLACE/${vlanparm}/" -e "s/ICMPCMP1/${iciccmp}" /sys/bus/ccw/devices/0.0.1200/raw_track_access +if [ ${disktype} == "ECKD" ]; then + chccwdev --attribute raw_track_access=1 -e 1200 +fi +# Fetch the disk image file +icicimg=$(cat ${icicpath}/icicimg.txt 2>/dev/null) +if [ ${disktype} == "FBA" ]; then + icicimg=$(echo ${icicimg} | sed -e 's/-/-fba-/g') +fi +echo "Restoring ${icicimg%.*}..."
+
+# tmpip was calculated as shown here, now it's passed in environment
+# fetch our current IP address, and strip the netmask off the end
+#myip=$(ip -br addr show dev encad0 | awk '{print $3}') # the devname in here has to be set using Ansible too
+#tmpip=${myip%/*}
+IFS=" ";
+
+# Look for what type of space we have: if we find no "3390-" then it's FBA.
+if [ "$(smcli ivsqd -T IBMAUTO -q 3 -e 3 ${smapiauth} -n LINUX | grep 3390- | wc -l)" == "0" ]; then
+    disktype=FBA
+    mdsize=${mdsizefba}
+    mgtspace=${mgtspacefba}
+    cmpspace=${cmpspacefba}
+else
+    disktype=ECKD
+    mdsize=${mdsizeckd}
+    mgtspace=${mgtspaceckd}
+    cmpspace=${cmpspaceckd}
+fi
+
+# If VLAN is set, set up a VLAN parameter for the NICDEF statements
+if [ ! -z ${VLAN} ]; then
+    vlanparm="VLAN ${VLAN}"
+else
+    vlanparm=""
+fi
+# list the available volumes with sufficient free space
+mapfile -t dasds < <(smcli ivsqd -T IBMAUTO -q 2 -e 3 ${smapiauth} -n LINUX | awk -v size=${mdsize} '{ if ($4 >= size) {print} }')
+
+# add the compute guest, feeding an edited copy of the prototype directory to smcli
+smcli icd -T ${iciccmp} ${smapiauth} < <(sed -e "s/VLANPLACE/${vlanparm}/" -e "s/ICICCMP1/${iciccmp}/" ${icicpath}/ICICCMP1.direct)
+
+# bring the image disk online; ECKD needs raw-track access for the restore
+# (the attribute is also visible at /sys/bus/ccw/devices/0.0.1200/raw_track_access)
+if [ ${disktype} == "ECKD" ]; then
+    chccwdev --attribute raw_track_access=1 -e 1200
+else
+    chccwdev -e 1200
+fi
+# Fetch the disk image file
+icicimg=$(cat ${icicpath}/icicimg.txt 2>/dev/null)
+if [ ${disktype} == "FBA" ]; then
+    icicimg=$(echo ${icicimg} | sed -e 's/-/-fba-/g')
+fi
+echo "Restoring ${icicimg%.*}..."
+zcat ${icicpath}/${icicimg} | tee \
+    >(dd of=/dev/$(lsdasd | grep 0.0.1200 | awk '{print $3}') bs=64k iflag=fullblock oflag=direct) >/dev/null
+sync;sync;sync
+sleep 1
+
+# There should not be anything holding the disks open, so using the "safe" option should be enough to release them
+until chccwdev -s 1200 ; do
+    echo "Waiting for DASD to release..."
+    sleep 1;
+done
+
+# FIXME -- will get told IP via environment
+cmpip=$(nextip ${tmpip})
+echo "Telling Ansible about our IP addresses and disk type..."
+cat >> /opt/ansible/inventory/group_vars/all/runtime.yml <<EOF
+icic_compute_ip_address: ${cmpip}
+icic_disk_type: ${disktype}
+EOF
+
+# bring the disk back online without raw-track access so the image volume group can be mounted
+if [ ${disktype} == "ECKD" ]; then
+    chccwdev --attribute raw_track_access=0 -e 1200
+else
+    chccwdev -e 1200
+fi
+# give the VG a little time to settle before mounting
+sleep 3
+vgchange -ay
+# derive the name of the root LV in the image
+rootLV=$(lvs | grep "^[[:space:]]*root.*rhel8img" | awk '{print $1;}')
+# go ahead and mount
+mount /dev/rhel_rhel8img/${rootLV} /mnt/sysimage
+# change the interface details
+netconf ${iciccmp} ${cmpip}
+# Purge the old SSH host keys for regeneration at boot
+rm -f /mnt/sysimage/etc/ssh/ssh_host_*
+# Add the SSH keys from here to the compute node
+cat /root/.ssh/id_ed25519.pub >> /mnt/sysimage/root/.ssh/authorized_keys
+cat /root/.ssh/id_rsa_icic.pub >> /mnt/sysimage/root/.ssh/authorized_keys
+# Rewrite the URL for the ICICdeps
+sed -i.bak "/baseurl/s/^baseurl=.*$/baseurl=http:\/\/172.24.26.1:8080\/ICICdeps/" /mnt/sysimage/etc/yum.repos.d/ICICdeps.repo
+
+# Done now... unmount, remove the VG, release the disk
+umount /mnt/sysimage
+vgchange -an rhel_rhel8img
+sleep 1
+# There should not be anything holding the disks open, so using the "safe" option should be enough to release it
+until chccwdev -s 1200 ; do
+    echo "Waiting for DASD to release..."
+    sleep 1;
+done
+
+# The disk is finished with now.
+cio_ignore -a 1200
+vmcp detach 1200
+
+# Call function to grow the compute node
+growguest ${iciccmp} ${cmpspace}
+
+# Add the host details to our local /etc/hosts
+echo "${cmpip} ${iciccmp,,}.${DOMAIN} ${iciccmp,,}" >> /etc/hosts
+
+# set RACF OPERATIONS on the compute guest
+echo "Adding RACF attribute to ${iciccmp}..."
+ldapmodify -Zx -h LDAPSRV.ibmpoc.internal -p 389 -D racfid={{ ocp_smapi_user }},profiletype=user,o=ibmzvm -w '{{ ocp_smapi_password }}' <<EOF
+dn: racfid=${iciccmp},profiletype=user,o=ibmzvm
+changetype: modify
+add: racfattributes
+racfattributes: OPERATIONS
+EOF

-        /GATEWAY/s/^GATEWAY=.*$/GATEWAY=${GATEWAY}/ " /mnt/sysimage/etc/sysconfig/network-scripts/ifcfg-${extnetdev}
-    if [ -v DNS1 ]; then
-        echo "DNS1=${DNS1}" >> /mnt/sysimage/etc/sysconfig/network-scripts/ifcfg-${extnetdev}
-        if [ -v DNS2 ]; then
-            echo "DNS2=${DNS2}" >> /mnt/sysimage/etc/sysconfig/network-scripts/ifcfg-${extnetdev}
-            if [ -v DNS3 ]; then
-                echo "DNS3=${DNS3}" >> /mnt/sysimage/etc/sysconfig/network-scripts/ifcfg-${extnetdev}
+        /GATEWAY/s/^GATEWAY=.*$/GATEWAY=${GATEWAY}/ " /mnt/sysimage/etc/sysconfig/network-scripts/ifcfg-"${extnetdev}"
+    if [[ -v DNS1 ]]; then
+        echo "DNS1=${DNS1}" >> /mnt/sysimage/etc/sysconfig/network-scripts/ifcfg-"${extnetdev}"
+        if [[ -v DNS2 ]]; then
+            echo "DNS2=${DNS2}" >> /mnt/sysimage/etc/sysconfig/network-scripts/ifcfg-"${extnetdev}"
+            if [[ -v DNS3 ]]; then
+                echo "DNS3=${DNS3}" >> /mnt/sysimage/etc/sysconfig/network-scripts/ifcfg-"${extnetdev}"
             fi
         fi
     fi
     # Write out the new hostname
-    echo ${HN,,}.${DOMAIN} > /mnt/sysimage/etc/hostname
+    echo "${HN,,}"."${DOMAIN}" > /mnt/sysimage/etc/hostname
+    # Stop NetworkMangler from mangling DNS
+    sed -i 's/\[main\]/[main]\ndns=none/' /mnt/sysimage/etc/NetworkManager/NetworkManager.conf
+    # Set the correct DNS server (our internal one)
+    cat <<EOFRES > /mnt/sysimage/etc/resolv.conf
+# File NOT managed by NetworkManager
+search ${DOMAIN}
+nameserver 172.24.26.1
+EOFRES
     # Append hostnames to /etc/hosts
-    echo "${mgtip} ${icicmgt,,}.${DOMAIN} ${icicmgt,,}" >> /mnt/sysimage/etc/hosts
-    echo "${cmpip} ${iciccmp,,}.${DOMAIN} ${iciccmp,,}" >> /mnt/sysimage/etc/hosts
+    { echo "${mgtip} ${icicmgt,,}.${DOMAIN} ${icicmgt,,}";
+      echo "${cmpip} ${iciccmp,,}.${DOMAIN} ${iciccmp,,}";} >> /mnt/sysimage/etc/hosts
     # write the LDAP hostname detail to /etc/hosts
     echo "172.24.26.10 ldapsrv.ibmpoc.internal" >> /mnt/sysimage/etc/hosts
     # copy the CA certificates
@@ -91,16 +106,16 @@ growguest() {
     mapfile -t dasds < <(smcli ivsqd -T IBMAUTO -q 2 -e 3 ${smapiauth} -n LINUX | awk -v size=$((mdsize / 2)) '{ if ($4 >= size) {print} }')
     # run through a loop to accumulate disk space
     devnum=201; dasdnum=0;
-    until [ ${SPACE} -eq 0 ]; do
+    until [[ ${SPACE} -eq 0 ]]; do
         # grab an available space
         IFS=" " read -r -a dasd <<< ${dasds[$((dasdnum++))]}
         IFS=" " avail=${dasd[3]}
         # if there is no more DASD, avail will either not be a number or be blank
-        if [ -z "${avail}" ] || [ ! -z "${avail//[0-9]}" ]; then
+        if [[ -z "${avail}" ]] || [[ ! -z "${avail//[0-9]}" ]]; then
             echo "No DASD volume available, we are ${SPACE} too short"
             break
         fi
-        if [ "${avail}" -ge "${SPACE}" ]; then
+        if [[ "${avail}" -ge "${SPACE}" ]]; then
             # This disk has more free space than we need, just grab it
             alloc=${SPACE}
             SPACE=0
@@ -115,27 +130,35 @@ growguest() {
     done
 }
 
-# pause until SMAPI is active, then a little more
-until vmcp Q VSMREQIN 2>/dev/null ; do
-    echo "Pausing to wait for SMAPI listener"
-    sleep 10
-done
-sleep 10
-smcli qafl -T IBMAUTO ${smapiauth}
-if [ $? -ne 0 ]; then
+# check if SMAPI is active
+if ! systemctl is-active --quiet smapi.service ; then
     echo "SMAPI is not operating, cannot continue"
     exit 5
 else
     echo "SMAPI listener active, continuing..."
 fi
 
+# Set up the mount of the ICIC module content
+cat <<EOF >"/etc/auto.icic-install-${icicver}"
+${icicpath} -fstype=squashfs,exec :${modpath}
+EOF
+cat <<EOF >"/etc/auto.master.d/icic-install-${icicver}.autofs"
+/- /etc/auto.icic-install-${icicver}
+EOF
+systemctl reload autofs
+
+if ! ls ${icicpath}/icicimg.txt 2>/dev/null ; then
+    echo "ICIC content module file could not be mounted!"
+    exit 6
+fi
+
 # fetch our current IP address, and strip the netmask off the end
 myip=$(ip -br addr show dev encad0 | awk '{print $3}') # the devname in here has to be set using Ansible too
 tmpip=${myip%/*}
 IFS=" ";
 
 # Look for what type of space we have: if we find no "3390-" then it's FBA.
-if [ "$(smcli ivsqd -T IBMAUTO -q 3 -e 3 ${smapiauth} -n LINUX | grep 3390- | wc -l)" == "0" ]; then
+if [[ "$(smcli ivsqd -T IBMAUTO -q 3 -e 3 ${smapiauth} -n LINUX | grep 3390- | wc -l)" == "0" ]]; then
     disktype=FBA
     mdsize=${mdsizefba}
     mgtspace=${mgtspacefba}
@@ -147,8 +170,22 @@ else
     cmpspace=${cmpspaceckd}
 fi
 
+# Set up the mount of the ICIC module content
+cat <<EOFAU >"/etc/auto.icic-install-${icicver}"
+${icicpath} -fstype=squashfs,exec :/opt/content/icic-${icicver}-${disktype,,}.esi
+EOFAU
+cat <<EOFAM >"/etc/auto.master.d/icic-install-${icicver}.autofs"
+/- /etc/auto.icic-install-${icicver}
+EOFAM
+systemctl reload autofs && sleep 2
+
+if ! ls ${icicpath}/icicimg.txt 2>/dev/null ; then
+    echo "ICIC content module file could not be mounted!"
+    exit 6
+fi
+
 # If VLAN is set, set up a VLAN parameter for the NICDEF statements
-if [ ! -z ${VLAN} ]; then
+if [[ ! -z ${VLAN} ]]; then
     vlanparm="VLAN ${VLAN}"
 else
     vlanparm=""
@@ -160,8 +197,8 @@ smcli icd -T ${icicmgt} ${smapiauth} < <(sed "s/VLANPLACE/${vlanparm}/"
-echo 1 > /sys/bus/ccw/devices/0.0.1200/raw_track_access
-if [ ${disktype} == "ECKD" ]; then
+if [[ ${disktype} == "ECKD" ]]; then
     chccwdev --attribute raw_track_access=1 -e 1200,1201
+else
+    chccwdev -e 1200,1201
 fi
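The `smcli icd ... < <(sed ...)` pattern above builds the guest directory entry on the fly: sed rewrites the placeholders in a prototype directory file and the process substitution feeds the result to smcli on stdin. The substitution can be previewed without touching SMAPI; the template path and VLAN value below are assumptions for illustration only:

    vlanparm="VLAN 604"                    # assumed VLAN number
    sed -e "s/VLANPLACE/${vlanparm}/" \
        -e "s/ICICMGT1/MYICMGT1/" \
        /opt/icic/1.2.0/ICICMGT1.direct    # hypothetical template path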
 # Fetch the disk image file
-icicimg=$(curl ${fastvmurl}/ICICIMG.txt 2>/dev/null)
-if [ ${disktype} == "FBA" ]; then
-    icicimg=$(echo ${icicimg} | sed -e 's/-/-fba-/g')
+icicimg=$(cat "${icicpath}"/icicimg.txt 2>/dev/null)
+if [[ ${disktype} == "FBA" ]]; then
+    icicimg=$(echo "${icicimg}" | sed -e 's/-/-fba-/g')
 fi
-echo "Fetching ${icicimg%.*} for restoration..."
-curl ${fastvmurl}/${icicimg} | zcat | tee \
+echo "Restoring ${icicimg%.*}..."
+zcat ${icicpath}/${icicimg} | tee \
     >(dd of=/dev/$(lsdasd | grep 0.0.1200 | awk '{print $3}') bs=64k iflag=fullblock oflag=direct) \
     | dd of=/dev/$(lsdasd | grep 0.0.1201 | awk '{print $3}') bs=64k iflag=fullblock oflag=direct
 sync;sync;sync
@@ -216,8 +255,8 @@ until chccwdev -s 1200,1201 ; do
     echo "Waiting for DASD to release..."
     sleep 1;
 done
 
-mgtip=$(nextip ${tmpip})
-cmpip=$(nextip ${mgtip})
+mgtip=$(nextip "${tmpip}")
+cmpip=$(nextip "${mgtip}")
 echo "Telling Ansible about our IP addresses and disk type..."
 cat >> /opt/ansible/inventory/group_vars/all/runtime.yml <<EOF
 icic_management_ip_address: ${mgtip}
 icic_compute_ip_address: ${cmpip}
 icic_disk_type: ${disktype}
 EOF
 
 cat /root/.ssh/id_ed25519.pub >> /mnt/sysimage/root/.ssh/authorized_keys
-# Rewrite the URL for the ICICdeps, jic
-sed -i.bak "/baseurl/s/^baseurl=.*$/baseurl=http:\/\/172.24.26.1:8080\/ICICdeps/" /mnt/sysimage/etc/yum.repos.d/ICICdeps.repo
+cat /root/.ssh/id_rsa_icic.pub >> /mnt/sysimage/root/.ssh/authorized_keys
+# Remove the old ICICdeps repo definition, if present
+rm -f /mnt/sysimage/etc/yum.repos.d/ICICdeps.repo
+# Copy the repo files from the ELAN
+for FN in $(cat /var/www/html/.secret/.repofiles); do
+    cp /etc/yum.repos.d/${FN} /mnt/sysimage/etc/yum.repos.d/${FN}
+done
 
 # Done now... unmount, remove the VG, release the disk
 umount /mnt/sysimage
@@ -258,8 +304,10 @@ until chccwdev -s 1200 ; do
     echo "Waiting for DASD to release..."
     sleep 1;
 done
 
 # Second, the compute:
-if [ ${disktype} == "ECKD" ]; then
+if [[ ${disktype} == "ECKD" ]]; then
     chccwdev --attribute raw_track_access=0 -e 1201
+else
+    chccwdev -e 1201
 fi
 # give the VG a little time to settle before mounting
 sleep 3
 vgchange -ay
 # derive the name of the root LV in the image
 rootLV=$(lvs | grep "^[[:space:]]*root.*rhel8img" | awk '{print $1;}')
 # go ahead and mount
-mount /dev/rhel_rhel8img/${rootLV} /mnt/sysimage
+mount /dev/rhel_rhel8img/"${rootLV}" /mnt/sysimage
 # change the interface details
-netconf ${iciccmp} ${cmpip}
+netconf "${iciccmp}" "${cmpip}"
 # Update the address for the internal interface
 sed -i.bak '/IPADDR/s/11/12/' /mnt/sysimage/etc/sysconfig/network-scripts/ifcfg-encad8
 # Purge the old SSH host keys for regeneration at boot
 rm -f /mnt/sysimage/etc/ssh/ssh_host_*
-# Add the SSH key from here to the management node
+# Add the SSH keys from here to the management node
 cat /root/.ssh/id_ed25519.pub >> /mnt/sysimage/root/.ssh/authorized_keys
-# Rewrite the URL for the ICICdeps
-sed -i.bak "/baseurl/s/^baseurl=.*$/baseurl=http:\/\/172.24.26.1:8080\/ICICdeps/" /mnt/sysimage/etc/yum.repos.d/ICICdeps.repo
+cat /root/.ssh/id_rsa_icic.pub >> /mnt/sysimage/root/.ssh/authorized_keys
+# Remove the old ICICdeps repo definition, if present
+rm -f /mnt/sysimage/etc/yum.repos.d/ICICdeps.repo
+# Copy the repo files from the ELAN
+for FN in $(cat /var/www/html/.secret/.repofiles); do
+    cp /etc/yum.repos.d/${FN} /mnt/sysimage/etc/yum.repos.d/${FN}
+done
 
 # Done now... unmount, remove the VG, release the disk
 umount /mnt/sysimage
@@ -303,14 +356,15 @@ growguest ${iciccmp} ${cmpspace}
 echo "${mgtip} ${icicmgt,,}.${DOMAIN} ${icicmgt,,}" >> /etc/hosts
 echo "${cmpip} ${iciccmp,,}.${DOMAIN} ${iciccmp,,}" >> /etc/hosts
 
-# Fetch the ICIC dependencies content and expand it
-icicdep=$(curl ${fastvmurl}/ICICDEP.txt 2>/dev/null)
-echo "Fetching ${icicdep%.*} for ICIC dependencies..."
-curl ${fastvmurl}/${icicdep} 2>/dev/null | tar -C /var/www/html/ -zx
+# No longer doing this!
+## Fetch the ICIC dependencies content and expand it
+#icicdep=$(cat ${icicpath}/icicdep.txt 2>/dev/null)
+#echo "Setting up ${icicdep%.*} for ICIC dependencies..."
+#tar zxf ${icicpath}/${icicdep} -C /var/www/html/ 2>/dev/null
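A note on the restore step above: `zcat | tee >(dd ...) | dd ...` decompresses the image once and writes it to both DASDs in a single pass -- tee copies its stdin into the process substitution (the 0.0.1200 device) while its stdout continues down the pipe to the 0.0.1201 device. The fan-out can be tried harmlessly with ordinary files standing in for the disks (file names here are placeholders):

    zcat image.gz | tee \
        >(dd of=/tmp/disk1200.img bs=64k iflag=fullblock) \
        | dd of=/tmp/disk1201.img bs=64k iflag=fullblock
    cmp /tmp/disk1200.img /tmp/disk1201.img && echo "identical copies"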
 
 # IPLing the management guest
 echo "Issuing a SMAPI IPL for the management guest..."
-smcli ia -T ${icicmgt} ${smapiauth}
+smcli ia -T "${icicmgt}" ${smapiauth}
 sleep 1
 echo "Starting the Ansible configuration of management guest..."
 cd /opt/ansible && ansible-playbook -i inventory -v setup-icic-management.yml
@@ -318,7 +372,7 @@ cd /opt/ansible && ansible-playbook -i inventory -v setup-icic-management.yml
 
 # set RACF OPERATIONS on the compute guest
 echo "Adding RACF attribute to ${iciccmp}..."
 ldapmodify -Zx -h LDAPSRV.ibmpoc.internal -p 389 -D racfid={{ ocp_smapi_user }},profiletype=user,o=ibmzvm -w '{{ ocp_smapi_password }}' <<EOF
 dn: racfid=${iciccmp},profiletype=user,o=ibmzvm
 changetype: modify
 add: racfattributes
 racfattributes: OPERATIONS
 EOF

-  - name: fetch the ICIC file name
-    shell:
-      cmd: curl {% raw %}{{ fastvmurl }}{% endraw %}/ICIC.txt 2>/dev/null
-      warn: false
-    register: icicfile
-  - name: extract the remote archive
-    unarchive:
-      src: "{% raw %}{{ fastvmurl }}/{{ icicfile.stdout }}{% endraw %}"
-      dest: /tmp/
-      remote_src: yes
-  - name: pull the version from the supplied file name
-    set_fact:
-      icicver: '{% raw %}{{ icicfile.stdout.split("-")[4] | splitext | first }}{% endraw %}'
+  - name: Install dependencies of the installer
+    dnf:
+      state: present
+      name:
+        - gettext
+        - gettext-libs
+        - java-1.8.0-openjdk
+        - java-1.8.0-openjdk-headless
+
+  - name: Extract the installation code
+    unarchive:
+      src: "{% raw %}{{ item }}{% endraw %}"
+      dest: /tmp/
+      remote_src: false
+    with_fileglob: "{% raw %}{{ icicpath }}/icic-install-*-{{ icicver }}*.tgz{% endraw %}"
 
-  - name: run the silent installation script
+  - name: Run the silent installation script
     command:
       chdir: /tmp/icic-{% raw %}{{ icicver }}{% endraw %}/
       cmd: ./install -s -z -e -c
     environment:
       HOST_INTERFACE: encad0
 
-  - name: make sure CA certificates are onboarded
+  - name: Make sure CA certificates are onboarded
     block:
-      - name: run update-ca-trust
+      - name: Run update-ca-trust
        command:
          cmd: update-ca-trust
-      - name: run c_rehash
+      - name: Run c_rehash
        command:
          cmd: openssl rehash /etc/pki/ca-trust/source/anchors/
-  - name: install the ICIC certificate(s)
+  - name: Install the ICIC certificate(s)
     block:
-      - name: copy the private key
+      - name: Copy the private key
        copy:
          src: "/etc/pki/tls/private/icic.pem"
          dest: "/etc/pki/tls/private/icic.key"
@@ -86,7 +106,7 @@
          owner: root
          group: root
          mode: "0440"
-      - name: copy the certificate(s)
+      - name: Copy the certificate(s)
        template:
          src: icic-cert-bundle.j2
          dest: "/etc/pki/tls/certs/icic.crt"
@@ -94,6 +114,20 @@
          owner: root
          group: root
          mode: "0644"
-  - name: restart the ICIC services
+  - name: Restart the ICIC services
     command:
       cmd: /opt/ibm/icic/bin/icic-services restart
+
+- name: Set the DNS for the ICIC management node correctly
+  hosts: s390x_bastion_workstation
+  tasks:
+    - name: Add or modify the A record for ICIC management (a bit of a hack)
+      nsupdate:
+        key_name: "{% raw %}{{ esigroup | lower }}-key{% endraw %}"
+        key_secret: "{% raw %}{{ tsigkey }}{% endraw %}"
+        key_algorithm: "hmac-sha256"
+        server: "172.24.26.1"
+        zone: "{% raw %}{{ esigroup | lower }}.{{ cluster_base_domain }}{% endraw %}"
+        record: icicmgt0
+        type: A
+        value: "{% raw %}{{ icic_management_ip_address }}{% endraw %}"
\ No newline at end of file
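Once that last play has run, the new record can be verified against the ELAN's internal DNS. A hedged check, assuming an esigroup of esi01 and a cluster_base_domain of example.com:

    # query the server the nsupdate task targeted (names are illustrative)
    dig +short A icicmgt0.esi01.example.com @172.24.26.1
    # expected output: the address held in icic_management_ip_address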
diff --git a/local-playbooks/roles/setup-ocp-deployer/tasks/main.yml b/local-playbooks/roles/setup-ocp-deployer/tasks/main.yml
index 458d43d..831a39b 100644
--- a/local-playbooks/roles/setup-ocp-deployer/tasks/main.yml
+++ b/local-playbooks/roles/setup-ocp-deployer/tasks/main.yml
@@ -11,8 +11,8 @@
   firewalld:
     port: 6000/udp
     state: enabled
-    permanent: yes
-    immediate: yes
+    permanent: true
+    immediate: true
     zone: public
 
 - name: Add ntp to firewall
diff --git a/local-playbooks/roles/setup-ocp-deployer/templates/group-vars.yml.j2 b/local-playbooks/roles/setup-ocp-deployer/templates/group-vars.yml.j2
index 482edf5..bbc1f1d 100644
--- a/local-playbooks/roles/setup-ocp-deployer/templates/group-vars.yml.j2
+++ b/local-playbooks/roles/setup-ocp-deployer/templates/group-vars.yml.j2
@@ -99,6 +99,13 @@ coreos_kargs_net:
 coreos_kargs_extra:
   zvm: "{{ zvm_rd_znet }} {{ zvm_rd_dasd }}"
 
+mqttgate_id: "{{ mqttgate_id }}"
+mqttgate_pass: "{{ mqttgate_pass }}"
+mqtt_promtail_id: "{{ mqtt_promtail_id }}"
+mqtt_promtail_pass: "{{ mqtt_promtail_pass }}"
+mqtt_consend_id: "{{ mqtt_consend_id }}"
+mqtt_consend_pass: "{{ mqtt_consend_pass }}"
+
 # The IP configuration will write out the cluster_base_domain variable.
 # Look for runtime.yml in the group_vars/all directory (where this file is).
 # If this is the only file in the 'all' directory then the IP configuration did not run :/
diff --git a/local-playbooks/roles/setup-web-resources/tasks/main.yml b/local-playbooks/roles/setup-web-resources/tasks/main.yml
index 79e63c4..a8e1e51 100644
--- a/local-playbooks/roles/setup-web-resources/tasks/main.yml
+++ b/local-playbooks/roles/setup-web-resources/tasks/main.yml
@@ -6,6 +6,7 @@
       - php-json
       - php-ldap
       - php-mbstring
+      - php-Smarty
     state: present
 
 - name: Create configuration for our external non-SSL VirtualHost
@@ -106,16 +107,22 @@
       Substitute s|/([A-F0-9]+)/([A-F0-9]+)/\"\+lnam|/perftk/$1/$2/\"\+lnam|
       # Reverse-proxy configuration for cross-system stats
       ProxyPassMatch "^/zvmstats-(.*).json$" "https://$1:8443/zvmstats.json"
+      RewriteEngine On
+      RewriteCond %{HTTP:Upgrade} websocket [NC]
+      RewriteCond %{HTTP:Connection} upgrade [NC]
+      RewriteRule ^/grafana/?(.*) "ws://127.0.0.1:3000/grafana/$1" [P,L]
+      ProxyPassMatch "^/grafana/(.*)$" "http://localhost:3000/grafana/$1"
+      ProxyPassReverse "/grafana/" "http://localhost:3000/grafana/"
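The rewrite pair above sends websocket upgrades under /grafana/ to ws://127.0.0.1:3000 while ordinary requests fall through to the ProxyPassMatch. A quick smoke test from a client (the host name is illustrative; /api/health and /api/live/ws are standard Grafana endpoints):

    # plain HTTP is reverse-proxied to the local Grafana on port 3000
    curl -k https://elan.example.com/grafana/api/health
    # an Upgrade request should match the RewriteCond pair and be proxied as ws://
    curl -k -i -H "Upgrade: websocket" -H "Connection: upgrade" \
        https://elan.example.com/grafana/api/live/ws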
 
 - name: Download and unwind LDAP tools
   block:
-    - name: Download and unzip Smarty PHP template engine
-      unarchive:
-        src: "https://github.com/smarty-php/smarty/archive/v3.1.36.tar.gz"
-        dest: "/usr/local/bin/"
-        creates: "/usr/local/bin/smarty-3.1.36/"
-        remote_src: yes
-      environment: "{{ local_proxy_env | default(omit) }}"
+#    - name: Download and unzip Smarty PHP template engine
+#      unarchive:
+#        src: "https://github.com/smarty-php/smarty/archive/v3.1.36.tar.gz"
+#        dest: "/usr/local/bin/"
+#        creates: "/usr/local/bin/smarty-3.1.36/"
+#        remote_src: yes
+#      environment: "{{ local_proxy_env | default(omit) }}"
 
     - name: Download and unzip self-service-password
       unarchive:
@@ -143,20 +150,15 @@
       dest: "{{ webroot }}/w3ds"
       creates: "{{ webroot }}/w3ds/w3ds.js"
 
-  - name: Download and unzip site structure
-    unarchive:
-      src: "/opt/resources/{{ elan_web_pkgname }}.zip"
-      dest: "{{ webroot }}/"
-      creates: "{{ webroot }}/{{ elan_web_pkgname }}/{{ meta_header_file }}"
-
-  - name: Move downloaded site directory  # noqa no-free-form
-    shell:
-      cmd: |
-        rm -rf {{ webroot }}/{scripts,justgage} \
-        rm -rf {{ webroot }}/{{ elan_web_pkgname }}/{build,tests,.travis.yml} \
-        && mv {{ webroot }}/{{ elan_web_pkgname }}/* {{ webroot }}/ \
-        && rm -rf {{ webroot }}/{{ elan_web_pkgname }}
-    removes: "{{ webroot }}/{{ elan_web_pkgname }}"
+  - name: Install the web site RPM
+    yum:
+      name: elan-web
+      state: latest
+      update_cache: yes
+    retries: 3
+    delay: 6
+    register: result
+    until: result is not failed
 
 - name: Set permission on the HTML files to enable Includes
   file:
@@ -233,7 +235,7 @@
         name: MAILTO
         value: root
 
-  - name: Create/set permission on directories - secrets, Smarty template cache, content, and install
+  - name: Create/set permission on directories - secrets, Smarty template cache, and install
     file:
       path: "{{ item }}"
      owner: apache
@@ -243,11 +245,19 @@
     loop:
       - "{{ webroot }}/.secret"
       - "{{ webroot }}/service-desk-racf-master/templates_c"
-      - /opt/content
       - "{{ webroot }}/install"
       - "{{ webroot }}/.cluster"
       - "{{ webroot }}/.clustertmp"
 
+  - name: Create/set permission on content directory
+    file:
+      path: /opt/content
+      owner: apache
+      group: support
+      mode: 0770
+      setype: httpd_sys_rw_content_t
+      state: directory
+
   - name: Create and set perms on incrond trigger files
     file:
       path: "{{ webroot }}/.secret/{{ item }}"
@@ -258,7 +268,7 @@
       state: touch
     loop:
       - ".ocp4_pull_secret"
-      - ".fastvmurl"
+      - ".icicver"
 
 # - name: fix path to Smarty in config file
 #   lineinfile:
diff --git a/local-playbooks/roles/setup-web-resources/templates/service-desk-racf/config.inc.local.php.j2 b/local-playbooks/roles/setup-web-resources/templates/service-desk-racf/config.inc.local.php.j2
index ab20cf4..4e4400f 100644
--- a/local-playbooks/roles/setup-web-resources/templates/service-desk-racf/config.inc.local.php.j2
+++ b/local-playbooks/roles/setup-web-resources/templates/service-desk-racf/config.inc.local.php.j2
@@ -1,6 +1,6 @@