diff --git a/lib/hacluster.pm b/lib/hacluster.pm
index 743ad8b1bf56..6e505c7eb18c 100644
--- a/lib/hacluster.pm
+++ b/lib/hacluster.pm
@@ -83,6 +83,8 @@ our @EXPORT = qw(
   crm_wait_for_maintenance
   crm_check_resource_location
   generate_lun_list
+  show_cluster_parameter
+  set_cluster_parameter
 );
 
 =head1 SYNOPSIS
@@ -1460,4 +1462,58 @@ sub generate_lun_list {
         $index += $num_luns;
     }
 }
+
+
+=head2 set_cluster_parameter
+
+    set_cluster_parameter(resource=>'Totoro', parameter=>'neighbour', value=>'my');
+
+Set HA cluster resource parameter using crm shell.
+
+=over
+
+=item * B<resource>: Resource containing parameter
+
+=item * B<parameter>: Parameter name
+
+=item * B<value>: Target parameter value
+
+=back
+
+=cut
+
+sub set_cluster_parameter {
+    my (%args) = @_;
+    for my $arg ('resource', 'parameter', 'value') {
+        croak("Mandatory argument '$arg' missing.") unless $args{$arg};
+    }
+    my $cmd = join(' ', 'crm', 'resource', 'param', $args{resource}, 'set', $args{parameter}, $args{value});
+    assert_script_run($cmd);
+}
+
+=head2 show_cluster_parameter
+
+    show_cluster_parameter(resource=>'Totoro', parameter=>'neighbour');
+
+Show HA cluster resource parameter value using crm shell.
+
+=over
+
+=item * B<resource>: Resource containing parameter
+
+=item * B<parameter>: Parameter name
+
+=back
+
+=cut
+
+sub show_cluster_parameter {
+    my (%args) = @_;
+    for my $arg ('resource', 'parameter') {
+        croak("Mandatory argument '$arg' missing.") unless $args{$arg};
+    }
+    my $cmd = join(' ', 'crm', 'resource', 'param', $args{resource}, 'show', $args{parameter});
+    return script_output($cmd);
+}
+
 1;
diff --git a/lib/saputils.pm b/lib/saputils.pm
index db4691135113..c30735aeca64 100644
--- a/lib/saputils.pm
+++ b/lib/saputils.pm
@@ -20,6 +20,8 @@ our @EXPORT = qw(
   calculate_hana_topology
   check_hana_topology
   check_crm_output
+  get_primary_node
+  get_failover_node
 );
 
 =head1 SYNOPSIS
@@ -176,4 +178,47 @@ sub check_crm_output {
     return (($resource_starting != 1) && ($failed_actions != 1) ? 1 : 0);
 }
 
+=head2 get_primary_node
+
+    get_primary_node(topology_data=>$topology_data);
+
+Returns hostname of the current primary node obtained from C<calculate_hana_topology()> output.
+
+=over
+
+=item B<topology_data> - Output from C<calculate_hana_topology()> function
+
+=back
+
+=cut
+
+sub get_primary_node {
+    my (%args) = @_;
+    croak("Argument missing") unless $args{topology_data};
+    my $topology = $args{topology_data};
+    for my $db (keys %$topology) {
+        return $db if $topology->{$db}{sync_state} eq 'PRIM';
+    }
+}
+
+=head2 get_failover_node
+
+    get_failover_node(topology_data=>$topology_data);
+
+Returns hostname of the current failover (replica) node obtained from C<calculate_hana_topology()> output.
+Returns the node hostname even if it is in 'SFAIL' state.
+
+=over
+
+=item B<topology_data> - Output from C<calculate_hana_topology()> function
+
+=back
+
+=cut
+
+sub get_failover_node {
+    my (%args) = @_;
+    croak("Argument missing") unless $args{topology_data};
+    my $topology = $args{topology_data};
+    for my $db (keys %$topology) {
+        return $db if grep /$topology->{$db}{sync_state}/, ('SOK', 'SFAIL');
+    }
+}
+
 1;
diff --git a/lib/sles4sap/console_redirection.pm b/lib/sles4sap/console_redirection.pm
index 992834987b5c..010eeb3cef3b 100644
--- a/lib/sles4sap/console_redirection.pm
+++ b/lib/sles4sap/console_redirection.pm
@@ -129,7 +129,11 @@ sub connect_target_to_serial {
     my $redirect_port = get_required_var("QEMUPORT") + 1;
     my $redirect_ip = get_var('QEMU_HOST_IP', '10.0.2.2');
     my $redirect_opts = "-R $redirect_port:$redirect_ip:$redirect_port";
-    my $switch_root_cmd = $args{switch_root} ? 'sudo su -' : '';
+    my $switch_root_cmd = $args{switch_root} && $args{ssh_user} ne 'root' ? 'sudo su -' : '';
+    if ($args{switch_root} && $args{ssh_user} eq 'root') {
+        record_info('WARNING', 'No need to use switch_root when ssh_user is root. Omitting "sudo su -"');
+    }
+
     my $ssh_cmd = join(' ', 'ssh -t', $ssh_opt, $redirect_opts,
         "$args{ssh_user}\@$args{destination_ip}", $switch_root_cmd, "2>&1 | tee -a /dev/$serialdev"
     );
diff --git a/lib/sles4sap/database_hana.pm b/lib/sles4sap/database_hana.pm
new file mode 100644
index 000000000000..ec4a11ea4c3e
--- /dev/null
+++ b/lib/sles4sap/database_hana.pm
@@ -0,0 +1,199 @@
+# SUSE's openQA tests
+#
+# Copyright 2017-2024 SUSE LLC
+# SPDX-License-Identifier: FSFAP
+# Maintainer: QE-SAP
+
+package sles4sap::database_hana;
+use strict;
+use warnings;
+use testapi;
+use Carp qw(croak);
+use Exporter qw(import);
+use saputils qw(check_crm_output get_primary_node get_failover_node calculate_hana_topology);
+use hacluster qw($crm_mon_cmd);
+use sles4sap::sapcontrol;
+
+our @EXPORT = qw(
+  hdb_stop
+  wait_for_failed_resources
+  wait_for_takeover
+  register_replica
+  get_node_roles
+  find_hana_resource_name
+);
+
+
+=head1 SYNOPSIS
+
+Package contains functions for interacting with the HANA database and related actions.
+
+=cut
+
+=head2 hdb_stop
+
+    hdb_stop(instance_id=>'00', [switch_user=>'sidadm', command=>'kill']);
+
+Stop HANA database using the C<HDB> command. Function expects to be executed as sidadm, however you can use B<switch_user>
+to execute the command as a different user via C<sudo su ->. The user needs to have correct permissions for performing
+the requested action.
+Function waits till all DB processes are stopped.
+
+=over
+
+=item * B<instance_id>: Database instance ID. Mandatory.
+
+=item * B<switch_user>: Execute command as specified user with help of C<sudo su ->. Default: undef
+
+=item * B<command>: HDB command to trigger. Default: stop
+
+=back
+
+=cut
+
+sub hdb_stop {
+    my (%args) = @_;
+    my $stop_timeout = 600;
+    $args{command} //= 'stop';
+    croak("Command '$args{command}' is not supported.") unless grep(/$args{command}/, ('kill', 'stop'));
+
+    my $method_cmd = ($args{command} eq 'kill') ? 'kill -x' : $args{command};
+    my $sudo_su = $args{switch_user} ? "sudo su - $args{switch_user} -c" : '';
+    my $cmd = join(' ', $sudo_su, '"', 'HDB', $method_cmd, '"');
+    record_info('HDB stop', "Executing '$cmd' on " . script_output('hostname'));
+    assert_script_run($cmd, timeout => $stop_timeout);
+
+    # Wait for HANA processes to stop
+    sapcontrol_process_check(instance_id => $args{instance_id}, expected_state => 'stopped', wait_for_state => 'yes', timeout => $stop_timeout);
+    record_info('DB stopped');
+}
+
+=head2 wait_for_failed_resources
+
+    wait_for_failed_resources();
+
+Wait until 'crm_mon' starts showing failed resources. This can be used as a first indicator of a started failover.
+
+=cut
+
+sub wait_for_failed_resources {
+    my $timeout = 300;
+    my $start_time = time;
+    while (check_crm_output(input => script_output($crm_mon_cmd, quiet => 1))) {
+        sleep 30;
+        die("Cluster did not register any failed resource within $timeout sec") if (time - $timeout > $start_time);
+    }
+    record_info('CRM info', "Cluster registered failed resources\n" . script_output($crm_mon_cmd, quiet => 1));
+}
+
+=head2 wait_for_takeover
+
+    wait_for_takeover(target_node=>'expeliarmus');
+
+Waits until B<target_node> performs takeover and reaches 'PRIM' state.
+
+=over
+
+=item * B<target_node>: Node hostname which is expected to take over.
+
+=back
+
+=cut
+
+sub wait_for_takeover {
+    my (%args) = @_;
+    my $timeout = 300;
+    my $start_time = time;
+    my $topology;
+    my $takeover_ok;
+    until ($takeover_ok) {
+        die("Node '$args{target_node}' did not take over within $timeout sec") if (time - $timeout > $start_time);
+        $topology = calculate_hana_topology(input => script_output('SAPHanaSR-showAttr --format=script'));
+        $takeover_ok = 1 if (get_primary_node(topology_data => $topology) eq $args{target_node});
+        sleep 30;
+    }
+}
+
+=head2 register_replica
+
+    register_replica(target_hostname=>'Dumbledore', instance_id=>'00' [, switch_user=>'hdbadm']);
+
+Executes replica node registration after failover using the 'hdbnsutil' command. The node must be stopped, otherwise the command fails.
+
+=over
+
+=item * B<target_hostname>: Hostname of the node that should be registered as replica
+
+=item * B<instance_id>: Instance ID
+
+=item * B<switch_user>: Execute command as specified user with help of C<sudo su ->. Default: undef
+
+=back
+
+=cut
+
+sub register_replica {
+    my (%args) = @_;
+    croak('Mandatory argument "target_hostname" missing') unless $args{target_hostname};
+    my $topology = calculate_hana_topology(input => script_output('SAPHanaSR-showAttr --format=script'));
+    my $primary_hostname = get_primary_node(topology_data => $topology);
+    croak("Primary node '$primary_hostname' not found in 'SAPHanaSR-showAttr' output") unless $primary_hostname;
+    croak("Replica node '$args{target_hostname}' not found in 'SAPHanaSR-showAttr' output") unless
+      $topology->{$args{target_hostname}};
+
+    my $cmd = join(' ',
+        'hdbnsutil',
+        '-sr_register',
+        "--remoteHost=$primary_hostname",
+        "--remoteInstance=$args{instance_id}",
+        "--replicationMode=$topology->{$args{target_hostname}}{srmode}",
+        "--operationMode=$topology->{$args{target_hostname}}{op_mode}",
+        "--name=$topology->{$args{target_hostname}}{site}",
+        '--online');
+    $cmd = join(' ', 'sudo', 'su', '-', $args{switch_user}, '-c', '"', $cmd, '"') if $args{switch_user};
+    assert_script_run($cmd);
+    record_info('HANA REG', "Site '$topology->{$args{target_hostname}}{site}' registered as replica");
+}
+
+=head2 get_node_roles
+
+    get_node_roles();
+
+Returns B<HASHREF> containing current status of HANA cluster node roles by parsing 'SAPHanaSR-showAttr' output.
+Example:
+
+    {primary_node=>'Harry', failover_node=>'Potter'}
+
+=cut
+
+sub get_node_roles {
+    my $topology = calculate_hana_topology(input => script_output('SAPHanaSR-showAttr --format=script'));
+    my %result = (
+        primary_node => get_primary_node(topology_data => $topology),
+        failover_node => get_failover_node(topology_data => $topology));
+    return (\%result);
+}
+
+=head2 find_hana_resource_name
+
+    find_hana_resource_name();
+
+Finds the SAP HANA primitive resource name by listing primitives with type 'ocf:suse:SAPHana'.
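+For illustration, the function scans C<crm configure show> output for lines such as the one below
+(the resource name is just an example taken from the parsing comment in the code) and returns the
+second field, the primitive name. A caller that also imports C<show_cluster_parameter> from
+C<hacluster> could then read a resource parameter like this (sketch only):
+
+    my $rsc = find_hana_resource_name();    # e.g. parses: primitive rsc_SAPHana_HDB_HDB00 ocf:suse:SAPHana
+    die 'No SAPHana primitive found' unless $rsc;
+    my $auto_reg = show_cluster_parameter(resource => $rsc, parameter => 'AUTOMATED_REGISTER');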
+
+=cut
+
+sub find_hana_resource_name {
+    foreach (split("\n", script_output('crm configure show related:ocf:suse:SAPHana | grep primitive'))) {
+        # split primitive line "primitive rsc_SAPHana_HDB_HDB00 ocf:suse:SAPHana"
+        my @aux = split(/\s+/, $_);
+        if ($aux[2] and $aux[2] eq 'ocf:suse:SAPHana') {
+            # additional check if returned HANA resource exists
+            assert_script_run("crm resource status $aux[1]");
+            return $aux[1];
+        }
+    }
+    # Return empty string if no resource found
+    return '';
+}
+
+1;
diff --git a/lib/sles4sap/sap_host_agent.pm b/lib/sles4sap/sap_host_agent.pm
new file mode 100644
index 000000000000..dcc441cc373a
--- /dev/null
+++ b/lib/sles4sap/sap_host_agent.pm
@@ -0,0 +1,101 @@
+# SUSE's openQA tests
+#
+# Copyright 2017-2024 SUSE LLC
+# SPDX-License-Identifier: FSFAP
+#
+# Summary: Functions for SAP tests
+# Maintainer: QE-SAP
+
+package sles4sap::sap_host_agent;
+use strict;
+use warnings;
+use testapi;
+use Carp qw(croak);
+use Exporter qw(import);
+
+our @EXPORT = qw(
+  saphostctrl_list_databases
+  parse_instance_name
+);
+
+my $saphostctrl = '/usr/sap/hostctrl/exe/saphostctrl';
+
+=head1 SYNOPSIS
+
+Package with functions related to interaction with SAP Host Agent (command B<saphostctrl>). Those can be used for collecting
+data about instances and performing various operations. Keep in mind that the command needs to be executed by either
+the B<root> or B<sidadm> user.
+
+=cut
+
+=head2 saphostctrl_list_databases
+
+    saphostctrl_list_databases([as_root=>1]);
+
+Uses C<saphostctrl> to get the list of all databases residing on the host. Returns parsed output as B<ARRAYREF>.
+Data for each DB is contained in a B<HASHREF>.
+Example:
+
+    $VAR1 = {
+        'hostname' => 'qesdhdb01l029',
+        'release' => '2.00.075.00.1702888292',
+        'vendor' => 'HDB', # HDB = SAP HANA database (not SID)
+        'type' => 'hdb', # type of HANA DB - hdb, mdc (multitenant), systemdb
+        'instance_name' => 'PRD00'
+    };
+    $VAR2 = {
+        'hostname' => 'qesdhdb01l029',
+        'type' => 'hdb',
+        'release' => '2.00.075.00.1702888292',
+        'instance_name' => 'QAS01',
+        'vendor' => 'HDB'
+    };
+
+=over
+
+=item * B<as_root>: Execute command using sudo. Default: false
+
+=back
+
+=cut
+
+sub saphostctrl_list_databases {
+    my (%args) = @_;
+    assert_script_run("[ -f $saphostctrl ]");
+
+    # command returns data for each DB on a new line = one array entry for each DB
+    my $sudo = $args{as_root} ? 'sudo' : '';
+    my $cmd = join(' ', $sudo, $saphostctrl, '-function', 'ListDatabases', '| grep Instance');
+    my @output = split("\n", script_output($cmd, timeout => 180));
+
+    # create hash with data from each array entry
+    @output = map { { split(/,\s|:\s/, $_) } } @output;
+
+    my @result;
+    # change hash keys to lower case and replace spaces with underscores
+    for my $entry (@output) {
+        push(@result, {map { lc($_ =~ s/\s/_/gr) => $entry->{$_} } keys %$entry});
+    }
+    return (\@result);
+}
+
+=head2 parse_instance_name
+
+    parse_instance_name($instance_name);
+
+Splits instance name into B<SID> and B<instance ID>.
Example: DBH01 -> sid=DBH, id=01 + +=over + +=item * B<$instance_name>: Instance name + +=back + +=cut + +sub parse_instance_name { + my ($instance_name) = @_; + croak("Invalid instance name: $instance_name\nInstance name is a combination of SID and instance ID.") if + length($instance_name) != 5 or grep(/\s|[a-z]|\W/, $instance_name); + my @result = $instance_name =~ /(.{3})(.{2})/s; + return (\@result); +} + diff --git a/schedule/sles4sap/sap_deployment_automation_framework/hanasr_redirected.yml b/schedule/sles4sap/sap_deployment_automation_framework/hana_sr_primary_failover.yml similarity index 91% rename from schedule/sles4sap/sap_deployment_automation_framework/hanasr_redirected.yml rename to schedule/sles4sap/sap_deployment_automation_framework/hana_sr_primary_failover.yml index 3e63f42af9eb..07b54c630153 100644 --- a/schedule/sles4sap/sap_deployment_automation_framework/hanasr_redirected.yml +++ b/schedule/sles4sap/sap_deployment_automation_framework/hana_sr_primary_failover.yml @@ -12,4 +12,4 @@ schedule: - sles4sap/sap_deployment_automation_framework/prepare_ssh_config - sles4sap/redirection_tests/redirection_check - sles4sap/redirection_tests/hana_cluster_check - - sles4sap/sap_deployment_automation_framework/cleanup + - sles4sap/redirection_tests/hanasr_schedule_tests diff --git a/t/11_hacluster.t b/t/11_hacluster.t index d3d0f3d27b04..4a8e7b8b3fe7 100644 --- a/t/11_hacluster.t +++ b/t/11_hacluster.t @@ -242,4 +242,31 @@ subtest '[crm_check_resource_location]' => sub { $hostname, "Return correct hostname: $hostname"; }; +subtest '[set_cluster_parameter]' => sub { + my $hacluster = Test::MockModule->new('hacluster', no_auto => 1); + my @calls; + $hacluster->redefine(assert_script_run => sub { @calls = @_; return; }); + + set_cluster_parameter(resource => 'Hogwarts', parameter => 'RoomOfRequirement', value => 'open'); + note("\n --> " . join("\n --> ", @calls)); + ok((grep /crm/, @calls), 'Execute "crm" command.'); + ok((grep /resource param Hogwarts/, @calls), 'Call "resource" option'); + ok((grep /set/, @calls), 'Specify "set" action'); + ok((grep /RoomOfRequirement open/, @calls), 'Specify parameter name'); +}; + + +subtest '[show_cluster_parameter]' => sub { + my $hacluster = Test::MockModule->new('hacluster', no_auto => 1); + my @calls; + $hacluster->redefine(script_output => sub { @calls = @_; return 'false'; }); + + show_cluster_parameter(resource => 'Hogwarts', parameter => 'RoomOfRequirement'); + note("\n --> " . 
join("\n --> ", @calls)); + ok((grep /crm/, @calls), 'Execute "crm" command.'); + ok((grep /resource param Hogwarts/, @calls), 'Call "resource" option'); + ok((grep /show/, @calls), 'Specify "show" action'); + ok((grep /RoomOfRequirement/, @calls), 'Specify parameter name'); +}; + done_testing; diff --git a/t/18_saputils.t b/t/18_saputils.t index 0534f438f048..bdadc0d62db5 100644 --- a/t/18_saputils.t +++ b/t/18_saputils.t @@ -256,4 +256,20 @@ subtest '[check_crm_output] starting and failed' => sub { ok $ret eq 0, "Ret:$ret has to be 0"; }; +subtest '[get_primary_node] starting and failed' => sub { + my $mock_input = { + hana_node_01 => {sync_state => 'PRIM'}, + hana_node_02 => {sync_state => 'SOK'} + }; + is get_primary_node(topology_data => $mock_input), 'hana_node_01', 'Return correct primary node name'; +}; + +subtest '[get_failover_node] starting and failed' => sub { + my $mock_input = { + hana_node_01 => {sync_state => 'PRIM'}, + hana_node_02 => {sync_state => 'SOK'} + }; + is get_failover_node(topology_data => $mock_input), 'hana_node_02', 'Return correct primary node name'; +}; + done_testing; diff --git a/t/31_sap_host_agent.t b/t/31_sap_host_agent.t new file mode 100644 index 000000000000..dfbfc5d6470d --- /dev/null +++ b/t/31_sap_host_agent.t @@ -0,0 +1,65 @@ +use strict; +use warnings; +use Test::More; +use Test::Exception; +use Test::Warnings; +use Test::MockModule; +use testapi; +use sles4sap::sap_host_agent; + +subtest '[saphostctrl_list_databases] Verify command compilation' => sub { + my $saphostctrl_output = 'Instance name: PRD00, Hostname: qesdhdb01l029, Vendor: HDB, Type: hdb, Release: 42'; + my $mock = Test::MockModule->new('sles4sap::sap_host_agent', no_auto => 1); + my @calls; + $mock->redefine(script_output => sub { push @calls, $_[0]; return $saphostctrl_output; }); + $mock->redefine(assert_script_run => sub { return 0; }); + + saphostctrl_list_databases(); + note("\n --> " . join("\n --> ", @calls)); + ok((grep /saphostctrl/, @calls), 'Execute "saphostctrl" binary'); + ok((grep /-function ListDatabases/, @calls), 'Execute "ListDatabases" fucntion'); + ok((grep /\| grep Instance/, @calls), 'Show only "Instances" entries'); +}; + +subtest '[saphostctrl_list_databases] Verify command compilation - executed as root' => sub { + my $saphostctrl_output = 'Instance name: PRD00, Hostname: qesdhdb01l029, Vendor: HDB, Type: hdb, Release: 42'; + my $mock = Test::MockModule->new('sles4sap::sap_host_agent', no_auto => 1); + my @calls; + $mock->redefine(script_output => sub { push @calls, $_[0]; return $saphostctrl_output; }); + $mock->redefine(assert_script_run => sub { return 0; }); + + saphostctrl_list_databases(as_root => 1); + note("\n --> " . 
join("\n --> ", @calls)); + ok((grep /sudo/, @calls), 'Execute as root'); +}; + +subtest '[saphostctrl_list_databases] Verify output' => sub { + my $saphostctrl_output = 'Instance name: PRD00, Hostname: qesdhdb01l029, Vendor: HDB, Type: hdb, Release: 42'; + + my $mock = Test::MockModule->new('sles4sap::sap_host_agent', no_auto => 1); + $mock->redefine(script_output => sub { return $saphostctrl_output; }); + $mock->redefine(assert_script_run => sub { return 0; }); + + my @output = @{saphostctrl_list_databases()}; + is $output[0]->{instance_name}, 'PRD00', 'Check "instance_name" value'; + is $output[0]->{hostname}, 'qesdhdb01l029', 'Check "hostname" value'; + is $output[0]->{vendor}, 'HDB', 'Check "vendor" value'; + is $output[0]->{type}, 'hdb', 'Check "type" value'; + is $output[0]->{release}, '42', 'Check "release" value'; +}; + +subtest '[parse_instance_name] ' => sub { + my ($sid, $id) = @{parse_instance_name('POO08')}; + is $sid, 'POO', "Return correct SID: $sid"; + is $id, '08', "Return correct ID: $id"; +}; + +subtest '[parse_instance_name] Exceptions' => sub { + dies_ok { parse_instance_name('POO0') } 'Instance name with less than 5 characters'; + dies_ok { parse_instance_name('POO0ASDF') } 'Instance name with more than 5 characters'; + dies_ok { parse_instance_name('POO0 ') } 'Instance name contains spaces'; + dies_ok { parse_instance_name('Poo0a') } 'Instance name contains lowercase characters'; + dies_ok { parse_instance_name('POO0.') } 'Instance name contains any non-word characters'; +}; + +done_testing; diff --git a/t/32_database_hana.pm b/t/32_database_hana.pm new file mode 100644 index 000000000000..7acc2c33ec7e --- /dev/null +++ b/t/32_database_hana.pm @@ -0,0 +1,108 @@ +use strict; +use warnings; +use Test::More; +use Test::Exception; +use Test::Warnings; +use Test::MockModule; +use Test::Mock::Time; +use testapi; +use sles4sap::database_hana; + +subtest '[hdb_stop] HDB command compilation' => sub { + my $db_hana = Test::MockModule->new('sles4sap::database_hana', no_auto => 1); + my @calls; + $db_hana->redefine(assert_script_run => sub { @calls = $_[0]; return 0; }); + $db_hana->redefine(script_output => sub { return 'Dumbledore'; }); + $db_hana->redefine(sapcontrol_process_check => sub { return 0; }); + $db_hana->redefine(record_info => sub { note(join(' ', 'RECORD_INFO -->', @_)); }); + + hdb_stop(instance_id => '00', switch_user => 'Albus'); + note("\n --> " . join("\n --> ", @calls)); + ok((grep /HDB/, @calls), 'Execute HDB command'); + ok((grep /stop/, @calls), 'Use "stop" function'); + ok((grep /sudo su \- Albus/, @calls), 'Run as another user'); + + hdb_stop(instance_id => '00', switch_user => 'Albus', command => 'kill'); + note("\n --> " . join("\n --> ", @calls)); + ok((grep /kill \-x/, @calls), 'Use "kill" function'); +}; + +subtest '[hdb_stop] Sapcontrol arguments' => sub { + my $db_hana = Test::MockModule->new('sles4sap::database_hana', no_auto => 1); + my @sapcontrol_args; + $db_hana->redefine(assert_script_run => sub { return 0; }); + $db_hana->redefine(script_output => sub { return 'Dumbledore'; }); + $db_hana->redefine(sapcontrol_process_check => sub { @sapcontrol_args = @_; return 0; }); + $db_hana->redefine(record_info => sub { note(join(' ', 'RECORD_INFO -->', @_)); }); + + hdb_stop(instance_id => 'Albus'); + note("\n --> " . 
join("\n --> ", @sapcontrol_args)); + ok((grep /instance_id/, @sapcontrol_args), 'Madatory arg "instance_id"'); + ok((grep /expected_state/, @sapcontrol_args), 'Define expected state'); + ok((grep /wait_for_state/, @sapcontrol_args), 'Wait until processes are in correct state'); +}; + +subtest '[register_replica] Command compilation' => sub { + my $db_hana = Test::MockModule->new('sles4sap::database_hana', no_auto => 1); + my $topology = { + Hogwarts => { + srmode => 'FunnyGuy', + site => 'Dumbledore', + op_mode => 'VeryOP' + }, + Durmstrang => { + srmode => 'DeathEater', + site => 'Karkaroff', + op_mode => 'SeemsWeak' + } + }; + my @calls; + $db_hana->redefine(assert_script_run => sub { @calls = @_; return 0; }); + $db_hana->redefine(script_output => sub { return 'Revelio'; }); + $db_hana->redefine(calculate_hana_topology => sub { return $topology; }); + $db_hana->redefine(get_primary_node => sub { return 'Durmstrang'; }); + $db_hana->redefine(record_info => sub { note(join(' ', 'RECORD_INFO -->', @_)); }); + + register_replica(instance_id => '00', target_hostname => 'Hogwarts'); + note("\n --> " . join("\n --> ", @calls)); + ok((grep /hdbnsutil/, @calls), 'Main "hdbnsutil" command'); + ok((grep /-sr_register/, @calls), '"-sr_rergister" option'); + ok((grep /--remoteHost=Durmstrang/, @calls), 'Define "--remoteHost"'); + ok((grep /--remoteInstance=00/, @calls), 'Define "--remoteInstance"'); + ok((grep /--operationMode=VeryOP/, @calls), 'Define "--operationMode"'); + ok((grep /--name=/, @calls), 'Define "--name"'); + +}; + +subtest '[get_node_roles] ' => sub { + my $db_hana = Test::MockModule->new('sles4sap::database_hana', no_auto => 1); + $db_hana->redefine(script_output => sub { return 'Revelio'; }); + $db_hana->redefine(calculate_hana_topology => sub { return 'Aparecium'; }); + $db_hana->redefine(get_primary_node => sub { return 'AccioPrimary'; }); + $db_hana->redefine(get_failover_node => sub { return 'AccioFailover'; }); + + my %result = %{get_node_roles()}; + is $result{primary_node}, 'AccioPrimary', 'Return correct primary node in hash'; + is $result{failover_node}, 'AccioFailover', 'Return correct failover node in hash'; +}; + +subtest '[find_hana_resource_name]' => sub { + my $db_hana = Test::MockModule->new('sles4sap::database_hana', no_auto => 1); + my $mock_output = ' +primitive rsc_SAPHanaTopology_HDB_HDB00 ocf:suse:SAPHanaTopology \ +not relevant line \ +primitive rsc_SAPHana_HDB_HDB00 ocf:suse:SAPHana \ +another not relevant line \ +'; + + my @calls; + $db_hana->redefine(script_output => sub { @calls = @_; return $mock_output; }); + $db_hana->redefine(assert_script_run => sub { return 0; }); + + my $returned_value = find_hana_resource_name(); + note("\n --> " . 
join("\n --> ", @calls)); + is $returned_value, 'rsc_SAPHana_HDB_HDB00', 'Check for correct value returned'; + +}; + +done_testing; diff --git a/tests/sles4sap/redirection_tests/hana_sr_primary_failover.pm b/tests/sles4sap/redirection_tests/hana_sr_primary_failover.pm new file mode 100644 index 000000000000..b376f2beec63 --- /dev/null +++ b/tests/sles4sap/redirection_tests/hana_sr_primary_failover.pm @@ -0,0 +1,108 @@ +# SUSE's openQA tests +# +# Copyright SUSE LLC +# SPDX-License-Identifier: FSFAP +# Maintainer: QE-SAP +# Summary: Test module performs variants of HANA primary site takeover scenario + +use parent 'sles4sap::sap_deployment_automation_framework::basetest'; + +use warnings; +use strict; +use testapi; +use serial_terminal qw(select_serial_terminal); +use sles4sap::console_redirection; +use hacluster qw(wait_for_idle_cluster wait_until_resources_started show_cluster_parameter); +use sles4sap::sap_host_agent qw(saphostctrl_list_databases parse_instance_name); +use sles4sap::database_hana; +use sles4sap::sapcontrol qw(sapcontrol_process_check sap_show_status_info); +use Carp qw(croak); +use Data::Dumper; + +=head1 SYNOPSIS + +Module executes variants of 'SAP HANA Primary site takeover' scenario on Performance-optimized setup. +Variants of the tests are described here: https://documentation.suse.com/sbp/sap-15/html/SLES4SAP-hana-sr-guide-PerfOpt-15/index.html#cha.s4s.test-cluster +Currently supported variants include Stopping database using 'HDB stop' command and killing DB processes with 'HDB kill -x'. +It is not possible to use this module as standalone but rather scheduling it via 'loadtest' API + +=cut + +sub run { + my ($self, $run_args) = @_; + my @supported_scenarios = ('stop', 'kill'); + my %scenario = %{$run_args->{scenarios}{$self->{name}}}; + my $expected_primary_db = $scenario{primary_db}; + my $expected_failover_db = $scenario{failover_db}; + my $failover_method = $scenario{failover_method}; + + croak "Failover type $failover_method not supported" unless grep /$failover_method/, @supported_scenarios; + record_info('Test INFO', "Performing primary DB failover scenario: $failover_method"); + + # Connect to one of the DB nodes and collect topology data + my %databases = %{$run_args->{redirection_data}{db_hana}}; + for ($expected_primary_db, $expected_failover_db) { + croak("Console redirection: Missing SSH connection data for database $_\nGot:\n" . 
Dumper(\%databases)) + unless $databases{$_}; + } + + my %target_node_data = %{$databases{$expected_primary_db}}; + connect_target_to_serial( + destination_ip => $target_node_data{ip_address}, ssh_user => $target_node_data{ssh_user}, switch_root => 1); + my $node_roles = get_node_roles(); + + record_info('Roles', Dumper($node_roles)); + # Check if cluster node state is correct + die "Incorrect cluster state\nExpected primary: '$expected_primary_db'\nCurrent primary: '$node_roles->{primary_node}'" if + $expected_primary_db ne $node_roles->{primary_node}; + die "Incorrect cluster state\nExpected failover: '$expected_failover_db'\nCurrent failover: '$node_roles->{failover_node}'" if + $expected_failover_db ne $node_roles->{failover_node}; + + # Retrieve database information: DB SID and instance ID + my @db_data = @{(saphostctrl_list_databases())}; + record_info('DB data', Dumper(@db_data)); + die('Multiple databases on one host not supported') if @db_data > 1; + my ($db_sid, $db_id) = @{parse_instance_name($db_data[0]->{instance_name})}; + + # Perform failover on primary + sap_show_status_info(cluster => 1, netweaver => 1, instance_id => $db_id); + wait_until_resources_started(); + wait_for_idle_cluster(); + hdb_stop(instance_id => $db_id, switch_user => lc($db_sid) . 'adm', command => $failover_method); + + # Wait for takeover + record_info('Takeover', "Waiting for node '$node_roles->{failover_node}' to become primary"); + wait_for_failed_resources(); + wait_for_takeover(target_node => $node_roles->{failover_node}); + + # Register and start replication + my $automatic_register = show_cluster_parameter(resource => find_hana_resource_name(), parameter => 'AUTOMATED_REGISTER'); + if ($automatic_register eq 'true') { + record_info('REG: Auto', "Parameter: AUTOMATED_REGISTER=true\nNo action to be done"); + } + else { + record_info('REG: Manual', "Parameter: AUTOMATED_REGISTER=false\nRegistration will be done manually"); + # Failed Primary node will be registered for replication after takeover + register_replica(target_hostname => $node_roles->{primary_node}, instance_id => $db_id, switch_user => lc($db_sid) . 'adm'); + } + + # cleanup resources + assert_script_run('crm resource cleanup'); + + # Wait for database processes to start + record_info('DB wait', "Waiting for database node '$node_roles->{primary_node}' to start"); + sapcontrol_process_check( + instance_id => $db_id, expected_state => 'started', wait_for_state => 'yes', timeout => 600); + record_info('DB started', "All database node '$node_roles->{primary_node}' processes are 'GREEN'"); + + # Wait for cluster co come up + record_info('Cluster wait', 'Waiting for cluster to come up'); + wait_until_resources_started(); + wait_for_idle_cluster(); + record_info('Cluster OK', 'Cluster resources up and running'); + sap_show_status_info(cluster => 1, netweaver => 1, instance_id => $db_id); + + disconnect_target_from_serial(); +} + +1; diff --git a/tests/sles4sap/redirection_tests/hanasr_schedule_tests.pm b/tests/sles4sap/redirection_tests/hanasr_schedule_tests.pm new file mode 100644 index 000000000000..2eebd6a9c008 --- /dev/null +++ b/tests/sles4sap/redirection_tests/hanasr_schedule_tests.pm @@ -0,0 +1,100 @@ +# SUSE's openQA tests +# +# Copyright SUSE LLC +# SPDX-License-Identifier: FSFAP +# Maintainer: QE-SAP +# Summary: Test module is used for scheduling multiple variants of HanaSR failover scenario on primary database. 
+ +use parent 'sles4sap::sap_deployment_automation_framework::basetest'; + +use strict; +use warnings FATAL => 'all'; +use testapi; +use main_common 'loadtest'; +use sles4sap::console_redirection; +use sles4sap::database_hana qw(find_hana_resource_name); +use saputils qw(calculate_hana_topology get_primary_node get_failover_node); +use hacluster qw(set_cluster_parameter); +use Data::Dumper; + +=head1 SYNOPSIS + +Test module is used for scheduling multiple variants of HanaSR failover scenario on primary database. +At the moment, the code supports only SDAF based deployment, but the dependencies can be removed completely in the future. + +B + +=over + +=item B : Switch Cluster parameter 'AUTOMATED_REGISTER' value. Default: false + + + +=back + +=cut + +sub test_flags { + return {fatal => 1, publiccloud_multi_module => 1}; +} + +sub run { + my ($self, $run_args) = @_; + my %databases = %{$run_args->{redirection_data}{db_hana}}; + # Connect to any database cluster node to get topology data + my $target_node = (keys %databases)[0]; + my %target_node_data = %{$databases{$target_node}}; + connect_target_to_serial( + destination_ip => $target_node_data{ip_address}, ssh_user => $target_node_data{ssh_user}, switch_root => '1'); + + my $topology = calculate_hana_topology(input => script_output('SAPHanaSR-showAttr --format=script')); + + # Set AUTOMATED_REGISTER value according to parameter HANA_AUTOMATED_REGISTER with 'false' being the default value + my $automated_register = get_var('HANA_AUTOMATED_REGISTER') ? 'true' : 'false'; + record_info('AUTOMATED_REGISTER', "Cluster parameter 'AUTOMATED_REGISTER' set to $automated_register"); + set_cluster_parameter( + resource => find_hana_resource_name(), parameter => 'AUTOMATED_REGISTER', value => $automated_register); + + # No need for open SSH session anymore + disconnect_target_from_serial(); + + my $primary_db = get_primary_node(topology_data => $topology); + my $primary_site = $topology->{$primary_db}{site}; + my $failover_db = get_failover_node(topology_data => $topology); + my $failover_site = $topology->{$failover_db}{site}; + my %scenarios; + my @failover_actions = split(",", get_var("HANASR_FAILOVER_SCENARIOS", 'stop,kill')); + for my $method (@failover_actions) { + my $test_name = ucfirst($method) . "_DB-$primary_site-$primary_db"; + $scenarios{$test_name} = { + primary_db => $primary_db, + failover_db => $failover_db, + failover_method => $method + }; + loadtest('sles4sap/redirection_tests/hana_sr_primary_failover', name => $test_name, run_args => $run_args, @_); + record_info('Load test', "Scheduling Primary DB failover using '$method' method.\n + Test name: $test_name\n + Primary site $primary_db will be stopped.\n + Failover site $failover_db will take over."); + + # Reverse roles and put cluster into original state using same failover method + $test_name = ucfirst($method) . 
"_DB-$failover_site-$failover_db"; + $scenarios{$test_name} = { + primary_db => $failover_db, + failover_db => $primary_db, + failover_method => $method + }; + loadtest('sles4sap/redirection_tests/hana_sr_primary_failover', name => $test_name, run_args => $run_args, @_); + record_info('Load test', "Scheduling Primary DB failover using '$method' method.\n + Test name: $test_name\n + Primary site $failover_db will be stopped.\n + Failover site $primary_db will take over."); + } + + loadtest('sles4sap/sap_deployment_automation_framework/cleanup', name => "SDAF cleanup", run_args => $run_args, @_); + record_info('Load test', 'Scheduling SDAF cleanup'); + + $run_args->{scenarios} = \%scenarios; +} + +1; diff --git a/tests/sles4sap/redirection_tests/redirection_check.pm b/tests/sles4sap/redirection_tests/redirection_check.pm index a56a026066cc..b76baa644193 100644 --- a/tests/sles4sap/redirection_tests/redirection_check.pm +++ b/tests/sles4sap/redirection_tests/redirection_check.pm @@ -8,7 +8,6 @@ # For more information read 'README.md' use parent 'sles4sap::sap_deployment_automation_framework::basetest'; - use warnings; use strict; use testapi; @@ -29,8 +28,8 @@ sub run { # Check if hostnames matches with what is expected # Check API calls: script_output, assert_script_run - my $hostname_real = script_output('hostname'); - assert_script_run("echo \$(hostname) > /tmp/hostname_$hostname_real"); + my $hostname_real = script_output('hostname', quiet => 1); + assert_script_run("echo \$(hostname) > /tmp/hostname_$hostname_real", quiet => 1); die "Expected hostname '$hostname' does not match hostname returned '$hostname_real'" unless $hostname_real eq $hostname; record_info('API check', "script_output: PASS\nassert_script_run: PASS\nhostname match: PASS"); @@ -39,7 +38,7 @@ sub run { # Check API calls: save_tmp_file, upload_logs upload_logs("/tmp/hostname_$hostname_real"); save_tmp_file('hostname.txt', $hostname); - assert_script_run('curl -s ' . autoinst_url . "/files/hostname.txt| grep $hostname"); + assert_script_run('curl -s ' . autoinst_url . "/files/hostname.txt| grep $hostname", quiet => 1); record_info('API check', "upload_logs: PASS\nsave_tmp_file: PASS\nOpenQA connection: PASS"); disconnect_target_from_serial();