/* -*- Mode: c -*- $Id: Intercomm_merge_c,v 1.1 1999/09/01 20:51:55 wgeorge Exp $
   This test is the same as "Intercomm merge b" except that the constructed
   communicators are freed at the end.  Note that the construction of the
   groups, which uses the mod operator (%) on the rank, and the subsequent
   merging of these groups can lead to unique groups of processes, very much
   mixed up compared to MPI_COMM_WORLD, with the ordering of ranks changed
   and varying numbers of processes included in each new communicator
   (Comm A and Comm B) from each of the IMPI clients and hosts.  This may
   uncover subtle bugs not seen in previous tests and unrelated to the
   operation of MPI_Intercomm_merge().  For example, collective operations
   on these new communicators may show bugs in the determination of "master"
   processes that might not otherwise show up.  These problems could show up
   in the call to MPI_Comm_free(), which internally calls the collective
   MPI_Barrier().  The only collective operation called in this test is
   MPI_Comm_free().  Subsequent Intercomm merge tests exercise these new
   communicators with other collectives.
*/
{
    int i;                    /* scratch index / scratch output variable */
    int total_errors;         /* errors accumulated across all phases */
    int num_errors;           /* errors found in the current phase */
    int nprocs;               /* size of MPI_COMM_WORLD */
    int my_rank;              /* rank in MPI_COMM_WORLD */
    int my_rank_A;            /* rank in merged comm A, or -1 if not a member */
    int my_rank_B;            /* rank in merged comm B, or -1 if not a member */
    int tag;                  /* message tag for the point-to-point exchanges */
    MPI_Comm myComm;          /* intra-communicator of the local sub-group */
    MPI_Comm myFirstComm;     /* first inter-communicator */
    MPI_Comm mySecondComm;    /* second inter-communicator (group 1 only) */
    int membershipKey;        /* my_rank % 3: selects sub-group 0, 1, or 2 */
    MPI_Comm comm_A;          /* merge of groups 0 and 1 */
    MPI_Comm comm_B;          /* merge of groups 1 and 2 */
    int size_A;               /* size of comm_A */
    int size_B;               /* size of comm_B */
    int status[3];            /* receive status */
    int inmsg;                /* received payload */
    int outmsg;               /* sent payload */
    int expected;             /* value a receiver expects */

    MPI_Comm_size(MPI_COMM_WORLD, nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, my_rank);

    total_errors = 0;
    /* BUGFIX: num_errors was read below before ever being assigned. */
    num_errors = 0;

    if (nprocs < 3) {
        /* Three sub-groups are formed with rank % 3, so fewer than 3
           processes cannot exercise the test at all. */
        report_info("Intercomm merge c: There must be more than 2 processes to run the Intercomm create test.");
        report_indeterminate ("Intercomm merge c: Unable to determine test success or failure.");
        total_errors = 1;
    } else {
        /* Split MPI_COMM_WORLD into three sub-groups keyed on rank mod 3. */
        membershipKey = my_rank % 3;
        MPI_Comm_split(MPI_COMM_WORLD, membershipKey, my_rank, myComm);
        MPI_Comm_size(myComm, i);
        report_info("Intercomm merge c: myComm size: %.0f", i);
        MPI_Comm_rank(myComm, i);
        report_info("Intercomm merge c: my rank in myComm: %.0f", i);

        if (membershipKey == 0) {
            /* Group 0 communicates with Group 1 */
            MPI_Intercomm_create(myComm, 0, MPI_COMM_WORLD, 1, 1, myFirstComm);
        } else if (membershipKey == 1) {
            /* Group 1 communicates with Groups 0 and 2 */
            MPI_Intercomm_create(myComm, 0, MPI_COMM_WORLD, 0, 1, myFirstComm);
            MPI_Intercomm_create(myComm, 0, MPI_COMM_WORLD, 2, 12, mySecondComm);
        } else if (membershipKey == 2) {
            /* Group 2 communicates with Group 1 */
            MPI_Intercomm_create(myComm, 0, MPI_COMM_WORLD, 1, 12, myFirstComm);
        }

        total_errors = total_errors + num_errors;
        num_errors = 0;

        if (total_errors > 0) {
            report_fail("Intercomm merge c: Failed in rank %.0f, %.0f errors.", my_rank, total_errors);
        } else {
            /* =============================== */
            /* Now test MPI_Intercomm_merge()  */
            /* =============================== */

            /* Merge the intercommunicators.  The high/low flags (0/1) order
               the two sides consistently in each merged intracommunicator. */
            if (membershipKey == 0) {
                MPI_Intercomm_merge(myFirstComm, 0, comm_A);
                MPI_Comm_size(comm_A, size_A);
            } else if (membershipKey == 1) {
                MPI_Intercomm_merge(myFirstComm, 1, comm_A);
                MPI_Comm_size(comm_A, size_A);
                MPI_Intercomm_merge(mySecondComm, 0, comm_B);
                MPI_Comm_size(comm_B, size_B);
            } else if (membershipKey == 2) {
                MPI_Intercomm_merge(myFirstComm, 1, comm_B);
                MPI_Comm_size(comm_B, size_B);
            }

            /* -1 marks "not a member of this merged communicator". */
            my_rank_A = -1;
            my_rank_B = -1;

            /* Test new intracommunicators */
            if (membershipKey == 0) {
                MPI_Comm_rank(comm_A, my_rank_A);
            } else if (membershipKey == 1) {
                MPI_Comm_rank(comm_A, my_rank_A);
                MPI_Comm_rank(comm_B, my_rank_B);
            } else if (membershipKey == 2) {
                MPI_Comm_rank(comm_B, my_rank_B);
            }

            if (my_rank_A != -1) {
                report_info("Intercomm merge c: Rank %.0f of %.0f in comm A", my_rank_A, size_A);
            }
            if (my_rank_B != -1) {
                report_info("Intercomm merge c: Rank %.0f of %.0f in comm B", my_rank_B, size_B);
            }

            tag = 1000;

            /* Exercise comm_A with simple point-to-point calls: rank 0 in
               comm A sends 123+i to each other rank i, which checks the
               value against its own expectation.
               NOTE(review): the two send loops below were truncated in the
               source ("for (i=1; i 0)"); the bodies and the "< size" /
               "> 0" comparisons were reconstructed from the receive side's
               expected values -- confirm against the original test. */
            if (my_rank_A == 0) {
                for (i = 1; i < size_A; i++) {
                    outmsg = 123 + i;
                    MPI_Send(outmsg, 1, MPI_INT, i, tag, comm_A);
                }
            } else if (my_rank_A > 0) {
                MPI_Recv(inmsg, 1, MPI_INT, 0, tag, comm_A, status);
                expected = 123 + my_rank_A;
                if (inmsg != expected) {
                    report_info("Intercomm_merge c: Error: Rank %.0f in comm A expected %.0f but received %.0f", my_rank_A, expected, inmsg);
                    num_errors = num_errors + 1;
                }
            }
            total_errors = total_errors + num_errors;
            num_errors = 0;

            /* Same exchange on comm B, with base value 246. */
            if (my_rank_B == 0) {
                for (i = 1; i < size_B; i++) {
                    outmsg = 246 + i;
                    MPI_Send(outmsg, 1, MPI_INT, i, tag, comm_B);
                }
            } else if (my_rank_B > 0) {
                MPI_Recv(inmsg, 1, MPI_INT, 0, tag, comm_B, status);
                expected = 246 + my_rank_B;
                if (inmsg != expected) {
                    report_info("Intercomm_merge c: Error: Rank %.0f in comm B expected %.0f but received %.0f", my_rank_B, expected, inmsg);
                    num_errors = num_errors + 1;
                }
            }
            total_errors = total_errors + num_errors;
            num_errors = 0;

            if (total_errors == 0) {
                /* Free the communicators.  MPI_Comm_free() is collective
                   (it internally calls MPI_Barrier()), so every member of
                   each communicator must participate in its free. */
                MPI_Comm_free(myFirstComm);
                if (membershipKey == 1) {
                    MPI_Comm_free(mySecondComm);
                }
                MPI_Comm_free(myComm);
                if (membershipKey == 0) {
                    MPI_Comm_free(comm_A);
                } else if (membershipKey == 1) {
                    MPI_Comm_free(comm_A);
                    MPI_Comm_free(comm_B);
                } else if (membershipKey == 2) {
                    MPI_Comm_free(comm_B);
                }
            }
        }
    }

    if (total_errors == 0) {
        report_pass("Intercomm merge c: Passed in rank %.0f", my_rank);
    } else {
        report_fail("Intercomm merge c: Failed in rank %.0f, %.0f errors.", my_rank, total_errors);
    }
    return;
}