Subject: [gentoo-commits] repo/gentoo:master commit in: sci-libs/pastix/, sci-libs/pastix/files/
From: David Seifert
Date: 2022-01-29 0:00 UTC
To: gentoo-commits

commit:     017a3b4500248d207227590d7728f66f4195db9a
Author:     David Seifert <soap@gentoo.org>
AuthorDate: Sat Jan 29 00:00:15 2022 +0000
Commit:     David Seifert <soap@gentoo.org>
CommitDate: Sat Jan 29 00:00:15 2022 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=017a3b45

sci-libs/pastix: fix building against OpenMPI 4.1+

Closes: https://bugs.gentoo.org/692742
Signed-off-by: David Seifert <soap@gentoo.org>
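
Background: MPI_Address and MPI_Type_struct were deprecated in MPI-2.0 and
removed in MPI-3.0, and Open MPI 4.x no longer provides them in its default
configuration. The replacements MPI_Get_address and MPI_Type_create_struct
take the same arguments, so the patch below is a mechanical rename of the
call sites (plus the matching no-op macros in nompi.h). A minimal sketch of
the replacement pattern, using hypothetical buffer names rather than code
from the PaStiX sources:

/* Sketch only (not PaStiX code): build a two-member struct datatype
   with the MPI-3 calls the patch migrates to. */
#include <mpi.h>

static MPI_Datatype make_struct_type(int *info, double *coef, int ncoef)
{
    int          blocklens[2] = { 4, ncoef };
    MPI_Aint     displs[2];
    MPI_Datatype types[2]     = { MPI_INT, MPI_DOUBLE };
    MPI_Datatype newtype;

    MPI_Get_address(info, &displs[0]);            /* was MPI_Address()     */
    MPI_Get_address(coef, &displs[1]);            /* was MPI_Address()     */
    MPI_Type_create_struct(2, blocklens, displs,  /* was MPI_Type_struct() */
                           types, &newtype);
    MPI_Type_commit(&newtype);
    /* Absolute displacements: send from MPI_BOTTOM; free with MPI_Type_free(). */
    return newtype;
}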

 sci-libs/pastix/files/pastix-5.2.3-MPI-3.0.patch | 139 +++++++++++++++++++++++
 sci-libs/pastix/pastix-5.2.3.ebuild              |   5 +-
 2 files changed, 142 insertions(+), 2 deletions(-)

diff --git a/sci-libs/pastix/files/pastix-5.2.3-MPI-3.0.patch b/sci-libs/pastix/files/pastix-5.2.3-MPI-3.0.patch
new file mode 100644
index 000000000000..d86ec5b78416
--- /dev/null
+++ b/sci-libs/pastix/files/pastix-5.2.3-MPI-3.0.patch
@@ -0,0 +1,139 @@
+https://bugs.gentoo.org/692742
+rename MPI 1.0 to 3.0+ functions:
+- MPI_Address -> MPI_Get_address
+- MPI_Type_struct -> MPI_Type_create_struct
+
+--- a/common/src/nompi.h
++++ b/common/src/nompi.h
+@@ -127,9 +127,9 @@
+ #define MPI_Start(request)
+ #define MPI_Startall(count, array_of_requests)
+ #define MPI_Type_contiguous(count, oldtype, newtype)
+-#define MPI_Type_struct(count, array_of_blocklengths, array_of_displacement, \
++#define MPI_Type_create_struct(count, array_of_blocklengths, array_of_displacement, \
+                         oldtype, newtype)
+-#define MPI_Address(location, newtype)
++#define MPI_Get_address(location, newtype)
+ #define MPI_Type_commit(datatype)
+ #define MPI_Type_free(datatype)
+ #define MPI_Request_free(request)
+--- a/sopalin/src/sopalin_sendrecv.c
++++ b/sopalin/src/sopalin_sendrecv.c
+@@ -1300,10 +1300,10 @@
+   thread_data->gtaboffs[packnbr]   = FANIN_INFOTAB(t);
+   thread_data->gtaboffs[packnbr+1] = FANIN_COEFTAB(t);
+ #else /* NO_MPI_TYPE */
+-  CALL_MPI MPI_Address(FANIN_INFOTAB(t),&(thread_data->gtaboffs[packnbr]));
+-  TEST_MPI("MPI_Address");
+-  CALL_MPI MPI_Address(FANIN_COEFTAB(t),&(thread_data->gtaboffs[packnbr+1]));
+-  TEST_MPI("MPI_Address");
++  CALL_MPI MPI_Get_address(FANIN_INFOTAB(t),&(thread_data->gtaboffs[packnbr]));
++  TEST_MPI("MPI_Get_address");
++  CALL_MPI MPI_Get_address(FANIN_COEFTAB(t),&(thread_data->gtaboffs[packnbr+1]));
++  TEST_MPI("MPI_Get_address");
+ #endif /* NO_MPI_TYPE */
+ 
+   /* Add other contribution for the same task */
+@@ -1410,12 +1410,12 @@
+           thread_data->gtaboffs[packnbr]   = FANIN_INFOTAB(t);
+           thread_data->gtaboffs[packnbr+1] = FANIN_COEFTAB(t);
+ #else /* NO_MPI_TYPE */
+-          CALL_MPI MPI_Address(FANIN_INFOTAB(t),
++          CALL_MPI MPI_Get_address(FANIN_INFOTAB(t),
+                                &(thread_data->gtaboffs[packnbr]));
+-          TEST_MPI("MPI_Address");
+-          CALL_MPI MPI_Address(FANIN_COEFTAB(t),
++          TEST_MPI("MPI_Get_address");
++          CALL_MPI MPI_Get_address(FANIN_COEFTAB(t),
+                                &(thread_data->gtaboffs[packnbr+1]));
+-          TEST_MPI("MPI_Address");
++          TEST_MPI("MPI_Get_address");
+ #endif /* NO_MPI_TYPE */
+ 
+           if (queueSize(sendqueue))
+@@ -1508,10 +1508,10 @@
+   TEST_MPI("MPI_Rsend");
+ #  endif
+ #else /* NO_MPI_TYPE */
+-  CALL_MPI MPI_Type_struct(2*(packnbr/2+1), thread_data->gtabsize,
++  CALL_MPI MPI_Type_create_struct(2*(packnbr/2+1), thread_data->gtabsize,
+                            thread_data->gtaboffs,
+                            thread_data->gtabtype, &newtype);
+-  TEST_MPI("MPI_Type_struct");
++  TEST_MPI("MPI_Type_create_struct");
+   CALL_MPI MPI_Type_commit(&newtype);
+   TEST_MPI("MPI_Type_commit");
+ #  ifdef TEST_ISEND
+@@ -1602,15 +1602,15 @@
+               (long)me, (unsigned int)(intptr_t)BTAG_COEFTAB(t));
+ 
+ #ifndef NO_MPI_TYPE
+-  CALL_MPI MPI_Address(BTAG_BTAGTAB(t),&(taboffs[0]));
+-  TEST_MPI("MPI_Address");
+-  CALL_MPI MPI_Address(BTAG_BCOFTAB(t),&(taboffs[1]));
+-  TEST_MPI("MPI_Address");
+-  CALL_MPI MPI_Address((void *)BTAG_COEFTAB(t),&(taboffs[2]));
+-  TEST_MPI("MPI_Address");
++  CALL_MPI MPI_Get_address(BTAG_BTAGTAB(t),&(taboffs[0]));
++  TEST_MPI("MPI_Get_address");
++  CALL_MPI MPI_Get_address(BTAG_BCOFTAB(t),&(taboffs[1]));
++  TEST_MPI("MPI_Get_address");
++  CALL_MPI MPI_Get_address((void *)BTAG_COEFTAB(t),&(taboffs[2]));
++  TEST_MPI("MPI_Get_address");
+ 
+-  CALL_MPI MPI_Type_struct(3,tabsize,taboffs,tabtype,&newtype);
+-  TEST_MPI("MPI_Type_struct");
++  CALL_MPI MPI_Type_create_struct(3,tabsize,taboffs,tabtype,&newtype);
++  TEST_MPI("MPI_Type_create_struct");
+ 
+   CALL_MPI MPI_Type_commit(&newtype);
+   TEST_MPI("MPI_Type_commit");
+--- a/sopalin/src/updo_sendrecv.c
++++ b/sopalin/src/updo_sendrecv.c
+@@ -347,12 +347,12 @@
+   tabtype[0] = COMM_INT;
+   tabtype[1] = COMM_FLOAT;
+ 
+-  CALL_MPI MPI_Address(infotab,&(taboffs[0]));
+-  TEST_MPI("MPI_Address");
+-  CALL_MPI MPI_Address(FANIN_COEFTAB(SOLV_FTGTIND(j)),&(taboffs[1]));
+-  TEST_MPI("MPI_Address");
+-  CALL_MPI MPI_Type_struct(2,tabsize,taboffs,tabtype,&newtype);
+-  TEST_MPI("MPI_Type_struct");
++  CALL_MPI MPI_Get_address(infotab,&(taboffs[0]));
++  TEST_MPI("MPI_Get_address");
++  CALL_MPI MPI_Get_address(FANIN_COEFTAB(SOLV_FTGTIND(j)),&(taboffs[1]));
++  TEST_MPI("MPI_Get_address");
++  CALL_MPI MPI_Type_create_struct(2,tabsize,taboffs,tabtype,&newtype);
++  TEST_MPI("MPI_Type_create_struct");
+   CALL_MPI MPI_Type_commit(&newtype);
+   TEST_MPI("MPI_Type_commit");
+ #  endif /* NO_MPI_TYPE */
+@@ -974,8 +974,8 @@
+ 
+   tabtype[0] = COMM_INT;
+ 
+-  CALL_MPI MPI_Address(infotab,&(taboffs[0]));
+-  TEST_MPI("MPI_Address");
++  CALL_MPI MPI_Get_address(infotab,&(taboffs[0]));
++  TEST_MPI("MPI_Get_address");
+ 
+   /* If schur, send empty data */
+   if ((sopalin_data->sopar->iparm[IPARM_SCHUR] == API_YES &&
+@@ -988,12 +988,12 @@
+     for (iter=1; iter<UPDOWN_SM2XNBR+1; iter++) {
+       tabsize[iter] = size;
+       tabtype[iter] = COMM_FLOAT;
+-      CALL_MPI MPI_Address(gb+((iter-1)*UPDOWN_SM2XSZE),&(taboffs[iter]));
+-      TEST_MPI("MPI_Address");
++      CALL_MPI MPI_Get_address(gb+((iter-1)*UPDOWN_SM2XSZE),&(taboffs[iter]));
++      TEST_MPI("MPI_Get_address");
+     }
+   }
+-  CALL_MPI MPI_Type_struct(UPDOWN_SM2XNBR+1,tabsize,taboffs,tabtype,&newtype);
+-  TEST_MPI("MPI_Type_struct");
++  CALL_MPI MPI_Type_create_struct(UPDOWN_SM2XNBR+1,tabsize,taboffs,tabtype,&newtype);
++  TEST_MPI("MPI_Type_create_struct");
+   CALL_MPI MPI_Type_commit(&newtype);
+   TEST_MPI("MPI_Type_commit");
+ 
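
For reference, an alternative to renaming every call site is a small
compile-time shim keyed off the standard MPI_VERSION macro; a minimal
sketch of that approach (not part of this patch, and not used by PaStiX):

/* Compatibility sketch only: when building against an MPI-3 implementation,
   map the removed MPI-1 names onto their replacements so legacy call sites
   keep compiling. */
#include <mpi.h>

#if defined(MPI_VERSION) && MPI_VERSION >= 3
#  define MPI_Address(loc, addr)  MPI_Get_address((loc), (addr))
#  define MPI_Type_struct(count, blocklens, displs, types, newtype) \
          MPI_Type_create_struct((count), (blocklens), (displs), (types), (newtype))
#endif

The committed patch instead renames the call sites directly.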

diff --git a/sci-libs/pastix/pastix-5.2.3.ebuild b/sci-libs/pastix/pastix-5.2.3.ebuild
index 2b9322818a96..3d6c4757eed5 100644
--- a/sci-libs/pastix/pastix-5.2.3.ebuild
+++ b/sci-libs/pastix/pastix-5.2.3.ebuild
@@ -1,4 +1,4 @@
-# Copyright 1999-2021 Gentoo Authors
+# Copyright 1999-2022 Gentoo Authors
 # Distributed under the terms of the GNU General Public License v2
 
 EAPI=7
@@ -18,6 +18,7 @@ PID=218
 DESCRIPTION="Parallel solver for very large sparse linear systems"
 HOMEPAGE="https://pastix.gforge.inria.fr"
 SRC_URI="https://gforge.inria.fr/frs/download.php/latestfile/${PID}/${PN}_${PV}.tar.bz2"
+S="${WORKDIR}/${PN}_${PV}/src"
 
 LICENSE="CeCILL-C"
 SLOT="0"
@@ -33,7 +34,7 @@ RDEPEND="
 DEPEND="${RDEPEND}"
 BDEPEND="virtual/pkgconfig"
 
-S="${WORKDIR}/${PN}_${PV}/src"
+PATCHES=( "${FILESDIR}"/${P}-MPI-3.0.patch )
 
 src_prepare() {
 	default

