* [gentoo-commits] repo/proj/guru:dev commit in: sci-physics/SU2/, sci-physics/SU2/files/
@ 2020-05-11 23:04 Sergey Torokhov
0 siblings, 0 replies; 6+ messages in thread
From: Sergey Torokhov @ 2020-05-11 23:04 UTC (permalink / raw
To: gentoo-commits
commit: 021c7068ab1f61ef171029cbb243d13f1b33237e
Author: Sergey Torokhov <torokhov-s-a <AT> yandex <DOT> ru>
AuthorDate: Mon May 11 22:56:45 2020 +0000
Commit: Sergey Torokhov <torokhov-s-a <AT> yandex <DOT> ru>
CommitDate: Mon May 11 22:56:45 2020 +0000
URL: https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=021c7068
sci-physics/SU2: new package
The SU2 package contains several bundled libraries that currently
aren't unbundled in the ebuild.
CGNS:
Tried to unbundle CGNS, but it failed to compile against the system
cgnslib-3.3.0; it compiled successfully against cgnslib-3.4.0,
but the related tests failed.
Metis, Parmetis:
These packages can't be installed simultaneously in Gentoo,
and they are required by the build system when compiling via the
meson build system with the mpi option enabled. They could be
optionally chosen if the autotools build system were used
(the ebuild uses meson).
Some additional features are disabled due to their experimental status
or due to the requirement to download additional third-party libraries.
At the moment the ebuild doesn't provide such features to be built.
They would also be bundled and compiled statically if implemented.
Signed-off-by: Sergey Torokhov <torokhov-s-a <AT> yandex.ru>
sci-physics/SU2/Manifest | 3 +
sci-physics/SU2/SU2-7.0.4.ebuild | 112 +
sci-physics/SU2/files/SU2-7.0.4-fix-env.patch | 24 +
.../SU2/files/SU2-7.0.4-fix-python-optimize.patch | 2302 ++++++++++++++++++++
.../SU2/files/SU2-7.0.4-unbundle_boost.patch | 31 +
sci-physics/SU2/metadata.xml | 28 +
6 files changed, 2500 insertions(+)
diff --git a/sci-physics/SU2/Manifest b/sci-physics/SU2/Manifest
new file mode 100644
index 0000000..6ebe00f
--- /dev/null
+++ b/sci-physics/SU2/Manifest
@@ -0,0 +1,3 @@
+DIST SU2-7.0.4-TestCases.tar.gz 437960103 BLAKE2B 2469edc23f62589fa18be5fff6e036965f6b5f6e2be207642d318aac4d2044c07f0891568f86c1a3ab065e79afce50cc73ad0857b82093d79ac28a4d0451a4ad SHA512 f21d963815e024582e99647a21ebae0b17fc69f75bc34bb72cc3a86cc9ff8502342b31755b5da73e7088b4d0ce430bdd6b4efefc03583cbfcf5156c1849328e1
+DIST SU2-7.0.4-Tutorials.tar.gz 64282233 BLAKE2B b0d13a0988d5617868fad6098fe8110e3600415f05784ff04416cb23162fadc8c1d06d50c5200b14f65afb3e97ee766b21dfdcd4ec8ded9026baf510ca829e48 SHA512 604a05e15a8eae1c7255016261a6576a97fc364f66004ecaccaae932e3a97624c2599d354dd874562824caa8f8ea3dac2f03e0105b1c27d66ec0bf59e3a27105
+DIST SU2-7.0.4.tar.gz 20516147 BLAKE2B 21f45e4918bbc6a72bf47ad61d3301abed50a7cf569e9e8d4040201ff653e583d50a547853365302671922f023d0cc6f3735c1afcd0f3b6bf3c3fc92dc807787 SHA512 8e69f0e1d335adef0bd98666c98e29bc15ee0d7a0fcbbbc91a1ba02275ca52fda7f8f47434547f7982ce0e73a6ff78bd2ed57ca328d1e87b8afdd3b0a698d262
diff --git a/sci-physics/SU2/SU2-7.0.4.ebuild b/sci-physics/SU2/SU2-7.0.4.ebuild
new file mode 100644
index 0000000..1884b11
--- /dev/null
+++ b/sci-physics/SU2/SU2-7.0.4.ebuild
@@ -0,0 +1,112 @@
+# Copyright 1999-2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=7
+
+PYTHON_COMPAT=( python3_{6,7,8} )
+
+inherit meson python-single-r1
+
+DESCRIPTION="SU2: An Open-Source Suite for Multiphysics Simulation and Design"
+HOMEPAGE="https://su2code.github.io/"
+SRC_URI="
+ https://github.com/su2code/SU2/archive/v${PV}.tar.gz -> ${P}.tar.gz
+ test? ( https://github.com/su2code/TestCases/archive/v${PV}.tar.gz -> ${P}-TestCases.tar.gz )
+ tutorials? ( https://github.com/su2code/Tutorials/archive/v${PV}.tar.gz -> ${P}-Tutorials.tar.gz )
+"
+
+LICENSE="LGPL-2.1"
+SLOT="0"
+KEYWORDS="~amd64"
+
+IUSE="cgns -mkl +mpi openblas tecio test tutorials"
+RESTRICT="!test? ( test )"
+REQUIRED_USE="
+ ${PYTHON_REQUIRED_USE}
+ mkl? ( !openblas )
+"
+
+RDEPEND="
+ ${PYTHON_DEPS}
+ mpi? ( virtual/mpi[cxx] )
+ mkl? ( sci-libs/mkl )
+ openblas? ( sci-libs/openblas )
+"
+DEPEND="
+ ${RDEPEND}
+ tecio? ( dev-libs/boost:= )
+"
+BDEPEND="virtual/pkgconfig"
+
+PATCHES=(
+ "${FILESDIR}/${P}-fix-env.patch"
+ "${FILESDIR}/${P}-unbundle_boost.patch"
+ "${FILESDIR}/${P}-fix-python-optimize.patch"
+)
+
+DOCS=( "LICENSE.md" "README.md" "SU2_PY/documentation.txt" )
+
+src_unpack() {
+ unpack "${P}.tar.gz"
+ if use test ; then
+ einfo "Unpacking ${P}-TestCases.tar.gz to /var/tmp/portage/sci-physics/${P}/work/${P}/TestCases"
+ tar -C "${P}"/TestCases --strip-components=1 -xzf "${DISTDIR}/${P}-TestCases.tar.gz" || die
+ fi
+ if use tutorials ; then
+ einfo "Unpacking ${P}-Tutorials.tar.gz to /var/tmp/portage/sci-physics/${P}/work/${P}"
+ mkdir "${P}"/Tutorials
+ tar -C "${P}"/Tutorials --strip-components=1 -xzf "${DISTDIR}/${P}-Tutorials.tar.gz" || die
+ fi
+}
+
+src_configure() {
+ local emesonargs=(
+ -Denable-autodiff=false
+ -Denable-directdiff=false
+ -Denable-pastix=false
+ -Denable-pywrapper=false
+ -Dwith-omp=false
+ $(meson_feature mpi with-mpi)
+ $(meson_use cgns enable-cgns)
+ $(meson_use mkl enable-mkl)
+ $(meson_use openblas enable-openblas)
+ $(meson_use tecio enable-tecio)
+ $(meson_use test enable-tests)
+ )
+ meson_src_configure
+}
+
+src_test() {
+ ln -s ../../${P}-build/SU2_CFD/src/SU2_CFD SU2_PY/SU2_CFD
+ ln -s ../../${P}-build/SU2_DEF/src/SU2_DEF SU2_PY/SU2_DEF
+ ln -s ../../${P}-build/SU2_DOT/src/SU2_DOT SU2_PY/SU2_DOT
+ ln -s ../../${P}-build/SU2_GEO/src/SU2_GEO SU2_PY/SU2_GEO
+ ln -s ../../${P}-build/SU2_MSH/src/SU2_MSH SU2_PY/SU2_MSH
+ ln -s ../../${P}-build/SU2_SOL/src/SU2_SOL SU2_PY/SU2_SOL
+
+ export SU2_RUN="${S}/SU2_PY"
+ export SU2_HOME="${S}"
+ export PATH=$PATH:$SU2_RUN
+ export PYTHONPATH=$PYTHONPATH:$SU2_RUN
+
+ einfo "Running UnitTests ..."
+ ../${P}-build/UnitTests/test_driver
+
+ pushd TestCases/
+ use mpi && python parallel_regression.py
+ use mpi || python serial_regression.py
+ use tutorials && use mpi && python tutorials.py
+ popd
+}
+
+src_install() {
+ meson_src_install
+ mkdir -p "${ED}$(python_get_sitedir)"
+ mv "${ED}"/usr/bin/{FSI,SU2,*.py} -t "${ED}$(python_get_sitedir)"
+ python_optimize "${D}/$(python_get_sitedir)"
+
+ if use tutorials ; then
+ insinto "/usr/share/${P}"
+ doins -r Tutorials
+ fi
+}
diff --git a/sci-physics/SU2/files/SU2-7.0.4-fix-env.patch b/sci-physics/SU2/files/SU2-7.0.4-fix-env.patch
new file mode 100644
index 0000000..3f65764
--- /dev/null
+++ b/sci-physics/SU2/files/SU2-7.0.4-fix-env.patch
@@ -0,0 +1,24 @@
+diff -Naur old_env/SU2_CFD/include/output/tools/CWindowingTools.hpp new_env/SU2_CFD/include/output/tools/CWindowingTools.hpp
+--- old_env/SU2_CFD/include/output/tools/CWindowingTools.hpp 2020-03-31 12:26:03.000000000 +0300
++++ new_env/SU2_CFD/include/output/tools/CWindowingTools.hpp 2020-05-10 17:04:24.000000000 +0300
+@@ -28,7 +28,7 @@
+ #pragma once
+
+ #include <vector>
+-#include "../../../Common/include/option_structure.hpp"
++#include "../../../../Common/include/option_structure.hpp"
+
+ class CWindowingTools{
+ public:
+diff -Naur old_env/UnitTests/meson.build new_env/UnitTests/meson.build
+--- old_env/UnitTests/meson.build 2020-05-10 17:03:43.000000000 +0300
++++ new_env/UnitTests/meson.build 2020-05-10 17:04:35.000000000 +0300
+@@ -24,7 +24,7 @@
+ test_driver = executable(
+ 'test_driver',
+ unit_test_files,
+- install : true,
++ install : false,
+ dependencies : [su2_cfd_dep, common_dep, su2_deps, catch2_dep],
+ cpp_args: ['-fPIC', default_warning_flags, su2_cpp_args]
+ )
diff --git a/sci-physics/SU2/files/SU2-7.0.4-fix-python-optimize.patch b/sci-physics/SU2/files/SU2-7.0.4-fix-python-optimize.patch
new file mode 100644
index 0000000..6ad7387
--- /dev/null
+++ b/sci-physics/SU2/files/SU2-7.0.4-fix-python-optimize.patch
@@ -0,0 +1,2302 @@
+diff -Naur old/SU2_PY/FSI/FSIInterface.py new/SU2_PY/FSI/FSIInterface.py
+--- old/SU2_PY/FSI/FSIInterface.py 2020-05-01 19:09:18.000000000 +0300
++++ new/SU2_PY/FSI/FSIInterface.py 2020-05-10 16:17:07.000000000 +0300
+@@ -6,8 +6,8 @@
+ # \version 7.0.4 "Blackbird"
+ #
+ # SU2 Project Website: https://su2code.github.io
+-#
+-# The SU2 Project is maintained by the SU2 Foundation
++#
++# The SU2 Project is maintained by the SU2 Foundation
+ # (http://su2foundation.org)
+ #
+ # Copyright 2012-2020, SU2 Contributors (cf. AUTHORS.md)
+@@ -16,7 +16,7 @@
+ # modify it under the terms of the GNU Lesser General Public
+ # License as published by the Free Software Foundation; either
+ # version 2.1 of the License, or (at your option) any later version.
+-#
++#
+ # SU2 is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+@@ -42,19 +42,19 @@
+ # ----------------------------------------------------------------------
+
+ class Interface:
+- """
++ """
+ FSI interface class that handles fluid/solid solvers synchronisation and communication
+ """
+-
++
+ def __init__(self, FSI_config, FluidSolver, SolidSolver, have_MPI):
+- """
+- Class constructor. Declare some variables and do some screen outputs.
+- """
+-
++ """
++ Class constructor. Declare some variables and do some screen outputs.
++ """
++
+ if have_MPI == True:
+ from mpi4py import MPI
+ self.MPI = MPI
+- self.comm = MPI.COMM_WORLD #MPI World communicator
++ self.comm = MPI.COMM_WORLD #MPI World communicator
+ self.have_MPI = True
+ myid = self.comm.Get_rank()
+ else:
+@@ -62,42 +62,42 @@
+ self.have_MPI = False
+ myid = 0
+
+- self.rootProcess = 0 #the root process is chosen to be MPI rank = 0
++ self.rootProcess = 0 #the root process is chosen to be MPI rank = 0
+
+- self.nDim = FSI_config['NDIM'] #problem dimension
++ self.nDim = FSI_config['NDIM'] #problem dimension
+
+- self.haveFluidSolver = False #True if the fluid solver is initialized on the current rank
+- self.haveSolidSolver = False #True if the solid solver is initialized on the current rank
+- self.haveFluidInterface = False #True if the current rank owns at least one fluid interface node
+- self.haveSolidInterface = False #True if the current rank owns at least one solid interface node
++ self.haveFluidSolver = False #True if the fluid solver is initialized on the current rank
++ self.haveSolidSolver = False #True if the solid solver is initialized on the current rank
++ self.haveFluidInterface = False #True if the current rank owns at least one fluid interface node
++ self.haveSolidInterface = False #True if the current rank owns at least one solid interface node
+
+- self.fluidSolverProcessors = list() #list of partitions where the fluid solver is initialized
+- self.solidSolverProcessors = list() #list of partitions where the solid solver is initialized
++ self.fluidSolverProcessors = list() #list of partitions where the fluid solver is initialized
++ self.solidSolverProcessors = list() #list of partitions where the solid solver is initialized
+ self.fluidInterfaceProcessors = list() #list of partitions where there are fluid interface nodes
+- self.solidInterfaceProcessors = list() #list of partitions where there are solid interface nodes
++ self.solidInterfaceProcessors = list() #list of partitions where there are solid interface nodes
+
+- self.fluidInterfaceIdentifier = None #object that can identify the f/s interface within the fluid solver
+- self.solidInterfaceIdentifier = None #object that can identify the f/s interface within the solid solver
++ self.fluidInterfaceIdentifier = None #object that can identify the f/s interface within the fluid solver
++ self.solidInterfaceIdentifier = None #object that can identify the f/s interface within the solid solver
+
+- self.fluidGlobalIndexRange = {} #contains the global FSI indexing of each fluid interface node for all partitions
+- self.solidGlobalIndexRange = {} #contains the global FSI indexing of each solid interface node for all partitions
++ self.fluidGlobalIndexRange = {} #contains the global FSI indexing of each fluid interface node for all partitions
++ self.solidGlobalIndexRange = {} #contains the global FSI indexing of each solid interface node for all partitions
+
+- self.FluidHaloNodeList = {} #contains the the indices (fluid solver indexing) of the halo nodes for each partition
+- self.fluidIndexing = {} #links between the fluid solver indexing and the FSI indexing for the interface nodes
+- self.SolidHaloNodeList = {} #contains the the indices (solid solver indexing) of the halo nodes for each partition
+- self.solidIndexing = {} #links between the solid solver indexing and the FSI indexing for the interface nodes
+-
+- self.nLocalFluidInterfaceNodes = 0 #number of nodes (halo nodes included) on the fluid interface, on each partition
+- self.nLocalFluidInterfaceHaloNode = 0 #number of halo nodes on the fluid intrface, on each partition
+- self.nLocalFluidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the fluid interface, on each partition
+- self.nFluidInterfaceNodes = 0 #number of nodes on the fluid interface, sum over all the partitions
+- self.nFluidInterfacePhysicalNodes = 0 #number of physical nodes on the fluid interface, sum over all partitions
+-
+- self.nLocalSolidInterfaceNodes = 0 #number of physical nodes on the solid interface, on each partition
+- self.nLocalSolidInterfaceHaloNode = 0 #number of halo nodes on the solid intrface, on each partition
+- self.nLocalSolidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the solid interface, on each partition
+- self.nSolidInterfaceNodes = 0 #number of nodes on the solid interface, sum over all partitions
+- self.nSolidInterfacePhysicalNodes = 0 #number of physical nodes on the solid interface, sum over all partitions
++ self.FluidHaloNodeList = {} #contains the the indices (fluid solver indexing) of the halo nodes for each partition
++ self.fluidIndexing = {} #links between the fluid solver indexing and the FSI indexing for the interface nodes
++ self.SolidHaloNodeList = {} #contains the the indices (solid solver indexing) of the halo nodes for each partition
++ self.solidIndexing = {} #links between the solid solver indexing and the FSI indexing for the interface nodes
++
++ self.nLocalFluidInterfaceNodes = 0 #number of nodes (halo nodes included) on the fluid interface, on each partition
++ self.nLocalFluidInterfaceHaloNode = 0 #number of halo nodes on the fluid intrface, on each partition
++ self.nLocalFluidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the fluid interface, on each partition
++ self.nFluidInterfaceNodes = 0 #number of nodes on the fluid interface, sum over all the partitions
++ self.nFluidInterfacePhysicalNodes = 0 #number of physical nodes on the fluid interface, sum over all partitions
++
++ self.nLocalSolidInterfaceNodes = 0 #number of physical nodes on the solid interface, on each partition
++ self.nLocalSolidInterfaceHaloNode = 0 #number of halo nodes on the solid intrface, on each partition
++ self.nLocalSolidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the solid interface, on each partition
++ self.nSolidInterfaceNodes = 0 #number of nodes on the solid interface, sum over all partitions
++ self.nSolidInterfacePhysicalNodes = 0 #number of physical nodes on the solid interface, sum over all partitions
+
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
+ self.MappingMatrixA = None
+@@ -106,83 +106,83 @@
+ self.MappingMatrixB_T = None
+ self.d_RBF = self.nDim+1
+ else:
+- self.MappingMatrix = None #interpolation/mapping matrix for meshes interpolation/mapping
+- self.MappingMatrix_T = None #transposed interpolation/mapping matrix for meshes interpolation/mapping
++ self.MappingMatrix = None #interpolation/mapping matrix for meshes interpolation/mapping
++ self.MappingMatrix_T = None #transposed interpolation/mapping matrix for meshes interpolation/mapping
+ self.d_RBF = 0
+
+- self.localFluidInterface_array_X_init = None #initial fluid interface position on each partition (used for the meshes mapping)
++ self.localFluidInterface_array_X_init = None #initial fluid interface position on each partition (used for the meshes mapping)
+ self.localFluidInterface_array_Y_init = None
+ self.localFluidInterface_array_Z_init = None
+
+- self.haloNodesPositionsInit = {} #initial position of the halo nodes (fluid side only)
++ self.haloNodesPositionsInit = {} #initial position of the halo nodes (fluid side only)
+
+- self.solidInterface_array_DispX = None #solid interface displacement
++ self.solidInterface_array_DispX = None #solid interface displacement
+ self.solidInterface_array_DispY = None
+ self.solidInterface_array_DispZ = None
+
+- self.solidInterfaceResidual_array_X = None #solid interface position residual
++ self.solidInterfaceResidual_array_X = None #solid interface position residual
+ self.solidInterfaceResidual_array_Y = None
+ self.solidInterfaceResidual_array_Z = None
+
+- self.solidInterfaceResidualnM1_array_X = None #solid interface position residual at the previous BGS iteration
++ self.solidInterfaceResidualnM1_array_X = None #solid interface position residual at the previous BGS iteration
+ self.solidInterfaceResidualnM1_array_Y = None
+ self.solidInterfaceResidualnM1_array_Z = None
+-
+- self.fluidInterface_array_DispX = None #fluid interface displacement
++
++ self.fluidInterface_array_DispX = None #fluid interface displacement
+ self.fluidInterface_array_DispY = None
+ self.fluidInterface_array_DispZ = None
+
+- self.fluidLoads_array_X = None #loads on the fluid side of the f/s interface
++ self.fluidLoads_array_X = None #loads on the fluid side of the f/s interface
+ self.fluidLoads_array_Y = None
+ self.fluidLoads_array_Z = None
+
+- self.solidLoads_array_X = None #loads on the solid side of the f/s interface
++ self.solidLoads_array_X = None #loads on the solid side of the f/s interface
+ self.solidLoads_array_Y = None
+ self.solidLoads_array_Z = None
+
+- self.aitkenParam = FSI_config['AITKEN_PARAM'] #relaxation parameter for the BGS method
+- self.FSIIter = 0 #current FSI iteration
+- self.unsteady = False #flag for steady or unsteady simulation (default is steady)
+-
+- # ---Some screen output ---
+- self.MPIPrint('Fluid solver : SU2_CFD')
+- self.MPIPrint('Solid solver : {}'.format(FSI_config['CSD_SOLVER']))
++ self.aitkenParam = FSI_config['AITKEN_PARAM'] #relaxation parameter for the BGS method
++ self.FSIIter = 0 #current FSI iteration
++ self.unsteady = False #flag for steady or unsteady simulation (default is steady)
++
++ # ---Some screen output ---
++ self.MPIPrint('Fluid solver : SU2_CFD')
++ self.MPIPrint('Solid solver : {}'.format(FSI_config['CSD_SOLVER']))
+
+- if FSI_config['TIME_MARCHING'] == 'YES':
++ if FSI_config['TIME_MARCHING'] == 'YES':
+ self.MPIPrint('Unsteady coupled simulation with physical time step : {} s'.format(FSI_config['UNST_TIMESTEP']))
+ self.unsteady = True
+- else:
+- self.MPIPrint('Steady coupled simulation')
++ else:
++ self.MPIPrint('Steady coupled simulation')
+
+- if FSI_config['MATCHING_MESH'] == 'YES':
+- self.MPIPrint('Matching fluid-solid interface')
+- else:
++ if FSI_config['MATCHING_MESH'] == 'YES':
++ self.MPIPrint('Matching fluid-solid interface')
++ else:
+ if FSI_config['MESH_INTERP_METHOD'] == 'TPS':
+- self.MPIPrint('Non matching fluid-solid interface with Thin Plate Spline interpolation')
++ self.MPIPrint('Non matching fluid-solid interface with Thin Plate Spline interpolation')
+ elif FSI_config['MESH_INTERP_METHOD'] == 'RBF':
+ self.MPIPrint('Non matching fluid-solid interface with Radial Basis Function interpolation')
+ self.RBF_rad = FSI_config['RBF_RADIUS']
+- self.MPIPrint('Radius value : {}'.format(self.RBF_rad))
++ self.MPIPrint('Radius value : {}'.format(self.RBF_rad))
+ else:
+- self.MPIPrint('Non matching fluid-solid interface with Nearest Neighboor interpolation')
++ self.MPIPrint('Non matching fluid-solid interface with Nearest Neighboor interpolation')
+
+- self.MPIPrint('Solid predictor : {}'.format(FSI_config['DISP_PRED']))
++ self.MPIPrint('Solid predictor : {}'.format(FSI_config['DISP_PRED']))
+
+- self.MPIPrint('Maximum number of FSI iterations : {}'.format(FSI_config['NB_FSI_ITER']))
++ self.MPIPrint('Maximum number of FSI iterations : {}'.format(FSI_config['NB_FSI_ITER']))
+
+- self.MPIPrint('FSI tolerance : {}'.format(FSI_config['FSI_TOLERANCE']))
++ self.MPIPrint('FSI tolerance : {}'.format(FSI_config['FSI_TOLERANCE']))
+
+- if FSI_config['AITKEN_RELAX'] == 'STATIC':
+- self.MPIPrint('Static Aitken under-relaxation with constant parameter {}'.format(FSI_config['AITKEN_PARAM']))
+- elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC':
+- self.MPIPrint('Dynamic Aitken under-relaxation with initial parameter {}'.format(FSI_config['AITKEN_PARAM']))
+- else:
+- self.MPIPrint('No Aitken under-relaxation')
++ if FSI_config['AITKEN_RELAX'] == 'STATIC':
++ self.MPIPrint('Static Aitken under-relaxation with constant parameter {}'.format(FSI_config['AITKEN_PARAM']))
++ elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC':
++ self.MPIPrint('Dynamic Aitken under-relaxation with initial parameter {}'.format(FSI_config['AITKEN_PARAM']))
++ else:
++ self.MPIPrint('No Aitken under-relaxation')
+
+ self.MPIPrint('FSI interface is set')
+
+ def MPIPrint(self, message):
+- """
++ """
+ Print a message on screen only from the master process.
+ """
+
+@@ -198,28 +198,28 @@
+ """
+ Perform a synchronization barrier in case of parallel run with MPI.
+ """
+-
++
+ if self.have_MPI == True:
+ self.comm.barrier()
+
+ def connect(self, FSI_config, FluidSolver, SolidSolver):
+- """
+- Connection between solvers.
+- Creates the communication support between the two solvers.
+- Gets information about f/s interfaces from the two solvers.
+- """
++ """
++ Connection between solvers.
++ Creates the communication support between the two solvers.
++ Gets information about f/s interfaces from the two solvers.
++ """
+ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+- MPIsize = self.comm.Get_size()
++ MPIsize = self.comm.Get_size()
+ else:
+ myid = 0
+ MPIsize = 1
+-
+- # --- Identify the fluid and solid interfaces and store the number of nodes on both sides (and for each partition) ---
++
++ # --- Identify the fluid and solid interfaces and store the number of nodes on both sides (and for each partition) ---
+ self.fluidInterfaceIdentifier = None
+ self.nLocalFluidInterfaceNodes = 0
+ if FluidSolver != None:
+- print('Fluid solver is initialized on process {}'.format(myid))
++ print('Fluid solver is initialized on process {}'.format(myid))
+ self.haveFluidSolver = True
+ allMovingMarkersTags = FluidSolver.GetAllMovingMarkersTag()
+ allMarkersID = FluidSolver.GetAllBoundaryMarkers()
+@@ -229,23 +229,23 @@
+ if allMovingMarkersTags[0] in allMarkersID.keys():
+ self.fluidInterfaceIdentifier = allMarkersID[allMovingMarkersTags[0]]
+ if self.fluidInterfaceIdentifier != None:
+- self.nLocalFluidInterfaceNodes = FluidSolver.GetNumberVertices(self.fluidInterfaceIdentifier)
+- if self.nLocalFluidInterfaceNodes != 0:
++ self.nLocalFluidInterfaceNodes = FluidSolver.GetNumberVertices(self.fluidInterfaceIdentifier)
++ if self.nLocalFluidInterfaceNodes != 0:
+ self.haveFluidInterface = True
+- print('Number of interface fluid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalFluidInterfaceNodes))
+- else:
+- pass
++ print('Number of interface fluid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalFluidInterfaceNodes))
++ else:
++ pass
+
+- if SolidSolver != None:
+- print('Solid solver is initialized on process {}'.format(myid))
++ if SolidSolver != None:
++ print('Solid solver is initialized on process {}'.format(myid))
+ self.haveSolidSolver = True
+- self.solidInterfaceIdentifier = SolidSolver.getFSIMarkerID()
+- self.nLocalSolidInterfaceNodes = SolidSolver.getNumberOfSolidInterfaceNodes(self.solidInterfaceIdentifier)
+- if self.nLocalSolidInterfaceNodes != 0:
++ self.solidInterfaceIdentifier = SolidSolver.getFSIMarkerID()
++ self.nLocalSolidInterfaceNodes = SolidSolver.getNumberOfSolidInterfaceNodes(self.solidInterfaceIdentifier)
++ if self.nLocalSolidInterfaceNodes != 0:
+ self.haveSolidInterface = True
+ print('Number of interface solid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalSolidInterfaceNodes))
+- else:
+- pass
++ else:
++ pass
+
+ # --- Exchange information about processors on which the solvers are defined and where the interface nodes are lying ---
+ if self.have_MPI == True:
+@@ -266,18 +266,18 @@
+ else:
+ sendBufSolidInterface = np.array(int(0))
+ rcvBufFluid = np.zeros(MPIsize, dtype = int)
+- rcvBufSolid = np.zeros(MPIsize, dtype = int)
++ rcvBufSolid = np.zeros(MPIsize, dtype = int)
+ rcvBufFluidInterface = np.zeros(MPIsize, dtype = int)
+- rcvBufSolidInterface = np.zeros(MPIsize, dtype = int)
++ rcvBufSolidInterface = np.zeros(MPIsize, dtype = int)
+ self.comm.Allgather(sendBufFluid, rcvBufFluid)
+ self.comm.Allgather(sendBufSolid, rcvBufSolid)
+ self.comm.Allgather(sendBufFluidInterface, rcvBufFluidInterface)
+ self.comm.Allgather(sendBufSolidInterface, rcvBufSolidInterface)
+ for iProc in range(MPIsize):
+- if rcvBufFluid[iProc] == 1:
++ if rcvBufFluid[iProc] == 1:
+ self.fluidSolverProcessors.append(iProc)
+ if rcvBufSolid[iProc] == 1:
+- self.solidSolverProcessors.append(iProc)
++ self.solidSolverProcessors.append(iProc)
+ if rcvBufFluidInterface[iProc] == 1:
+ self.fluidInterfaceProcessors.append(iProc)
+ if rcvBufSolidInterface[iProc] == 1:
+@@ -285,19 +285,19 @@
+ del sendBufFluid, sendBufSolid, rcvBufFluid, rcvBufSolid, sendBufFluidInterface, sendBufSolidInterface, rcvBufFluidInterface, rcvBufSolidInterface
+ else:
+ self.fluidSolverProcessors.append(0)
+- self.solidSolverProcessors.append(0)
++ self.solidSolverProcessors.append(0)
+ self.fluidInterfaceProcessors.append(0)
+ self.solidInterfaceProcessors.append(0)
+
+- self.MPIBarrier()
+-
+- # --- Calculate the total number of nodes at the fluid interface (sum over all the partitions) ---
++ self.MPIBarrier()
++
++ # --- Calculate the total number of nodes at the fluid interface (sum over all the partitions) ---
+ # Calculate the number of halo nodes on each partition
+ self.nLocalFluidInterfaceHaloNode = 0
+- for iVertex in range(self.nLocalFluidInterfaceNodes):
++ for iVertex in range(self.nLocalFluidInterfaceNodes):
+ if FluidSolver.IsAHaloNode(self.fluidInterfaceIdentifier, iVertex) == True:
+ GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
+- self.FluidHaloNodeList[GlobalIndex] = iVertex
++ self.FluidHaloNodeList[GlobalIndex] = iVertex
+ self.nLocalFluidInterfaceHaloNode += 1
+ # Calculate the number of physical (= not halo) nodes on each partition
+ self.nLocalFluidInterfacePhysicalNodes = self.nLocalFluidInterfaceNodes - self.nLocalFluidInterfaceHaloNode
+@@ -308,10 +308,10 @@
+
+ # Same thing for the solid part
+ self.nLocalSolidInterfaceHaloNode = 0
+- #for iVertex in range(self.nLocalSolidInterfaceNodes):
++ #for iVertex in range(self.nLocalSolidInterfaceNodes):
+ #if SoliddSolver.IsAHaloNode(self.fluidInterfaceIdentifier, iVertex) == True:
+ #GlobalIndex = SolidSolver.GetVertexGlobalIndex(self.solidInterfaceIdentifier, iVertex)
+- #self.SolidHaloNodeList[GlobalIndex] = iVertex
++ #self.SolidHaloNodeList[GlobalIndex] = iVertex
+ #self.nLocalSolidInterfaceHaloNode += 1
+ self.nLocalSolidInterfacePhysicalNodes = self.nLocalSolidInterfaceNodes - self.nLocalSolidInterfaceHaloNode
+ if self.have_MPI == True:
+@@ -323,11 +323,11 @@
+ # --- Calculate the total number of nodes (with and without halo) at the fluid interface (sum over all the partitions) and broadcast the number accross all processors ---
+ sendBuffHalo = np.array(int(self.nLocalFluidInterfaceNodes))
+ sendBuffPhysical = np.array(int(self.nLocalFluidInterfacePhysicalNodes))
+- rcvBuffHalo = np.zeros(1, dtype=int)
++ rcvBuffHalo = np.zeros(1, dtype=int)
+ rcvBuffPhysical = np.zeros(1, dtype=int)
+- if self.have_MPI == True:
++ if self.have_MPI == True:
+ self.comm.barrier()
+- self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM)
++ self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM)
+ self.comm.Allreduce(sendBuffPhysical,rcvBuffPhysical,op=self.MPI.SUM)
+ self.nFluidInterfaceNodes = rcvBuffHalo[0]
+ self.nFluidInterfacePhysicalNodes = rcvBuffPhysical[0]
+@@ -339,11 +339,11 @@
+ # Same thing for the solid part
+ sendBuffHalo = np.array(int(self.nLocalSolidInterfaceNodes))
+ sendBuffPhysical = np.array(int(self.nLocalSolidInterfacePhysicalNodes))
+- rcvBuffHalo = np.zeros(1, dtype=int)
++ rcvBuffHalo = np.zeros(1, dtype=int)
+ rcvBuffPhysical = np.zeros(1, dtype=int)
+ if self.have_MPI == True:
+- self.comm.barrier()
+- self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM)
++ self.comm.barrier()
++ self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM)
+ self.comm.Allreduce(sendBuffPhysical,rcvBuffPhysical,op=self.MPI.SUM)
+ self.nSolidInterfaceNodes = rcvBuffHalo[0]
+ self.nSolidInterfacePhysicalNodes = rcvBuffPhysical[0]
+@@ -375,7 +375,7 @@
+ if myid in self.fluidInterfaceProcessors:
+ globalIndexStart = 0
+ for iProc in range(myid):
+- globalIndexStart += self.fluidPhysicalInterfaceNodesDistribution[iProc]
++ globalIndexStart += self.fluidPhysicalInterfaceNodesDistribution[iProc]
+ globalIndexStop = globalIndexStart + self.nLocalFluidInterfacePhysicalNodes-1
+ else:
+ globalIndexStart = 0
+@@ -387,8 +387,8 @@
+ temp[0] = [0,self.nLocalFluidInterfacePhysicalNodes-1]
+ self.fluidGlobalIndexRange = list()
+ self.fluidGlobalIndexRange.append(temp)
+-
+- # Same thing for the solid part
++
++ # Same thing for the solid part
+ if self.have_MPI == True:
+ if myid in self.solidInterfaceProcessors:
+ globalIndexStart = 0
+@@ -404,14 +404,14 @@
+ temp = {}
+ temp[0] = [0,self.nSolidInterfacePhysicalNodes-1]
+ self.solidGlobalIndexRange = list()
+- self.solidGlobalIndexRange.append(temp)
++ self.solidGlobalIndexRange.append(temp)
+
+- self.MPIPrint('Total number of fluid interface nodes (halo nodes included) : {}'.format(self.nFluidInterfaceNodes))
+- self.MPIPrint('Total number of solid interface nodes (halo nodes included) : {}'.format(self.nSolidInterfaceNodes))
++ self.MPIPrint('Total number of fluid interface nodes (halo nodes included) : {}'.format(self.nFluidInterfaceNodes))
++ self.MPIPrint('Total number of solid interface nodes (halo nodes included) : {}'.format(self.nSolidInterfaceNodes))
+ self.MPIPrint('Total number of fluid interface nodes : {}'.format(self.nFluidInterfacePhysicalNodes))
+ self.MPIPrint('Total number of solid interface nodes : {}'.format(self.nSolidInterfacePhysicalNodes))
+
+- self.MPIBarrier()
++ self.MPIBarrier()
+
+ # --- Create all the PETSc vectors required for parallel communication and parallel mesh mapping/interpolation (working for serial too) ---
+ if self.have_MPI == True:
+@@ -432,8 +432,8 @@
+ self.solidInterface_array_DispY.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+ self.solidInterface_array_DispZ.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+ self.solidInterface_array_DispX.set(0.0)
+- self.solidInterface_array_DispY.set(0.0)
+- self.solidInterface_array_DispZ.set(0.0)
++ self.solidInterface_array_DispY.set(0.0)
++ self.solidInterface_array_DispZ.set(0.0)
+
+ if self.have_MPI == True:
+ self.fluidInterface_array_DispX = PETSc.Vec().create(self.comm)
+@@ -536,30 +536,30 @@
+ self.solidInterfaceResidualnM1_array_Z.set(0.0)
+
+ def interfaceMapping(self,FluidSolver, SolidSolver, FSI_config):
+- """
+- Creates the one-to-one mapping between interfaces in case of matching meshes.
+- Creates the interpolation rules between interfaces in case of non-matching meshes.
+- """
+- if self.have_MPI == True:
++ """
++ Creates the one-to-one mapping between interfaces in case of matching meshes.
++ Creates the interpolation rules between interfaces in case of non-matching meshes.
++ """
++ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+- MPIsize = self.comm.Get_size()
++ MPIsize = self.comm.Get_size()
+ else:
+ myid = 0
+ MPIsize = 1
+
+- # --- Get the fluid interface from fluid solver on each partition ---
+- GlobalIndex = int()
++ # --- Get the fluid interface from fluid solver on each partition ---
++ GlobalIndex = int()
+ localIndex = 0
+ fluidIndexing_temp = {}
+ self.localFluidInterface_array_X_init = np.zeros((self.nLocalFluidInterfacePhysicalNodes))
+ self.localFluidInterface_array_Y_init = np.zeros((self.nLocalFluidInterfacePhysicalNodes))
+ self.localFluidInterface_array_Z_init = np.zeros((self.nLocalFluidInterfacePhysicalNodes))
+ for iVertex in range(self.nLocalFluidInterfaceNodes):
+- GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
+- posx = FluidSolver.GetVertexCoordX(self.fluidInterfaceIdentifier, iVertex)
+- posy = FluidSolver.GetVertexCoordY(self.fluidInterfaceIdentifier, iVertex)
+- posz = FluidSolver.GetVertexCoordZ(self.fluidInterfaceIdentifier, iVertex)
+- if GlobalIndex in self.FluidHaloNodeList[myid].keys():
++ GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
++ posx = FluidSolver.GetVertexCoordX(self.fluidInterfaceIdentifier, iVertex)
++ posy = FluidSolver.GetVertexCoordY(self.fluidInterfaceIdentifier, iVertex)
++ posz = FluidSolver.GetVertexCoordZ(self.fluidInterfaceIdentifier, iVertex)
++ if GlobalIndex in self.FluidHaloNodeList[myid].keys():
+ self.haloNodesPositionsInit[GlobalIndex] = (posx, posy, posz)
+ else:
+ fluidIndexing_temp[GlobalIndex] = self.__getGlobalIndex('fluid', myid, localIndex)
+@@ -576,17 +576,17 @@
+ self.fluidIndexing = fluidIndexing_temp.copy()
+ del fluidIndexing_temp
+
+- # --- Get the solid interface from solid solver on each partition ---
++ # --- Get the solid interface from solid solver on each partition ---
+ localIndex = 0
+ solidIndexing_temp = {}
+- self.localSolidInterface_array_X = np.zeros(self.nLocalSolidInterfaceNodes)
++ self.localSolidInterface_array_X = np.zeros(self.nLocalSolidInterfaceNodes)
+ self.localSolidInterface_array_Y = np.zeros(self.nLocalSolidInterfaceNodes)
+ self.localSolidInterface_array_Z = np.zeros(self.nLocalSolidInterfaceNodes)
+ for iVertex in range(self.nLocalSolidInterfaceNodes):
+ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
+- posx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex)
+- posy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex)
+- posz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex)
++ posx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex)
++ posy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex)
++ posz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex)
+ if GlobalIndex in self.SolidHaloNodeList[myid].keys():
+ pass
+ else:
+@@ -605,14 +605,14 @@
+ del solidIndexing_temp
+
+
+- # --- Create the PETSc parallel interpolation matrix ---
++ # --- Create the PETSc parallel interpolation matrix ---
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
+ if self.have_MPI == True:
+ self.MappingMatrixA = PETSc.Mat().create(self.comm)
+ self.MappingMatrixB = PETSc.Mat().create(self.comm)
+ self.MappingMatrixA_T = PETSc.Mat().create(self.comm)
+ self.MappingMatrixB_T = PETSc.Mat().create(self.comm)
+- if FSI_config['MESH_INTERP_METHOD'] == 'RBF' :
++ if FSI_config['MESH_INTERP_METHOD'] == 'RBF' :
+ self.MappingMatrixA.setType('mpiaij')
+ self.MappingMatrixB.setType('mpiaij')
+ self.MappingMatrixA_T.setType('mpiaij')
+@@ -627,7 +627,7 @@
+ self.MappingMatrixB = PETSc.Mat().create()
+ self.MappingMatrixA_T = PETSc.Mat().create()
+ self.MappingMatrixB_T = PETSc.Mat().create()
+- if FSI_config['MESH_INTERP_METHOD'] == 'RBF' :
++ if FSI_config['MESH_INTERP_METHOD'] == 'RBF' :
+ self.MappingMatrixA.setType('aij')
+ self.MappingMatrixB.setType('aij')
+ self.MappingMatrixA_T.setType('aij')
+@@ -637,16 +637,16 @@
+ self.MappingMatrixB.setType('aij')
+ self.MappingMatrixA_T.setType('aij')
+ self.MappingMatrixB_T.setType('aij')
+- self.MappingMatrixA.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF))
++ self.MappingMatrixA.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF))
+ self.MappingMatrixA.setUp()
+ self.MappingMatrixA.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+- self.MappingMatrixB.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes+self.d_RBF))
++ self.MappingMatrixB.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes+self.d_RBF))
+ self.MappingMatrixB.setUp()
+ self.MappingMatrixB.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+- self.MappingMatrixA_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF))
++ self.MappingMatrixA_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF))
+ self.MappingMatrixA_T.setUp()
+ self.MappingMatrixA_T.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+- self.MappingMatrixB_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nFluidInterfacePhysicalNodes))
++ self.MappingMatrixB_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nFluidInterfacePhysicalNodes))
+ self.MappingMatrixB_T.setUp()
+ self.MappingMatrixB_T.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+ else:
+@@ -660,21 +660,21 @@
+ self.MappingMatrix_T = PETSc.Mat().create()
+ self.MappingMatrix.setType('aij')
+ self.MappingMatrix_T.setType('aij')
+- self.MappingMatrix.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes))
++ self.MappingMatrix.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes))
+ self.MappingMatrix.setUp()
+ self.MappingMatrix.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+- self.MappingMatrix_T.setSizes((self.nSolidInterfacePhysicalNodes, self.nFluidInterfacePhysicalNodes))
++ self.MappingMatrix_T.setSizes((self.nSolidInterfacePhysicalNodes, self.nFluidInterfacePhysicalNodes))
+ self.MappingMatrix_T.setUp()
+ self.MappingMatrix_T.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+-
+-
++
++
+ # --- Fill the interpolation matrix in parallel (working in serial too) ---
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
+ self.MPIPrint('Building interpolation matrices...')
+ if self.have_MPI == True:
+ for iProc in self.solidInterfaceProcessors:
+ if myid == iProc:
+- for jProc in self.solidInterfaceProcessors:
++ for jProc in self.solidInterfaceProcessors:
+ self.comm.Send(self.localSolidInterface_array_X, dest=jProc, tag=1)
+ self.comm.Send(self.localSolidInterface_array_Y, dest=jProc, tag=2)
+ self.comm.Send(self.localSolidInterface_array_Z, dest=jProc, tag=3)
+@@ -726,7 +726,7 @@
+ self.TPSMeshMapping_B(solidInterfaceBuffRcv_X, solidInterfaceBuffRcv_Y, solidInterfaceBuffRcv_Z, iProc)
+ else:
+ self.NearestNeighboorMeshMapping(solidInterfaceBuffRcv_X, solidInterfaceBuffRcv_Y, solidInterfaceBuffRcv_Z, iProc)
+- else:
++ else:
+ self.matchingMeshMapping(solidInterfaceBuffRcv_X, solidInterfaceBuffRcv_Y, solidInterfaceBuffRcv_Z, iProc)
+ else:
+ if FSI_config['MATCHING_MESH'] == 'NO':
+@@ -735,10 +735,10 @@
+ elif FSI_config['MESH_INTERP_METHOD'] == 'TPS' :
+ self.TPSMeshMapping_B(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0)
+ else:
+- self.NearestNeighboorMeshMapping(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0)
+- else:
++ self.NearestNeighboorMeshMapping(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0)
++ else:
+ self.matchingMeshMapping(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0)
+-
++
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
+ self.MappingMatrixB.assemblyBegin()
+ self.MappingMatrixB.assemblyEnd()
+@@ -751,9 +751,9 @@
+ self.MappingMatrix_T.assemblyBegin()
+ self.MappingMatrix_T.assemblyEnd()
+ self.MPIPrint("Interpolation matrix is built.")
+-
++
+ self.MPIBarrier()
+-
++
+ del self.localSolidInterface_array_X
+ del self.localSolidInterface_array_Y
+ del self.localSolidInterface_array_Z
+@@ -768,20 +768,20 @@
+ myid = 0
+
+ # --- Instantiate the spatial indexing ---
+- prop_index = index.Property()
+- prop_index.dimension = self.nDim
+- SolidSpatialTree = index.Index(properties=prop_index)
+-
++ prop_index = index.Property()
++ prop_index.dimension = self.nDim
++ SolidSpatialTree = index.Index(properties=prop_index)
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for jVertex in range(nSolidNodes):
+ posX = solidInterfaceBuffRcv_X[jVertex]
+ posY = solidInterfaceBuffRcv_Y[jVertex]
+ posZ = solidInterfaceBuffRcv_Z[jVertex]
+- if self.nDim == 2 :
+- SolidSpatialTree.add(jVertex, (posX, posY))
+- else :
+- SolidSpatialTree.add(jVertex, (posX, posY, posZ))
++ if self.nDim == 2 :
++ SolidSpatialTree.add(jVertex, (posX, posY))
++ else :
++ SolidSpatialTree.add(jVertex, (posX, posY, posZ))
+
+ if self.nFluidInterfacePhysicalNodes != self.nSolidInterfacePhysicalNodes:
+ raise Exception("Fluid and solid interface must have the same number of nodes for matching meshes ! ")
+@@ -822,20 +822,20 @@
+ myid = 0
+
+ # --- Instantiate the spatial indexing ---
+- prop_index = index.Property()
+- prop_index.dimension = self.nDim
+- SolidSpatialTree = index.Index(properties=prop_index)
+-
++ prop_index = index.Property()
++ prop_index.dimension = self.nDim
++ SolidSpatialTree = index.Index(properties=prop_index)
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for jVertex in range(nSolidNodes):
+ posX = solidInterfaceBuffRcv_X[jVertex]
+ posY = solidInterfaceBuffRcv_Y[jVertex]
+ posZ = solidInterfaceBuffRcv_Z[jVertex]
+- if self.nDim == 2 :
+- SolidSpatialTree.add(jVertex, (posX, posY))
+- else :
+- SolidSpatialTree.add(jVertex, (posX, posY, posZ))
++ if self.nDim == 2 :
++ SolidSpatialTree.add(jVertex, (posX, posY))
++ else :
++ SolidSpatialTree.add(jVertex, (posX, posY, posZ))
+
+ # --- For each fluid interface node, find the nearest solid interface node and fill the boolean mapping matrix ---
+ for iVertexFluid in range(self.nLocalFluidInterfacePhysicalNodes):
+@@ -863,20 +863,20 @@
+ myid = 0
+
+ # --- Instantiate the spatial indexing ---
+- prop_index = index.Property()
+- prop_index.dimension = self.nDim
+- SolidSpatialTree = index.Index(properties=prop_index)
+-
++ prop_index = index.Property()
++ prop_index.dimension = self.nDim
++ SolidSpatialTree = index.Index(properties=prop_index)
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for jVertex in range(nSolidNodes):
+ posX = solidInterfaceBuffRcv_X[jVertex]
+ posY = solidInterfaceBuffRcv_Y[jVertex]
+ posZ = solidInterfaceBuffRcv_Z[jVertex]
+- if self.nDim == 2 :
+- SolidSpatialTree.add(jVertex, (posX, posY))
+- else :
+- SolidSpatialTree.add(jVertex, (posX, posY, posZ))
++ if self.nDim == 2 :
++ SolidSpatialTree.add(jVertex, (posX, posY))
++ else :
++ SolidSpatialTree.add(jVertex, (posX, posY, posZ))
+
+ for iVertexSolid in range(self.nLocalSolidInterfaceNodes):
+ posX = self.localSolidInterface_array_X[iVertexSolid]
+@@ -915,20 +915,20 @@
+ myid = 0
+
+ # --- Instantiate the spatial indexing ---
+- prop_index = index.Property()
+- prop_index.dimension = self.nDim
+- SolidSpatialTree = index.Index(properties=prop_index)
+-
++ prop_index = index.Property()
++ prop_index.dimension = self.nDim
++ SolidSpatialTree = index.Index(properties=prop_index)
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for jVertex in range(nSolidNodes):
+ posX = solidInterfaceBuffRcv_X[jVertex]
+ posY = solidInterfaceBuffRcv_Y[jVertex]
+ posZ = solidInterfaceBuffRcv_Z[jVertex]
+- if self.nDim == 2 :
+- SolidSpatialTree.add(jVertex, (posX, posY))
+- else :
+- SolidSpatialTree.add(jVertex, (posX, posY, posZ))
++ if self.nDim == 2 :
++ SolidSpatialTree.add(jVertex, (posX, posY))
++ else :
++ SolidSpatialTree.add(jVertex, (posX, posY, posZ))
+
+ for iVertexFluid in range(self.nLocalFluidInterfacePhysicalNodes):
+ posX = self.localFluidInterface_array_X_init[iVertexFluid]
+@@ -965,7 +965,7 @@
+ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for iVertexSolid in range(self.nLocalSolidInterfaceNodes):
+@@ -999,7 +999,7 @@
+ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for iVertexFluid in range(self.nLocalFluidInterfacePhysicalNodes):
+@@ -1031,7 +1031,7 @@
+ """
+ phi = 0.0
+ eps = distance/rad
+-
++
+ if eps < 1:
+ phi = ((1.0-eps)**4)*(4.0*eps+1.0)
+ else:
+@@ -1044,20 +1044,20 @@
+ Description
+ """
+ phi = 0.0
+-
++
+ if distance > 0.0:
+ phi = (distance**2)*np.log10(distance)
+ else:
+ phi = 0.0
+
+- return phi
++ return phi
+
+
+ def interpolateSolidPositionOnFluidMesh(self, FSI_config):
+- """
+- Applies the one-to-one mapping or the interpolaiton rules from solid to fluid mesh.
+- """
+- if self.have_MPI == True:
++ """
++ Applies the one-to-one mapping or the interpolaiton rules from solid to fluid mesh.
++ """
++ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+ MPIsize = self.comm.Get_size()
+ else:
+@@ -1110,12 +1110,12 @@
+ del gamma_array_DispY
+ del gamma_array_DispZ
+ del KSP_solver
+- else:
++ else:
+ self.MappingMatrix.mult(self.solidInterface_array_DispX, self.fluidInterface_array_DispX)
+ self.MappingMatrix.mult(self.solidInterface_array_DispY, self.fluidInterface_array_DispY)
+ self.MappingMatrix.mult(self.solidInterface_array_DispZ, self.fluidInterface_array_DispZ)
+
+- # --- Checking conservation ---
++ # --- Checking conservation ---
+ WSX = self.solidLoads_array_X.dot(self.solidInterface_array_DispX)
+ WSY = self.solidLoads_array_Y.dot(self.solidInterface_array_DispY)
+ WSZ = self.solidLoads_array_Z.dot(self.solidInterface_array_DispZ)
+@@ -1124,11 +1124,11 @@
+ WFY = self.fluidLoads_array_Y.dot(self.fluidInterface_array_DispY)
+ WFZ = self.fluidLoads_array_Z.dot(self.fluidInterface_array_DispZ)
+
+- self.MPIPrint("Checking f/s interface conservation...")
+- self.MPIPrint('Solid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WSX, WSY, WSZ))
+- self.MPIPrint('Fluid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WFX, WFY, WFZ))
++ self.MPIPrint("Checking f/s interface conservation...")
++ self.MPIPrint('Solid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WSX, WSY, WSZ))
++ self.MPIPrint('Fluid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WFX, WFY, WFZ))
++
+
+-
+ # --- Redistribute the interpolated fluid interface according to the partitions that own the fluid interface ---
+ # Gather the fluid interface on the master process
+ if self.have_MPI == True:
+@@ -1156,7 +1156,7 @@
+ displ = tuple(displ)
+
+ del sendBuffNumber, rcvBuffNumber
+-
++
+ #print("DEBUG MESSAGE From proc {}, counts = {}".format(myid, counts))
+ #print("DEBUG MESSAGE From proc {}, displ = {}".format(myid, displ))
+
+@@ -1213,18 +1213,18 @@
+ del sendBuff
+
+ def interpolateFluidLoadsOnSolidMesh(self, FSI_config):
+- """
+- Applies the one-to-one mapping or the interpolaiton rules from fluid to solid mesh.
+- """
+- if self.have_MPI == True:
++ """
++ Applies the one-to-one mapping or the interpolaiton rules from fluid to solid mesh.
++ """
++ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+ MPIsize = self.comm.Get_size()
+ else:
+ myid = 0
+ MPIsize = 1
+-
++
+ # --- Interpolate (or map) in parallel the fluid interface loads on the solid interface ---
+- #self.MappingMatrix.transpose()
++ #self.MappingMatrix.transpose()
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
+ if self.have_MPI == True:
+ gamma_array_LoadX = PETSc.Vec().create(self.comm)
+@@ -1280,10 +1280,10 @@
+ self.solidLoads_array_X_recon = None
+ self.solidLoads_array_Y_recon = None
+ self.solidLoads_array_Z_recon = None
+- if myid == self.rootProcess:
+- self.solidLoads_array_X_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+- self.solidLoads_array_Y_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+- self.solidLoads_array_Z_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
++ if myid == self.rootProcess:
++ self.solidLoads_array_X_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
++ self.solidLoads_array_Y_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
++ self.solidLoads_array_Z_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+ myNumberOfNodes = self.solidLoads_array_X.getArray().shape[0]
+ sendBuffNumber = np.array([myNumberOfNodes], dtype=int)
+ rcvBuffNumber = np.zeros(MPIsize, dtype=int)
+@@ -1293,9 +1293,9 @@
+ displ = np.zeros(MPIsize, dtype=int)
+ for ii in range(rcvBuffNumber.shape[0]):
+ displ[ii] = rcvBuffNumber[0:ii].sum()
+- displ = tuple(displ)
++ displ = tuple(displ)
+
+- del sendBuffNumber, rcvBuffNumber
++ del sendBuffNumber, rcvBuffNumber
+
+ self.comm.Gatherv(self.solidLoads_array_X.getArray(), [self.solidLoads_array_X_recon, counts, displ, self.MPI.DOUBLE], root=self.rootProcess)
+ self.comm.Gatherv(self.solidLoads_array_Y.getArray(), [self.solidLoads_array_Y_recon, counts, displ, self.MPI.DOUBLE], root=self.rootProcess)
+@@ -1336,25 +1336,25 @@
+
+
+ '''def getSolidInterfacePosition(self, SolidSolver):
+- """
+- Gets the current solid interface position from the solid solver.
+- """
++ """
++ Gets the current solid interface position from the solid solver.
++ """
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ # --- Get the solid interface position from the solid solver and directly fill the corresponding PETSc vector ---
+ GlobalIndex = int()
+ localIndex = 0
+- for iVertex in range(self.nLocalSolidInterfaceNodes):
++ for iVertex in range(self.nLocalSolidInterfaceNodes):
+ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
+ if GlobalIndex in self.SolidHaloNodeList[myid].keys():
+ pass
+ else:
+- newPosx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex)
+- newPosy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex)
+- newPosz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex)
++ newPosx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex)
++ newPosy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex)
++ newPosz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex)
+ iGlobalVertex = self.__getGlobalIndex('solid', myid, localIndex)
+ self.solidInterface_array_X.setValues([iGlobalVertex],newPosx)
+ self.solidInterface_array_Y.setValues([iGlobalVertex],newPosy)
+@@ -1375,25 +1375,25 @@
+ #print("DEBUG MESSAGE From PROC {} : array_X = {}".format(myid, self.solidInterface_array_X.getArray()))'''
+
+ def getSolidInterfaceDisplacement(self, SolidSolver):
+- """
+- Gets the current solid interface position from the solid solver.
+- """
++ """
++ Gets the current solid interface position from the solid solver.
++ """
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ # --- Get the solid interface position from the solid solver and directly fill the corresponding PETSc vector ---
+ GlobalIndex = int()
+ localIndex = 0
+- for iVertex in range(self.nLocalSolidInterfaceNodes):
++ for iVertex in range(self.nLocalSolidInterfaceNodes):
+ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
+ if GlobalIndex in self.SolidHaloNodeList[myid].keys():
+ pass
+ else:
+- newDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex)
+- newDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex)
+- newDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex)
++ newDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex)
++ newDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex)
++ newDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex)
+ iGlobalVertex = self.__getGlobalIndex('solid', myid, localIndex)
+ self.solidInterface_array_DispX.setValues([iGlobalVertex],newDispx)
+ self.solidInterface_array_DispY.setValues([iGlobalVertex],newDispy)
+@@ -1408,9 +1408,9 @@
+ self.solidInterface_array_DispZ.assemblyEnd()
+
+ def getFluidInterfaceNodalForce(self, FSI_config, FluidSolver):
+- """
+- Gets the fluid interface loads from the fluid solver.
+- """
++ """
++ Gets the fluid interface loads from the fluid solver.
++ """
+ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+ else:
+@@ -1422,17 +1422,17 @@
+ FZ = 0.0
+
+ # --- Get the fluid interface loads from the fluid solver and directly fill the corresponding PETSc vector ---
+- for iVertex in range(self.nLocalFluidInterfaceNodes):
+- halo = FluidSolver.ComputeVertexForces(self.fluidInterfaceIdentifier, iVertex) # !!we have to ignore halo node coming from mesh partitioning because they introduice non-physical forces
+- if halo==False:
+- if FSI_config['CSD_SOLVER'] == 'GETDP':
+- newFx = FluidSolver.GetVertexForceDensityX(self.fluidInterfaceIdentifier, iVertex)
+- newFy = FluidSolver.GetVertexForceDensityY(self.fluidInterfaceIdentifier, iVertex)
+- newFz = FluidSolver.GetVertexForceDensityZ(self.fluidInterfaceIdentifier, iVertex)
+- else:
+- newFx = FluidSolver.GetVertexForceX(self.fluidInterfaceIdentifier, iVertex)
+- newFy = FluidSolver.GetVertexForceY(self.fluidInterfaceIdentifier, iVertex)
+- newFz = FluidSolver.GetVertexForceZ(self.fluidInterfaceIdentifier, iVertex)
++ for iVertex in range(self.nLocalFluidInterfaceNodes):
++ halo = FluidSolver.ComputeVertexForces(self.fluidInterfaceIdentifier, iVertex) # !!we have to ignore halo node coming from mesh partitioning because they introduice non-physical forces
++ if halo==False:
++ if FSI_config['CSD_SOLVER'] == 'GETDP':
++ newFx = FluidSolver.GetVertexForceDensityX(self.fluidInterfaceIdentifier, iVertex)
++ newFy = FluidSolver.GetVertexForceDensityY(self.fluidInterfaceIdentifier, iVertex)
++ newFz = FluidSolver.GetVertexForceDensityZ(self.fluidInterfaceIdentifier, iVertex)
++ else:
++ newFx = FluidSolver.GetVertexForceX(self.fluidInterfaceIdentifier, iVertex)
++ newFy = FluidSolver.GetVertexForceY(self.fluidInterfaceIdentifier, iVertex)
++ newFz = FluidSolver.GetVertexForceZ(self.fluidInterfaceIdentifier, iVertex)
+ iGlobalVertex = self.__getGlobalIndex('fluid', myid, localIndex)
+ self.fluidLoads_array_X.setValues([iGlobalVertex], newFx)
+ self.fluidLoads_array_Y.setValues([iGlobalVertex], newFy)
+@@ -1457,22 +1457,22 @@
+ FX_b = self.fluidLoads_array_X.sum()
+ FY_b = self.fluidLoads_array_Y.sum()
+ FZ_b = self.fluidLoads_array_Z.sum()
+-
++
+
+ def setFluidInterfaceVarCoord(self, FluidSolver):
+- """
+- Communicate the change of coordinates of the fluid interface to the fluid solver.
+- Prepare the fluid solver for mesh deformation.
+- """
++ """
++ Communicate the change of coordinates of the fluid interface to the fluid solver.
++ Prepare the fluid solver for mesh deformation.
++ """
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ # --- Send the new fluid interface position to the fluid solver (on each partition, halo nodes included) ---
+ localIndex = 0
+- for iVertex in range(self.nLocalFluidInterfaceNodes):
+- GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
++ for iVertex in range(self.nLocalFluidInterfaceNodes):
++ GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
+ if GlobalIndex in self.FluidHaloNodeList[myid].keys():
+ posX0, posY0, posZ0 = self.haloNodesPositionsInit[GlobalIndex]
+ DispX, DispY, DispZ = self.haloNodesDisplacements[GlobalIndex]
+@@ -1491,32 +1491,32 @@
+ FluidSolver.SetVertexCoordZ(self.fluidInterfaceIdentifier, iVertex, posZ)
+ localIndex += 1
+ # Prepares the mesh deformation in the fluid solver
+- nodalVarCoordNorm = FluidSolver.SetVertexVarCoord(self.fluidInterfaceIdentifier, iVertex)
++ nodalVarCoordNorm = FluidSolver.SetVertexVarCoord(self.fluidInterfaceIdentifier, iVertex)
++
+
+-
+ def setSolidInterfaceLoads(self, SolidSolver, FSI_config, time):
+- """
+- Communicates the new solid interface loads to the solid solver.
+- In case of rigid body motion, calculates the new resultant forces (lift, drag, ...).
+- """
++ """
++ Communicates the new solid interface loads to the solid solver.
++ In case of rigid body motion, calculates the new resultant forces (lift, drag, ...).
++ """
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+
+- FY = 0.0 # solid-side resultant forces
++ FY = 0.0 # solid-side resultant forces
+ FX = 0.0
+ FZ = 0.0
+- FFX = 0.0 # fluid-side resultant forces
+- FFY = 0.0
+- FFZ = 0.0
++ FFX = 0.0 # fluid-side resultant forces
++ FFY = 0.0
++ FFZ = 0.0
+
+ # --- Check for total force conservation after interpolation
+ FFX = self.fluidLoads_array_X.sum()
+ FFY = self.fluidLoads_array_Y.sum()
+ FFZ = self.fluidLoads_array_Z.sum()
+
+-
++
+ for iVertex in range(self.nLocalSolidInterfaceNodes):
+ FX += self.localSolidLoads_array_X[iVertex]
+ FY += self.localSolidLoads_array_Y[iVertex]
+@@ -1527,9 +1527,9 @@
+ FY = self.comm.allreduce(FY)
+ FZ = self.comm.allreduce(FZ)
+
+- self.MPIPrint("Checking f/s interface total force...")
+- self.MPIPrint('Solid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FX, FY, FZ))
+- self.MPIPrint('Fluid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FFX, FFY, FFZ))
++ self.MPIPrint("Checking f/s interface total force...")
++ self.MPIPrint('Solid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FX, FY, FZ))
++ self.MPIPrint('Fluid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FFX, FFY, FFZ))
+
+ # --- Send the new solid interface loads to the solid solver (on each partition, halo nodes included) ---
+ GlobalIndex = int()
+@@ -1541,25 +1541,25 @@
+ pass
+ else:
+ Fx = self.localSolidLoads_array_X[localIndex]
+- Fy = self.localSolidLoads_array_Y[localIndex]
+- Fz = self.localSolidLoads_array_Z[localIndex]
++ Fy = self.localSolidLoads_array_Y[localIndex]
++ Fz = self.localSolidLoads_array_Z[localIndex]
+ SolidSolver.applyload(iVertex, Fx, Fy, Fz, time)
+ localIndex += 1
+- if FSI_config['CSD_SOLVER'] == 'NATIVE':
++ if FSI_config['CSD_SOLVER'] == 'NATIVE':
+ SolidSolver.setGeneralisedForce()
+- SolidSolver.setGeneralisedMoment()
++ SolidSolver.setGeneralisedMoment()
+
+ def computeSolidInterfaceResidual(self, SolidSolver):
+- """
+- Computes the solid interface FSI displacement residual.
+- """
++ """
++ Computes the solid interface FSI displacement residual.
++ """
+
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+
+- normInterfaceResidualSquare = 0.0
++ normInterfaceResidualSquare = 0.0
+
+ # --- Create and fill the PETSc vector for the predicted solid interface position (predicted by the solid computation) ---
+ if self.have_MPI == True:
+@@ -1575,27 +1575,27 @@
+ predDisp_array_Y = PETSc.Vec().create()
+ predDisp_array_Y.setType('seq')
+ predDisp_array_Z = PETSc.Vec().create()
+- predDisp_array_Z.setType('seq')
++ predDisp_array_Z.setType('seq')
+ predDisp_array_X.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+ predDisp_array_Y.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+ predDisp_array_Z.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+-
+- if myid in self.solidSolverProcessors:
+- for iVertex in range(self.nLocalSolidInterfaceNodes):
+- predDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex)
+- predDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex)
+- predDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex)
++
++ if myid in self.solidSolverProcessors:
++ for iVertex in range(self.nLocalSolidInterfaceNodes):
++ predDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex)
++ predDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex)
++ predDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex)
+ iGlobalVertex = self.__getGlobalIndex('solid', myid, iVertex)
+ predDisp_array_X.setValues([iGlobalVertex], predDispx)
+ predDisp_array_Y.setValues([iGlobalVertex], predDispy)
+ predDisp_array_Z.setValues([iGlobalVertex], predDispz)
+-
+- predDisp_array_X.assemblyBegin()
+- predDisp_array_X.assemblyEnd()
+- predDisp_array_Y.assemblyBegin()
+- predDisp_array_Y.assemblyEnd()
+- predDisp_array_Z.assemblyBegin()
+- predDisp_array_Z.assemblyEnd()
++
++ predDisp_array_X.assemblyBegin()
++ predDisp_array_X.assemblyEnd()
++ predDisp_array_Y.assemblyBegin()
++ predDisp_array_Y.assemblyEnd()
++ predDisp_array_Z.assemblyBegin()
++ predDisp_array_Z.assemblyEnd()
+
+ # --- Calculate the residual (vector and norm) ---
+ self.solidInterfaceResidual_array_X = predDisp_array_X - self.solidInterface_array_DispX
+@@ -1615,45 +1615,45 @@
+ del predDisp_array_Y
+ del predDisp_array_Z
+
+- return sqrt(normInterfaceResidualSquare)
++ return sqrt(normInterfaceResidualSquare)
+
+ def relaxSolidPosition(self,FSI_config):
+- """
+- Apply solid displacement under-relaxation.
+- """
++ """
++ Apply solid displacement under-relaxation.
++ """
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+
+ # --- Set the Aitken coefficient for the relaxation ---
+- if FSI_config['AITKEN_RELAX'] == 'STATIC':
+- self.aitkenParam = FSI_config['AITKEN_PARAM']
+- elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC':
+- self.setAitkenCoefficient(FSI_config)
+- else:
+- self.aitkenParam = 1.0
++ if FSI_config['AITKEN_RELAX'] == 'STATIC':
++ self.aitkenParam = FSI_config['AITKEN_PARAM']
++ elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC':
++ self.setAitkenCoefficient(FSI_config)
++ else:
++ self.aitkenParam = 1.0
+
+- self.MPIPrint('Aitken under-relaxation step with parameter {}'.format(self.aitkenParam))
++ self.MPIPrint('Aitken under-relaxation step with parameter {}'.format(self.aitkenParam))
+
+ # --- Relax the solid interface position ---
+ self.solidInterface_array_DispX += self.aitkenParam*self.solidInterfaceResidual_array_X
+ self.solidInterface_array_DispY += self.aitkenParam*self.solidInterfaceResidual_array_Y
+ self.solidInterface_array_DispZ += self.aitkenParam*self.solidInterfaceResidual_array_Z
+-
++
+
+ def setAitkenCoefficient(self, FSI_config):
+- """
+- Computes the Aitken coefficients for solid displacement under-relaxation.
+- """
+-
+- deltaResNormSquare = 0.0
+- prodScalRes = 0.0
+-
++ """
++ Computes the Aitken coefficients for solid displacement under-relaxation.
++ """
++
++ deltaResNormSquare = 0.0
++ prodScalRes = 0.0
++
+ # --- Create the PETSc vector for the difference between the residuals (current and previous FSI iter) ---
+- if self.FSIIter == 0:
+- self.aitkenParam = max(FSI_config['AITKEN_PARAM'], self.aitkenParam)
+- else:
++ if self.FSIIter == 0:
++ self.aitkenParam = max(FSI_config['AITKEN_PARAM'], self.aitkenParam)
++ else:
+ if self.have_MPI:
+ deltaResx_array_X = PETSc.Vec().create(self.comm)
+ deltaResx_array_X.setType('mpi')
+@@ -1688,9 +1688,9 @@
+ deltaResNormSquare_X = (deltaResx_array_X.norm())**2
+ deltaResNormSquare_Y = (deltaResx_array_Y.norm())**2
+ deltaResNormSquare_Z = (deltaResx_array_Z.norm())**2
+- deltaResNormSquare = deltaResNormSquare_X + deltaResNormSquare_Y + deltaResNormSquare_Z
++ deltaResNormSquare = deltaResNormSquare_X + deltaResNormSquare_Y + deltaResNormSquare_Z
+
+- self.aitkenParam *= -prodScalRes/deltaResNormSquare
++ self.aitkenParam *= -prodScalRes/deltaResNormSquare
+
+ deltaResx_array_X.destroy()
+ deltaResx_array_Y.destroy()
+@@ -1708,27 +1708,27 @@
+ self.solidInterfaceResidual_array_Z.copy(self.solidInterfaceResidualnM1_array_Z)
+
+ def displacementPredictor(self, FSI_config , SolidSolver, deltaT):
+- """
+- Calculates a prediciton for the solid interface position for the next time step.
+- """
++ """
++        Calculates a prediction for the solid interface position for the next time step.
++ """
+
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+
+- if FSI_config['DISP_PRED'] == 'FIRST_ORDER':
+- self.MPIPrint("First order predictor")
+- alpha_0 = 1.0
+- alpha_1 = 0.0
+- elif FSI_config['DISP_PRED'] == 'SECOND_ORDER':
+- self.MPIPrint("Second order predictor")
+- alpha_0 = 1.0
+- alpha_1 = 0.5
+- else:
+- self.MPIPrint("No predictor")
+- alpha_0 = 0.0
+- alpha_1 = 0.0
++ if FSI_config['DISP_PRED'] == 'FIRST_ORDER':
++ self.MPIPrint("First order predictor")
++ alpha_0 = 1.0
++ alpha_1 = 0.0
++ elif FSI_config['DISP_PRED'] == 'SECOND_ORDER':
++ self.MPIPrint("Second order predictor")
++ alpha_0 = 1.0
++ alpha_1 = 0.5
++ else:
++ self.MPIPrint("No predictor")
++ alpha_0 = 0.0
++ alpha_1 = 0.0
+
+ # --- Create the PETSc vectors to store the solid interface velocity ---
+ if self.have_MPI == True:
+@@ -1774,18 +1774,18 @@
+ # --- Fill the PETSc vectors ---
+ GlobalIndex = int()
+ localIndex = 0
+- for iVertex in range(self.nLocalSolidInterfaceNodes):
+- GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
++ for iVertex in range(self.nLocalSolidInterfaceNodes):
++ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
+ if GlobalIndex in self.SolidHaloNodeList[myid].keys():
+ pass
+ else:
+ iGlobalVertex = self.__getGlobalIndex('solid', myid, localIndex)
+- velx = SolidSolver.getInterfaceNodeVelX(self.solidInterfaceIdentifier, iVertex)
+- vely = SolidSolver.getInterfaceNodeVelY(self.solidInterfaceIdentifier, iVertex)
+- velz = SolidSolver.getInterfaceNodeVelZ(self.solidInterfaceIdentifier, iVertex)
+- velxNm1 = SolidSolver.getInterfaceNodeVelXNm1(self.solidInterfaceIdentifier, iVertex)
+- velyNm1 = SolidSolver.getInterfaceNodeVelYNm1(self.solidInterfaceIdentifier, iVertex)
+- velzNm1 = SolidSolver.getInterfaceNodeVelZNm1(self.solidInterfaceIdentifier, iVertex)
++ velx = SolidSolver.getInterfaceNodeVelX(self.solidInterfaceIdentifier, iVertex)
++ vely = SolidSolver.getInterfaceNodeVelY(self.solidInterfaceIdentifier, iVertex)
++ velz = SolidSolver.getInterfaceNodeVelZ(self.solidInterfaceIdentifier, iVertex)
++ velxNm1 = SolidSolver.getInterfaceNodeVelXNm1(self.solidInterfaceIdentifier, iVertex)
++ velyNm1 = SolidSolver.getInterfaceNodeVelYNm1(self.solidInterfaceIdentifier, iVertex)
++ velzNm1 = SolidSolver.getInterfaceNodeVelZNm1(self.solidInterfaceIdentifier, iVertex)
+ Vel_array_X.setValues([iGlobalVertex],velx)
+ Vel_array_Y.setValues([iGlobalVertex],vely)
+ Vel_array_Z.setValues([iGlobalVertex],velz)
+@@ -1822,27 +1822,27 @@
+ del VelnM1_array_X, VelnM1_array_Y, VelnM1_array_Z
+
+ def writeFSIHistory(self, TimeIter, time, varCoordNorm, FSIConv):
+- """
+- Write the FSI history file of the computaion.
+- """
++ """
++        Write the FSI history file of the computation.
++ """
+
+ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ if myid == self.rootProcess:
+- if self.unsteady:
+- if TimeIter == 0:
+- histFile = open('FSIhistory.dat', "w")
++ if self.unsteady:
++ if TimeIter == 0:
++ histFile = open('FSIhistory.dat', "w")
+ histFile.write("TimeIter\tTime\tFSIRes\tFSINbIter\n")
+- else:
+- histFile = open('FSIhistory.dat', "a")
+- if FSIConv:
+- histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter+1) + '\n')
+- else:
+- histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter) + '\n')
+- histFile.close()
++ else:
++ histFile = open('FSIhistory.dat', "a")
++ if FSIConv:
++ histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter+1) + '\n')
++ else:
++ histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter) + '\n')
++ histFile.close()
+ else:
+ if self.FSIIter == 0:
+ histFile = open('FSIhistory.dat', "w")
+@@ -1851,7 +1851,7 @@
+ histFile = open('FSIhistory.dat', "a")
+ histFile.write(str(self.FSIIter) + '\t' + str(varCoordNorm) + '\n')
+ histFile.close()
+-
++
+
+ self.MPIBarrier()
+
+@@ -1868,254 +1868,254 @@
+ globalIndex = globalStartIndex + iLocalVertex
+
+ return globalIndex
+-
++
+
+ def UnsteadyFSI(self,FSI_config, FluidSolver, SolidSolver):
+- """
+- Run the unsteady FSI computation by synchronizing the fluid and solid solvers.
+- F/s interface data are exchanged through interface mapping and interpolation (if non mathcing meshes).
+- """
++ """
++ Run the unsteady FSI computation by synchronizing the fluid and solid solvers.
++        F/s interface data are exchanged through interface mapping and interpolation (if non matching meshes).
++ """
+
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
+- numberPart = self.comm.Get_size()
++ myid = self.comm.Get_rank()
++ numberPart = self.comm.Get_size()
+ else:
+ myid = 0
+ numberPart = 1
+
+- # --- Set some general variables for the unsteady computation --- #
+- deltaT = FSI_config['UNST_TIMESTEP'] # physical time step
+- totTime = FSI_config['UNST_TIME'] # physical simulation time
+- NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step)
+- FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance
+- TimeIterTreshold = 0 # time iteration from which we allow the solid to deform
+-
+- if FSI_config['RESTART_SOL'] == 'YES':
+- startTime = FSI_config['START_TIME']
+- NbTimeIter = ((totTime)/deltaT)-1
+- time = startTime
+- TimeIter = FSI_config['RESTART_ITER']
+- else:
+- NbTimeIter = (totTime/deltaT)-1 # number of time iterations
+- time = 0.0 # initial time
+- TimeIter = 0 # initial time iteration
+-
+- NbTimeIter = int(NbTimeIter) # be sure that NbTimeIter is an integer
+-
+- varCoordNorm = 0.0 # FSI residual
+- FSIConv = False # FSI convergence flag
+-
+- self.MPIPrint('\n**********************************')
+- self.MPIPrint('* Begin unsteady FSI computation *')
+- self.MPIPrint('**********************************\n')
+-
+- # --- Initialize the coupled solution --- #
+- #If restart (DOES NOT WORK YET)
+- if FSI_config['RESTART_SOL'] == 'YES':
+- TimeIterTreshold = -1
+- FluidSolver.setTemporalIteration(TimeIter)
+- if myid == self.rootProcess:
+- SolidSolver.outputDisplacements(FluidSolver.getInterRigidDispArray(), True)
++ # --- Set some general variables for the unsteady computation --- #
++ deltaT = FSI_config['UNST_TIMESTEP'] # physical time step
++ totTime = FSI_config['UNST_TIME'] # physical simulation time
++ NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step)
++ FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance
++ TimeIterTreshold = 0 # time iteration from which we allow the solid to deform
++
++ if FSI_config['RESTART_SOL'] == 'YES':
++ startTime = FSI_config['START_TIME']
++ NbTimeIter = ((totTime)/deltaT)-1
++ time = startTime
++ TimeIter = FSI_config['RESTART_ITER']
++ else:
++ NbTimeIter = (totTime/deltaT)-1 # number of time iterations
++ time = 0.0 # initial time
++ TimeIter = 0 # initial time iteration
++
++ NbTimeIter = int(NbTimeIter) # be sure that NbTimeIter is an integer
++
++ varCoordNorm = 0.0 # FSI residual
++ FSIConv = False # FSI convergence flag
++
++ self.MPIPrint('\n**********************************')
++ self.MPIPrint('* Begin unsteady FSI computation *')
++ self.MPIPrint('**********************************\n')
++
++ # --- Initialize the coupled solution --- #
++ #If restart (DOES NOT WORK YET)
++ if FSI_config['RESTART_SOL'] == 'YES':
++ TimeIterTreshold = -1
++ FluidSolver.setTemporalIteration(TimeIter)
++ if myid == self.rootProcess:
++ SolidSolver.outputDisplacements(FluidSolver.getInterRigidDispArray(), True)
++ if self.have_MPI == True:
++ self.comm.barrier()
++ FluidSolver.setInitialMesh(True)
++ if myid == self.rootProcess:
++ SolidSolver.displacementPredictor(FluidSolver.getInterRigidDispArray())
+ if self.have_MPI == True:
+- self.comm.barrier()
+- FluidSolver.setInitialMesh(True)
+- if myid == self.rootProcess:
+- SolidSolver.displacementPredictor(FluidSolver.getInterRigidDispArray())
+- if self.have_MPI == True:
+- self.comm.barrier()
+- if myid == self.rootProcess:
+- SolidSolver.updateSolution()
+- #If no restart
+- else:
+- self.MPIPrint('Setting FSI initial conditions')
++ self.comm.barrier()
++ if myid == self.rootProcess:
++ SolidSolver.updateSolution()
++ #If no restart
++ else:
++ self.MPIPrint('Setting FSI initial conditions')
+ if myid in self.solidSolverProcessors:
+- SolidSolver.setInitialDisplacements()
++ SolidSolver.setInitialDisplacements()
+ self.getSolidInterfaceDisplacement(SolidSolver)
+- self.interpolateSolidPositionOnFluidMesh(FSI_config)
+- self.setFluidInterfaceVarCoord(FluidSolver)
+- FluidSolver.SetInitialMesh() # if there is an initial deformation in the solid, it has to be communicated to the fluid solver
+- self.MPIPrint('\nFSI initial conditions are set')
+- self.MPIPrint('Beginning time integration\n')
+-
+- # --- External temporal loop --- #
+- while TimeIter <= NbTimeIter:
+-
+- if TimeIter > TimeIterTreshold:
+- NbFSIIter = NbFSIIterMax
+- self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI on time iteration {} ***************'.format(TimeIter))
+- else:
+- NbFSIIter = 1
+-
+- self.FSIIter = 0
+- FSIConv = False
+- FluidSolver.PreprocessExtIter(TimeIter) # set some parameters before temporal fluid iteration
+-
+- # --- Internal FSI loop --- #
+- while self.FSIIter <= (NbFSIIter-1):
++ self.interpolateSolidPositionOnFluidMesh(FSI_config)
++ self.setFluidInterfaceVarCoord(FluidSolver)
++ FluidSolver.SetInitialMesh() # if there is an initial deformation in the solid, it has to be communicated to the fluid solver
++ self.MPIPrint('\nFSI initial conditions are set')
++ self.MPIPrint('Beginning time integration\n')
++
++ # --- External temporal loop --- #
++ while TimeIter <= NbTimeIter:
++
++ if TimeIter > TimeIterTreshold:
++ NbFSIIter = NbFSIIterMax
++ self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI on time iteration {} ***************'.format(TimeIter))
++ else:
++ NbFSIIter = 1
++
++ self.FSIIter = 0
++ FSIConv = False
++ FluidSolver.PreprocessExtIter(TimeIter) # set some parameters before temporal fluid iteration
+
+- self.MPIPrint("\n>>>> Time iteration {} / FSI iteration {} <<<<".format(TimeIter,self.FSIIter))
++ # --- Internal FSI loop --- #
++ while self.FSIIter <= (NbFSIIter-1):
+
+- # --- Mesh morphing step (displacements interpolation, displacements communication, and mesh morpher call) --- #
+- self.interpolateSolidPositionOnFluidMesh(FSI_config)
++ self.MPIPrint("\n>>>> Time iteration {} / FSI iteration {} <<<<".format(TimeIter,self.FSIIter))
++
++ # --- Mesh morphing step (displacements interpolation, displacements communication, and mesh morpher call) --- #
++ self.interpolateSolidPositionOnFluidMesh(FSI_config)
+ self.MPIPrint('\nPerforming dynamic mesh deformation (ALE)...\n')
+ self.setFluidInterfaceVarCoord(FluidSolver)
+ FluidSolver.DynamicMeshUpdate(TimeIter)
+-
+- # --- Fluid solver call for FSI subiteration --- #
+- self.MPIPrint('\nLaunching fluid solver for one single dual-time iteration...')
++
++ # --- Fluid solver call for FSI subiteration --- #
++ self.MPIPrint('\nLaunching fluid solver for one single dual-time iteration...')
+ self.MPIBarrier()
+- FluidSolver.ResetConvergence()
+- FluidSolver.Run()
++ FluidSolver.ResetConvergence()
++ FluidSolver.Run()
+ self.MPIBarrier()
+
+- # --- Surface fluid loads interpolation and communication --- #
+- self.MPIPrint('\nProcessing interface fluid loads...\n')
++ # --- Surface fluid loads interpolation and communication --- #
++ self.MPIPrint('\nProcessing interface fluid loads...\n')
+ self.MPIBarrier()
+- self.getFluidInterfaceNodalForce(FSI_config, FluidSolver)
++ self.getFluidInterfaceNodalForce(FSI_config, FluidSolver)
+ self.MPIBarrier()
+- if TimeIter > TimeIterTreshold:
+- self.interpolateFluidLoadsOnSolidMesh(FSI_config)
+- self.setSolidInterfaceLoads(SolidSolver, FSI_config, time)
++ if TimeIter > TimeIterTreshold:
++ self.interpolateFluidLoadsOnSolidMesh(FSI_config)
++ self.setSolidInterfaceLoads(SolidSolver, FSI_config, time)
+
+- # --- Solid solver call for FSI subiteration --- #
+- self.MPIPrint('\nLaunching solid solver for a single time iteration...\n')
++ # --- Solid solver call for FSI subiteration --- #
++ self.MPIPrint('\nLaunching solid solver for a single time iteration...\n')
+ if myid in self.solidSolverProcessors:
+- if FSI_config['CSD_SOLVER'] == 'NATIVE':
+- SolidSolver.timeIteration(time)
+- elif FSI_config['CSD_SOLVER'] == 'METAFOR' or FSI_config['CSD_SOLVER'] == 'GETDP' or FSI_config['CSD_SOLVER'] == 'TESTER':
+- SolidSolver.run(time-deltaT, time)
+-
+- # --- Compute and monitor the FSI residual --- #
+- varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver)
+- self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm))
+- if varCoordNorm < FSITolerance:
+- FSIConv = True
+- break
++ if FSI_config['CSD_SOLVER'] == 'NATIVE':
++ SolidSolver.timeIteration(time)
++ elif FSI_config['CSD_SOLVER'] == 'METAFOR' or FSI_config['CSD_SOLVER'] == 'GETDP' or FSI_config['CSD_SOLVER'] == 'TESTER':
++ SolidSolver.run(time-deltaT, time)
++
++ # --- Compute and monitor the FSI residual --- #
++ varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver)
++ self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm))
++ if varCoordNorm < FSITolerance:
++ FSIConv = True
++ break
+
+- # --- Relaxe the solid position --- #
++                # --- Relax the solid position --- #
+ self.MPIPrint('\nProcessing interface displacements...\n')
+- self.relaxSolidPosition(FSI_config)
+-
+- self.FSIIter += 1
+- # --- End OF FSI loop --- #
++ self.relaxSolidPosition(FSI_config)
++
++ self.FSIIter += 1
++ # --- End OF FSI loop --- #
+
+ self.MPIBarrier()
+
+- # --- Update the FSI history file --- #
+- if TimeIter > TimeIterTreshold:
+- self.MPIPrint('\nBGS is converged (strong coupling)')
+- self.writeFSIHistory(TimeIter, time, varCoordNorm, FSIConv)
+-
+- # --- Update, monitor and output the fluid solution before the next time step ---#
+- FluidSolver.Update()
+- FluidSolver.Monitor(TimeIter)
+- FluidSolver.Output(TimeIter)
+-
+- if TimeIter >= TimeIterTreshold:
+- if myid in self.solidSolverProcessors:
+- # --- Output the solid solution before thr next time step --- #
+- SolidSolver.writeSolution(time, self.FSIIter, TimeIter, NbTimeIter)
+-
+- # --- Displacement predictor for the next time step and update of the solid solution --- #
+- self.MPIPrint('\nSolid displacement prediction for next time step')
+- self.displacementPredictor(FSI_config, SolidSolver, deltaT)
++ # --- Update the FSI history file --- #
++ if TimeIter > TimeIterTreshold:
++ self.MPIPrint('\nBGS is converged (strong coupling)')
++ self.writeFSIHistory(TimeIter, time, varCoordNorm, FSIConv)
++
++ # --- Update, monitor and output the fluid solution before the next time step ---#
++ FluidSolver.Update()
++ FluidSolver.Monitor(TimeIter)
++ FluidSolver.Output(TimeIter)
++
++ if TimeIter >= TimeIterTreshold:
++ if myid in self.solidSolverProcessors:
++                    # --- Output the solid solution before the next time step --- #
++ SolidSolver.writeSolution(time, self.FSIIter, TimeIter, NbTimeIter)
++
++ # --- Displacement predictor for the next time step and update of the solid solution --- #
++ self.MPIPrint('\nSolid displacement prediction for next time step')
++ self.displacementPredictor(FSI_config, SolidSolver, deltaT)
+ if myid in self.solidSolverProcessors:
+- SolidSolver.updateSolution()
+-
+- TimeIter += 1
+- time += deltaT
+- #--- End of the temporal loop --- #
++ SolidSolver.updateSolution()
++
++ TimeIter += 1
++ time += deltaT
++ #--- End of the temporal loop --- #
+
+ self.MPIBarrier()
+
+- self.MPIPrint('\n*************************')
+- self.MPIPrint('* End FSI computation *')
+- self.MPIPrint('*************************\n')
++ self.MPIPrint('\n*************************')
++ self.MPIPrint('* End FSI computation *')
++ self.MPIPrint('*************************\n')
+
+ def SteadyFSI(self, FSI_config,FluidSolver, SolidSolver):
+- """
+- Runs the steady FSI computation by synchronizing the fluid and solid solver with data exchange at the f/s interface.
+- """
++ """
++ Runs the steady FSI computation by synchronizing the fluid and solid solver with data exchange at the f/s interface.
++ """
+
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
+- numberPart = self.comm.Get_size()
++ myid = self.comm.Get_rank()
++ numberPart = self.comm.Get_size()
+ else:
+ myid = 0
+ numberPart = 1
+
+- # --- Set some general variables for the steady computation --- #
+- NbIter = FSI_config['NB_EXT_ITER'] # number of fluid iteration at each FSI step
+- NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step)
+- FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance
+- varCoordNorm = 0.0
+-
+- self.MPIPrint('\n********************************')
+- self.MPIPrint('* Begin steady FSI computation *')
+- self.MPIPrint('********************************\n')
+- self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI ***************')
++ # --- Set some general variables for the steady computation --- #
++ NbIter = FSI_config['NB_EXT_ITER'] # number of fluid iteration at each FSI step
++ NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step)
++ FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance
++ varCoordNorm = 0.0
++
++ self.MPIPrint('\n********************************')
++ self.MPIPrint('* Begin steady FSI computation *')
++ self.MPIPrint('********************************\n')
++ self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI ***************')
+
+ self.getSolidInterfaceDisplacement(SolidSolver)
+
+- # --- External FSI loop --- #
+- self.FSIIter = 0
+- while self.FSIIter < NbFSIIterMax:
+- self.MPIPrint("\n>>>> FSI iteration {} <<<<".format(self.FSIIter))
+- self.MPIPrint('\nLaunching fluid solver for a steady computation...')
+- # --- Fluid solver call for FSI subiteration ---#
+- Iter = 0
+- FluidSolver.ResetConvergence()
+- while Iter < NbIter:
+- FluidSolver.PreprocessExtIter(Iter)
+- FluidSolver.Run()
+- StopIntegration = FluidSolver.Monitor(Iter)
+- FluidSolver.Output(Iter)
+- if StopIntegration:
+- break;
+- Iter += 1
+-
+- # --- Surface fluid loads interpolation and communication ---#
+- self.MPIPrint('\nProcessing interface fluid loads...\n')
++ # --- External FSI loop --- #
++ self.FSIIter = 0
++ while self.FSIIter < NbFSIIterMax:
++ self.MPIPrint("\n>>>> FSI iteration {} <<<<".format(self.FSIIter))
++ self.MPIPrint('\nLaunching fluid solver for a steady computation...')
++ # --- Fluid solver call for FSI subiteration ---#
++ Iter = 0
++ FluidSolver.ResetConvergence()
++ while Iter < NbIter:
++ FluidSolver.PreprocessExtIter(Iter)
++ FluidSolver.Run()
++ StopIntegration = FluidSolver.Monitor(Iter)
++ FluidSolver.Output(Iter)
++ if StopIntegration:
++ break;
++ Iter += 1
++
++ # --- Surface fluid loads interpolation and communication ---#
++ self.MPIPrint('\nProcessing interface fluid loads...\n')
+ self.MPIBarrier()
+- self.getFluidInterfaceNodalForce(FSI_config, FluidSolver)
++ self.getFluidInterfaceNodalForce(FSI_config, FluidSolver)
+ self.MPIBarrier()
+- self.interpolateFluidLoadsOnSolidMesh(FSI_config)
+- self.setSolidInterfaceLoads(SolidSolver, FSI_config, 0.05)
+-
+- # --- Solid solver call for FSI subiteration --- #
+- self.MPIPrint('\nLaunching solid solver for a static computation...\n')
++ self.interpolateFluidLoadsOnSolidMesh(FSI_config)
++ self.setSolidInterfaceLoads(SolidSolver, FSI_config, 0.05)
++
++ # --- Solid solver call for FSI subiteration --- #
++ self.MPIPrint('\nLaunching solid solver for a static computation...\n')
+ if myid in self.solidSolverProcessors:
+- if FSI_config['CSD_SOLVER'] == 'NATIVE':
+- SolidSolver.staticComputation()
++ if FSI_config['CSD_SOLVER'] == 'NATIVE':
++ SolidSolver.staticComputation()
+ else:
+ SolidSolver.run(0.0, 0.05)
+- SolidSolver.writeSolution(0.0, self.FSIIter, Iter, NbIter)
++ SolidSolver.writeSolution(0.0, self.FSIIter, Iter, NbIter)
+
+- # --- Compute and monitor the FSI residual --- #
+- varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver)
+- self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm))
++ # --- Compute and monitor the FSI residual --- #
++ varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver)
++ self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm))
+ self.writeFSIHistory(0, 0.0, varCoordNorm, False)
+- if varCoordNorm < FSITolerance:
+- break
++ if varCoordNorm < FSITolerance:
++ break
+
+ # --- Relaxe the solid displacement and update the solid solution --- #
+ self.MPIPrint('\nProcessing interface displacements...\n')
+- self.relaxSolidPosition(FSI_config)
++ self.relaxSolidPosition(FSI_config)
+ if myid in self.solidSolverProcessors:
+ SolidSolver.updateSolution()
+-
+- # --- Mesh morphing step (displacement interpolation, displacements communication, and mesh morpher call) --- #
+- self.interpolateSolidPositionOnFluidMesh(FSI_config)
+- self.MPIPrint('\nPerforming static mesh deformation...\n')
+- self.setFluidInterfaceVarCoord(FluidSolver)
+- FluidSolver.StaticMeshUpdate()
+- self.FSIIter += 1
++
++ # --- Mesh morphing step (displacement interpolation, displacements communication, and mesh morpher call) --- #
++ self.interpolateSolidPositionOnFluidMesh(FSI_config)
++ self.MPIPrint('\nPerforming static mesh deformation...\n')
++ self.setFluidInterfaceVarCoord(FluidSolver)
++ FluidSolver.StaticMeshUpdate()
++ self.FSIIter += 1
+
+ self.MPIBarrier()
+
+- self.MPIPrint('\nBGS is converged (strong coupling)')
+- self.MPIPrint(' ')
+- self.MPIPrint('*************************')
+- self.MPIPrint('* End FSI computation *')
+- self.MPIPrint('*************************')
+- self.MPIPrint(' ')
++ self.MPIPrint('\nBGS is converged (strong coupling)')
++ self.MPIPrint(' ')
++ self.MPIPrint('*************************')
++ self.MPIPrint('* End FSI computation *')
++ self.MPIPrint('*************************')
++ self.MPIPrint(' ')
+diff -Naur old/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py new/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py
+--- old/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py 2020-05-01 19:09:18.000000000 +0300
++++ new/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py 2020-05-10 16:17:07.000000000 +0300
+@@ -174,9 +174,9 @@
+
+ with open(self.Config_file) as configfile:
+ while 1:
+- line = configfile.readline()
+- if not line:
+- break
++ line = configfile.readline()
++ if not line:
++ break
+
+ # remove line returns
+ line = line.strip('\r\n')
+@@ -189,41 +189,41 @@
+ this_value = line[1].strip()
+
+ for case in switch(this_param):
+- #integer values
+- #if case("NB_FSI_ITER") :
+- #self.Config[this_param] = int(this_value)
+- #break
+-
+- #float values
+- if case("DELTA_T") : pass
+- if case("START_TIME") : pass
+- if case("STOP_TIME") : pass
+- if case("SPRING_MASS") : pass
+- if case("INERTIA_FLEXURAL") : pass
+- if case("SPRING_STIFFNESS") : pass
+- if case("SPRING_DAMPING") : pass
+- if case("TORSIONAL_STIFFNESS") : pass
+- if case("TORSIONAL_DAMPING") : pass
+- if case("CORD") : pass
+- if case("FLEXURAL_AXIS") : pass
+- if case("GRAVITY_CENTER") : pass
+- if case("INITIAL_DISP") : pass
+- if case("INITIAL_ANGLE") : pass
+- if case("RHO") :
+- self.Config[this_param] = float(this_value)
+- break
+-
+- #string values
+- if case("TIME_MARCHING") : pass
+- if case("MESH_FILE") : pass
+- if case("CSD_SOLVER") : pass
+- if case("MOVING_MARKER") : pass
+- if case("STRUCT_TYPE") :
+- self.Config[this_param] = this_value
+- break
++ #integer values
++ #if case("NB_FSI_ITER") :
++ #self.Config[this_param] = int(this_value)
++ #break
++
++ #float values
++ if case("DELTA_T") : pass
++ if case("START_TIME") : pass
++ if case("STOP_TIME") : pass
++ if case("SPRING_MASS") : pass
++ if case("INERTIA_FLEXURAL") : pass
++ if case("SPRING_STIFFNESS") : pass
++ if case("SPRING_DAMPING") : pass
++ if case("TORSIONAL_STIFFNESS") : pass
++ if case("TORSIONAL_DAMPING") : pass
++ if case("CORD") : pass
++ if case("FLEXURAL_AXIS") : pass
++ if case("GRAVITY_CENTER") : pass
++ if case("INITIAL_DISP") : pass
++ if case("INITIAL_ANGLE") : pass
++ if case("RHO") :
++ self.Config[this_param] = float(this_value)
++ break
++
++ #string values
++ if case("TIME_MARCHING") : pass
++ if case("MESH_FILE") : pass
++ if case("CSD_SOLVER") : pass
++ if case("MOVING_MARKER") : pass
++ if case("STRUCT_TYPE") :
++ self.Config[this_param] = this_value
++ break
+
+- if case():
+- print(this_param + " is an invalid option !")
++ if case():
++ print(this_param + " is an invalid option !")
+ break
+
+ def __readSU2Mesh(self):
+@@ -233,78 +233,78 @@
+ print('Opened mesh file ' + self.Mesh_file + '.')
+ while 1:
+ line = meshfile.readline()
+- if not line:
+- break
++ if not line:
++ break
+
+- pos = line.find('NDIM')
+- if pos != -1:
+- line = line.strip('\r\n')
++ pos = line.find('NDIM')
++ if pos != -1:
++ line = line.strip('\r\n')
+ line = line.split("=",1)
+- self.nDim = int(line[1])
+- continue
+-
+- pos = line.find('NELEM')
+- if pos != -1:
+- line = line.strip('\r\n')
++ self.nDim = int(line[1])
++ continue
++
++ pos = line.find('NELEM')
++ if pos != -1:
++ line = line.strip('\r\n')
+ line = line.split("=",1)
+- self.nElem = int(line[1])
+- continue
++ self.nElem = int(line[1])
++ continue
+
+- pos = line.find('NPOIN')
+- if pos != -1:
+- line = line.strip('\r\n')
++ pos = line.find('NPOIN')
++ if pos != -1:
++ line = line.strip('\r\n')
+ line = line.split("=",1)
+- self.nPoint = int(line[1])
++ self.nPoint = int(line[1])
+ for iPoint in range(self.nPoint):
+- self.node.append(Point())
+- line = meshfile.readline()
+- line = line.strip('\r\n')
+- line = line.split(' ',self.nDim)
+- x = float(line[0])
+- y = float(line[1])
++ self.node.append(Point())
++ line = meshfile.readline()
++ line = line.strip('\r\n')
++ line = line.split(' ',self.nDim)
++ x = float(line[0])
++ y = float(line[1])
+ z = 0.0
+- if self.nDim == 3:
+- z = float(line[2])
+- self.node[iPoint].SetCoord((x,y,z))
++ if self.nDim == 3:
++ z = float(line[2])
++ self.node[iPoint].SetCoord((x,y,z))
+ self.node[iPoint].SetCoord0((x,y,z))
+- self.node[iPoint].SetCoord_n((x,y,z))
+- continue
++ self.node[iPoint].SetCoord_n((x,y,z))
++ continue
+
+- pos = line.find('NMARK')
+- if pos != -1:
+- line = line.strip('\r\n')
++ pos = line.find('NMARK')
++ if pos != -1:
++ line = line.strip('\r\n')
+ line = line.split("=",1)
+- self.nMarker = int(line[1])
+- continue
++ self.nMarker = int(line[1])
++ continue
+
+- pos = line.find('MARKER_TAG')
+- if pos != -1:
+- line = line.strip('\r\n')
+- line = line.replace(" ", "")
++ pos = line.find('MARKER_TAG')
++ if pos != -1:
++ line = line.strip('\r\n')
++ line = line.replace(" ", "")
+ line = line.split("=",1)
+- markerTag = line[1]
+- if markerTag == self.FSI_marker:
+- self.markers[markerTag] = []
+- line = meshfile.readline()
+- line = line.strip('\r\n')
+- line = line.split("=",1)
+- nElem = int(line[1])
+- for iElem in range(nElem):
+- line = meshfile.readline()
+- line = line.strip('\r\n')
+- line = line.split(' ',1)
+- elemType = int(line[0])
+- if elemType == 3:
+- nodes = line[1].split(' ', 1)
+- if not int(nodes[0]) in self.markers[markerTag]:
+- self.markers[markerTag].append(int(nodes[0]))
+- if not int(nodes[1]) in self.markers[markerTag]:
+- self.markers[markerTag].append(int(nodes[1]))
+- else:
+- print("Element type {} is not recognized !!".format(elemType))
+- continue
+- else:
+- continue
++ markerTag = line[1]
++ if markerTag == self.FSI_marker:
++ self.markers[markerTag] = []
++ line = meshfile.readline()
++ line = line.strip('\r\n')
++ line = line.split("=",1)
++ nElem = int(line[1])
++ for iElem in range(nElem):
++ line = meshfile.readline()
++ line = line.strip('\r\n')
++ line = line.split(' ',1)
++ elemType = int(line[0])
++ if elemType == 3:
++ nodes = line[1].split(' ', 1)
++ if not int(nodes[0]) in self.markers[markerTag]:
++ self.markers[markerTag].append(int(nodes[0]))
++ if not int(nodes[1]) in self.markers[markerTag]:
++ self.markers[markerTag].append(int(nodes[1]))
++ else:
++ print("Element type {} is not recognized !!".format(elemType))
++ continue
++ else:
++ continue
+
+ print("Number of dimensions: {}".format(self.nDim))
+ print("Number of elements: {}".format(self.nElem))
+@@ -441,23 +441,23 @@
+ Coord_n = self.node[iPoint].GetCoord_n()
+
+ if self.Unsteady:
+- r = Coord_n - self.centerOfRotation_n
+- else:
+- r = Coord - self.centerOfRotation
++ r = Coord_n - self.centerOfRotation_n
++ else:
++ r = Coord - self.centerOfRotation
+
+- rotCoord = rotMatrix.dot(r)
++ rotCoord = rotMatrix.dot(r)
+
+ newCoord = newCenter + rotCoord
+ newVel[0] = Centerdot[0]+psidot*(newCoord[1]-newCenter[1])
+- newVel[1] = Centerdot[1]-psidot*(newCoord[0]-newCenter[0])
+- newVel[2] = Centerdot[2]+0.0
++ newVel[1] = Centerdot[1]-psidot*(newCoord[0]-newCenter[0])
++ newVel[2] = Centerdot[2]+0.0
+
+ self.node[iPoint].SetCoord((newCoord[0], newCoord[1], newCoord[2]))
+ self.node[iPoint].SetVel((newVel[0], newVel[1], newVel[2]))
+
+- if initialize:
+- self.node[iPoint].SetCoord_n((newCoord[0], newCoord[1], newCoord[2]))
+- self.node[iPoint].SetVel_n((newVel[0], newVel[1], newVel[2]))
++ if initialize:
++ self.node[iPoint].SetCoord_n((newCoord[0], newCoord[1], newCoord[2]))
++ self.node[iPoint].SetVel_n((newVel[0], newVel[1], newVel[2]))
+
+ self.centerOfRotation = np.copy(newCenter)
+
+diff -Naur old/SU2_PY/FSI/io/FSI_config.py new/SU2_PY/FSI/io/FSI_config.py
+--- old/SU2_PY/FSI/io/FSI_config.py 2020-05-01 19:09:18.000000000 +0300
++++ new/SU2_PY/FSI/io/FSI_config.py 2020-05-10 16:17:07.000000000 +0300
+@@ -58,23 +58,23 @@
+ self.readConfig()
+
+ def __str__(self):
+- tempString = str()
+- for key, value in self._ConfigContent.items():
+- tempString += "{} = {}\n".format(key,value)
+- return tempString
++ tempString = str()
++ for key, value in self._ConfigContent.items():
++ tempString += "{} = {}\n".format(key,value)
++ return tempString
+
+ def __getitem__(self,key):
+- return self._ConfigContent[key]
++ return self._ConfigContent[key]
+
+ def __setitem__(self, key, value):
+- self._ConfigContent[key] = value
++ self._ConfigContent[key] = value
+
+ def readConfig(self):
+ input_file = open(self.ConfigFileName)
+ while 1:
+- line = input_file.readline()
+- if not line:
+- break
++ line = input_file.readline()
++ if not line:
++ break
+ # remove line returns
+ line = line.strip('\r\n')
+ # make sure it has useful data
+@@ -86,46 +86,46 @@
+ this_value = line[1].strip()
+
+ for case in switch(this_param):
+- #integer values
+- if case("NDIM") : pass
+- #if case("MESH_DEF_LIN_ITER") : pass
+- #if case("MESH_DEF_NONLIN_ITER") : pass
+- if case("RESTART_ITER") : pass
+- if case("NB_EXT_ITER") : pass
+- if case("NB_FSI_ITER") :
+- self._ConfigContent[this_param] = int(this_value)
+- break
++ #integer values
++ if case("NDIM") : pass
++ #if case("MESH_DEF_LIN_ITER") : pass
++ #if case("MESH_DEF_NONLIN_ITER") : pass
++ if case("RESTART_ITER") : pass
++ if case("NB_EXT_ITER") : pass
++ if case("NB_FSI_ITER") :
++ self._ConfigContent[this_param] = int(this_value)
++ break
+
+- #float values
++ #float values
+ if case("RBF_RADIUS") : pass
+- if case("AITKEN_PARAM") : pass
+- if case("START_TIME") : pass
+- if case("UNST_TIMESTEP") : pass
+- if case("UNST_TIME") : pass
+- if case("FSI_TOLERANCE") :
+- self._ConfigContent[this_param] = float(this_value)
+- break
+-
+- #string values
+- if case("CFD_CONFIG_FILE_NAME") : pass
+- if case("CSD_SOLVER") : pass
+- if case("CSD_CONFIG_FILE_NAME") : pass
+- if case("RESTART_SOL") : pass
+- if case("MATCHING_MESH") : pass
++ if case("AITKEN_PARAM") : pass
++ if case("START_TIME") : pass
++ if case("UNST_TIMESTEP") : pass
++ if case("UNST_TIME") : pass
++ if case("FSI_TOLERANCE") :
++ self._ConfigContent[this_param] = float(this_value)
++ break
++
++ #string values
++ if case("CFD_CONFIG_FILE_NAME") : pass
++ if case("CSD_SOLVER") : pass
++ if case("CSD_CONFIG_FILE_NAME") : pass
++ if case("RESTART_SOL") : pass
++ if case("MATCHING_MESH") : pass
+ if case("MESH_INTERP_METHOD") : pass
+- if case("DISP_PRED") : pass
+- if case("AITKEN_RELAX") : pass
+- if case("TIME_MARCHING") : pass
+- if case("INTERNAL_FLOW") :
+- #if case("MESH_DEF_METHOD") : pass
+- self._ConfigContent[this_param] = this_value
+- break
+-
+- if case():
+- print(this_param + " is an invalid option !")
+- break
+- #end for
+-
++ if case("DISP_PRED") : pass
++ if case("AITKEN_RELAX") : pass
++ if case("TIME_MARCHING") : pass
++ if case("INTERNAL_FLOW") :
++ #if case("MESH_DEF_METHOD") : pass
++ self._ConfigContent[this_param] = this_value
++ break
++
++ if case():
++ print(this_param + " is an invalid option !")
++ break
++ #end for
++
+
+
+ #def dump()
+diff -Naur old/SU2_PY/SU2/util/filter_adjoint.py new/SU2_PY/SU2/util/filter_adjoint.py
+--- old/SU2_PY/SU2/util/filter_adjoint.py 2020-05-01 19:09:18.000000000 +0300
++++ new/SU2_PY/SU2/util/filter_adjoint.py 2020-05-10 16:17:07.000000000 +0300
+@@ -179,7 +179,7 @@
+ Sens_smoother = smooth( S_clip, Sens_smooth, smth_len , 'blackman' )
+ Sens_filter = Sens_smooth + (Sens_smooth - Sens_smoother) # sharpener
+ else:
+- raise Exception, 'unknown filter type'
++ raise Exception('unknown filter type')
+
+ # --------------------------------------------
+ # PLOTTING
+@@ -472,10 +472,10 @@
+ """
+
+ if x.ndim != 1:
+- raise ValueError, "smooth only accepts 1 dimension arrays."
++ raise ValueError("smooth only accepts 1 dimension arrays.")
+
+ if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
+- raise ValueError, "Window is not of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
++ raise ValueError("Window is not of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
+
+ # interpolate to constant time sample width
+ min_dt = np.min( np.diff(t) )
+diff -Naur old/SU2_PY/compute_uncertainty.py new/SU2_PY/compute_uncertainty.py
+--- old/SU2_PY/compute_uncertainty.py 2020-05-01 19:09:18.000000000 +0300
++++ new/SU2_PY/compute_uncertainty.py 2020-05-10 16:17:07.000000000 +0300
+@@ -66,13 +66,13 @@
+
+ # perform eigenvalue perturbations
+ for comp in range(1,4):
+- print "\n\n =================== Performing " + str(comp) + " Component Perturbation =================== \n\n"
++ print('\n\n =================== Performing ' + str(comp) + ' Component Perturbation =================== \n\n')
+
+ # make copies
+ konfig = copy.deepcopy(config)
+ ztate = copy.deepcopy(state)
+
+- # set componentality
++ # set componentality
+ konfig.UQ_COMPONENT = comp
+
+ # send output to a folder
+@@ -85,14 +85,14 @@
+ # run su2
+ info = SU2.run.CFD(konfig)
+ ztate.update(info)
+-
+- # Solution merging
+- konfig.SOLUTION_FILENAME = konfig.RESTART_FILENAME
+- info = SU2.run.merge(konfig)
+- ztate.update(info)
++
++ # Solution merging
++ konfig.SOLUTION_FILENAME = konfig.RESTART_FILENAME
++ info = SU2.run.merge(konfig)
++ ztate.update(info)
+
+
+- print "\n\n =================== Performing p1c1 Component Perturbation =================== \n\n"
++ print('\n\n =================== Performing p1c1 Component Perturbation =================== \n\n')
+
+ # make copies
+ konfig = copy.deepcopy(config)
+@@ -118,7 +118,7 @@
+ info = SU2.run.merge(konfig)
+ state.update(info)
+
+- print "\n\n =================== Performing p1c2 Component Perturbation =================== \n\n"
++ print('\n\n =================== Performing p1c2 Component Perturbation =================== \n\n')
+
+ # make copies
+ konfig = copy.deepcopy(config)
+diff -Naur old/SU2_PY/fsi_computation.py new/SU2_PY/fsi_computation.py
+--- old/SU2_PY/fsi_computation.py 2020-05-01 19:09:18.000000000 +0300
++++ new/SU2_PY/fsi_computation.py 2020-05-10 16:17:07.000000000 +0300
+@@ -74,9 +74,9 @@
+ if myid == rootProcess:
+ if os.getcwd() not in sys.path:
+ sys.path.append(os.getcwd())
+- print("Setting working directory : {}".format(os.getcwd()))
+- else:
+- print("Working directory is set to {}".format(os.getcwd()))
++ print("Setting working directory : {}".format(os.getcwd()))
++ else:
++ print("Working directory is set to {}".format(os.getcwd()))
+
+ # starts timer
+ start = timer.time()
diff --git a/sci-physics/SU2/files/SU2-7.0.4-unbundle_boost.patch b/sci-physics/SU2/files/SU2-7.0.4-unbundle_boost.patch
new file mode 100644
index 0000000..12acdfe
--- /dev/null
+++ b/sci-physics/SU2/files/SU2-7.0.4-unbundle_boost.patch
@@ -0,0 +1,31 @@
+diff -Naur old_static/externals/tecio/meson.build new_shared/externals/tecio/meson.build
+--- old_static/externals/tecio/meson.build 2020-05-09 16:35:10.000000000 +0300
++++ new_shared/externals/tecio/meson.build 2020-05-10 11:52:36.000000000 +0300
+@@ -1,15 +1,15 @@
+-check_dir = run_command(python,
+- script_path / 'check_dir.py',
+- 'boost')
+-if check_dir.returncode() != 0
+- message('Extracting boost ...')
+- extract_boost = run_command(python,
+- script_path / 'extract_file.py',
+- 'boost.tar.gz',
+- meson.current_source_dir(), check: true)
+-else
+- message('Boost sources found.')
+-endif
++#check_dir = run_command(python,
++# script_path / 'check_dir.py',
++# 'boost')
++#if check_dir.returncode() != 0
++# message('Extracting boost ...')
++# extract_boost = run_command(python,
++# script_path / 'extract_file.py',
++# 'boost.tar.gz',
++# meson.current_source_dir(), check: true)
++#else
++# message('Boost sources found.')
++#endif
+
+ if mpi
+ subdir('teciompisrc')
diff --git a/sci-physics/SU2/metadata.xml b/sci-physics/SU2/metadata.xml
new file mode 100644
index 0000000..636aded
--- /dev/null
+++ b/sci-physics/SU2/metadata.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE pkgmetadata SYSTEM "http://www.gentoo.org/dtd/metadata.dtd">
+<pkgmetadata>
+ <maintainer type="person">
+ <email>torokhov-s-a@yandex.ru</email>
+ <name>Sergey Torokhov</name>
+ </maintainer>
+ <use>
+ <flag name="cgns">Build with CGNS support (bundled)</flag>
+ <flag name="mkl">Enable Intel MKL support</flag>
+ <flag name="openblas">Enable OpenBLAS support</flag>
+ <flag name="tecio">Enable TECIO support</flag>
+ <flag name="tutorials">Install Tutorials files</flag>
+ </use>
+ <longdescription>
+ The SU2 suite is an open-source collection of C++ based software tools
+ for performing Partial Differential Equation (PDE) analysis and solving
+ PDE-constrained optimization problems.
+
+ The toolset is designed with Computational Fluid Dynamics (CFD)
+ and aerodynamic shape optimization in mind, but is extensible
+ to treat arbitrary sets of governing equations such as potential flow,
+ elasticity, electrodynamics, chemically-reacting flows, and many others.
+ </longdescription>
+ <upstream>
+ <remote-id type="github">su2code/SU2</remote-id>
+ </upstream>
+</pkgmetadata>
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [gentoo-commits] repo/proj/guru:dev commit in: sci-physics/SU2/, sci-physics/SU2/files/
@ 2020-06-16 14:34 Sergey Torokhov
0 siblings, 0 replies; 6+ messages in thread
From: Sergey Torokhov @ 2020-06-16 14:34 UTC (permalink / raw
To: gentoo-commits
commit: d98f35a18e0df831b2c2ef37bac5152591800710
Author: Sergey Torokhov <torokhov-s-a <AT> yandex <DOT> ru>
AuthorDate: Tue Jun 16 14:32:41 2020 +0000
Commit: Sergey Torokhov <torokhov-s-a <AT> yandex <DOT> ru>
CommitDate: Tue Jun 16 14:32:41 2020 +0000
URL: https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=d98f35a1
sci-physics/SU2: 7.0.5 version bump
Signed-off-by: Sergey Torokhov <torokhov-s-a <AT> yandex.ru>
sci-physics/SU2/Manifest | 3 +
sci-physics/SU2/SU2-7.0.5.ebuild | 115 ++++++++++++++++++++++++++
sci-physics/SU2/files/SU2-7.0.5-fix-env.patch | 12 +++
3 files changed, 130 insertions(+)
diff --git a/sci-physics/SU2/Manifest b/sci-physics/SU2/Manifest
index 6ebe00f..87318f5 100644
--- a/sci-physics/SU2/Manifest
+++ b/sci-physics/SU2/Manifest
@@ -1,3 +1,6 @@
DIST SU2-7.0.4-TestCases.tar.gz 437960103 BLAKE2B 2469edc23f62589fa18be5fff6e036965f6b5f6e2be207642d318aac4d2044c07f0891568f86c1a3ab065e79afce50cc73ad0857b82093d79ac28a4d0451a4ad SHA512 f21d963815e024582e99647a21ebae0b17fc69f75bc34bb72cc3a86cc9ff8502342b31755b5da73e7088b4d0ce430bdd6b4efefc03583cbfcf5156c1849328e1
DIST SU2-7.0.4-Tutorials.tar.gz 64282233 BLAKE2B b0d13a0988d5617868fad6098fe8110e3600415f05784ff04416cb23162fadc8c1d06d50c5200b14f65afb3e97ee766b21dfdcd4ec8ded9026baf510ca829e48 SHA512 604a05e15a8eae1c7255016261a6576a97fc364f66004ecaccaae932e3a97624c2599d354dd874562824caa8f8ea3dac2f03e0105b1c27d66ec0bf59e3a27105
DIST SU2-7.0.4.tar.gz 20516147 BLAKE2B 21f45e4918bbc6a72bf47ad61d3301abed50a7cf569e9e8d4040201ff653e583d50a547853365302671922f023d0cc6f3735c1afcd0f3b6bf3c3fc92dc807787 SHA512 8e69f0e1d335adef0bd98666c98e29bc15ee0d7a0fcbbbc91a1ba02275ca52fda7f8f47434547f7982ce0e73a6ff78bd2ed57ca328d1e87b8afdd3b0a698d262
+DIST SU2-7.0.5-TestCases.tar.gz 444209661 BLAKE2B a7b14a3ee679a28a3a4530ddca45f39fdc2c0bc5f29fe20e6c9f04d03dee6ca08f98648989b0160ee8742621b539e2f4af9e8bdfb802c3e460a9f3b7e1be3dda SHA512 f21acbaccce66685323566a3f856ceb5fbd36764a8dea875af839520b46be8f7e26491ccb855243f93e4ae948534574080e986faafc2a84c86754fe39f74e240
+DIST SU2-7.0.5-Tutorials.tar.gz 64282246 BLAKE2B 9c8823df5bbda1be80f03dfa6d9de4b4263904acbee15f6928d3758a9b25c84ace59bf02847798838cbf3236961f51589e851e342f288961b2f2a62e28de216c SHA512 2b7c98cc4d060c850572e45418d4ddf38843ced3349a3fb807c1d32f0e5aabf578f8944918e3fc63a32161385eaf69b4742d24ef41d21b43006d5b881a89953b
+DIST SU2-7.0.5.tar.gz 20526328 BLAKE2B 0c3b16269f9d8356eaecde991853fe184188b98df62186c56780ee34b26995763adbac0acca53db1625ac9b0dfadc3348eac792e700e66be00a174ff056e017d SHA512 283da35aa4f6bac4d7451506c6b84d98a18dfe7630adfc17764511fba8171b15c36ee80807bbfc1c55e39e196f8ed304fff3eb1182dfac15aa6a86a9613e51ad
diff --git a/sci-physics/SU2/SU2-7.0.5.ebuild b/sci-physics/SU2/SU2-7.0.5.ebuild
new file mode 100644
index 0000000..d8a4c42
--- /dev/null
+++ b/sci-physics/SU2/SU2-7.0.5.ebuild
@@ -0,0 +1,115 @@
+# Copyright 1999-2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=7
+
+PYTHON_COMPAT=( python3_{6,7,8} )
+
+inherit meson python-single-r1
+
+DESCRIPTION="SU2: An Open-Source Suite for Multiphysics Simulation and Design"
+HOMEPAGE="https://su2code.github.io/"
+SRC_URI="
+ https://github.com/su2code/SU2/archive/v${PV}.tar.gz -> ${P}.tar.gz
+ test? ( https://github.com/su2code/TestCases/archive/v${PV}.tar.gz -> ${P}-TestCases.tar.gz )
+ tutorials? ( https://github.com/su2code/Tutorials/archive/v${PV}.tar.gz -> ${P}-Tutorials.tar.gz )
+"
+
+LICENSE="LGPL-2.1"
+SLOT="0"
+KEYWORDS="~amd64"
+
+# cgns, metis, parmetis are bundled;
+# omp is disabled as it's experimental;
+# pastix is disabled as it tries to find bundled libs;
+IUSE="cgns -mkl +mpi openblas tecio test tutorials"
+RESTRICT="!test? ( test )"
+REQUIRED_USE="
+ ${PYTHON_REQUIRED_USE}
+ mkl? ( !openblas )
+"
+
+RDEPEND="
+ ${PYTHON_DEPS}
+ mpi? ( virtual/mpi[cxx] )
+ mkl? ( sci-libs/mkl )
+ openblas? ( sci-libs/openblas )
+"
+DEPEND="
+ ${RDEPEND}
+ tecio? ( dev-libs/boost:= )
+"
+BDEPEND="virtual/pkgconfig"
+
+PATCHES=(
+ "${FILESDIR}/${P}-fix-env.patch"
+ "${FILESDIR}/${PN}-7.0.4-unbundle_boost.patch"
+ "${FILESDIR}/${PN}-7.0.4-fix-python-optimize.patch"
+)
+
+DOCS=( "LICENSE.md" "README.md" "SU2_PY/documentation.txt" )
+
+src_unpack() {
+ unpack "${P}.tar.gz"
+ if use test ; then
+ einfo "Unpacking ${P}-TestCases.tar.gz to /var/tmp/portage/sci-physics/${P}/work/${P}/TestCases"
+ tar -C "${P}"/TestCases --strip-components=1 -xzf "${DISTDIR}/${P}-TestCases.tar.gz" || die
+ fi
+ if use tutorials ; then
+ einfo "Unpacking ${P}-Tutorials.tar.gz to /var/tmp/portage/sci-physics/${P}/work/${P}"
+ mkdir "${P}"/Tutorials
+ tar -C "${P}"/Tutorials --strip-components=1 -xzf "${DISTDIR}/${P}-Tutorials.tar.gz" || die
+ fi
+}
+
+src_configure() {
+ local emesonargs=(
+ -Denable-autodiff=false
+ -Denable-directdiff=false
+ -Denable-pastix=false
+ -Denable-pywrapper=false
+ -Dwith-omp=false
+ $(meson_feature mpi with-mpi)
+ $(meson_use cgns enable-cgns)
+ $(meson_use mkl enable-mkl)
+ $(meson_use openblas enable-openblas)
+ $(meson_use tecio enable-tecio)
+ $(meson_use test enable-tests)
+ )
+ meson_src_configure
+}
+
+src_test() {
+ ln -s ../../${P}-build/SU2_CFD/src/SU2_CFD SU2_PY/SU2_CFD
+ ln -s ../../${P}-build/SU2_DEF/src/SU2_DEF SU2_PY/SU2_DEF
+ ln -s ../../${P}-build/SU2_DOT/src/SU2_DOT SU2_PY/SU2_DOT
+ ln -s ../../${P}-build/SU2_GEO/src/SU2_GEO SU2_PY/SU2_GEO
+ ln -s ../../${P}-build/SU2_MSH/src/SU2_MSH SU2_PY/SU2_MSH
+ ln -s ../../${P}-build/SU2_SOL/src/SU2_SOL SU2_PY/SU2_SOL
+
+ export SU2_RUN="${S}/SU2_PY"
+ export SU2_HOME="${S}"
+ export PATH=$PATH:$SU2_RUN
+ export PYTHONPATH=$PYTHONPATH:$SU2_RUN
+
+ einfo "Running UnitTests ..."
+ ../${P}-build/UnitTests/test_driver
+
+ pushd TestCases/
+ use mpi && python parallel_regression.py
+ use mpi || python serial_regression.py
+ use tutorials && use mpi && python tutorials.py
+ popd
+}
+
+src_install() {
+ meson_src_install
+ mkdir -p "${ED}$(python_get_sitedir)"
+ mv "${ED}"/usr/bin/{FSI,SU2,*.py} -t "${ED}$(python_get_sitedir)"
+ python_optimize "${D}/$(python_get_sitedir)"
+
+ if use tutorials ; then
+ insinto "/usr/share/${P}"
+ doins -r Tutorials
+ fi
+}
diff --git a/sci-physics/SU2/files/SU2-7.0.5-fix-env.patch b/sci-physics/SU2/files/SU2-7.0.5-fix-env.patch
new file mode 100644
index 0000000..b52f171
--- /dev/null
+++ b/sci-physics/SU2/files/SU2-7.0.5-fix-env.patch
@@ -0,0 +1,12 @@
+diff -Naur old_env/UnitTests/meson.build new_env/UnitTests/meson.build
+--- old_env/UnitTests/meson.build 2020-06-15 17:03:43.000000000 +0300
++++ new_env/UnitTests/meson.build 2020-06-15 17:04:35.000000000 +0300
+@@ -24,7 +24,7 @@
+ test_driver = executable(
+ 'test_driver',
+ unit_test_files,
+- install : true,
++ install : false,
+ dependencies : [su2_cfd_dep, common_dep, su2_deps, catch2_dep],
+ cpp_args: ['-fPIC', default_warning_flags, su2_cpp_args]
+ )
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [gentoo-commits] repo/proj/guru:dev commit in: sci-physics/SU2/, sci-physics/SU2/files/
@ 2020-10-10 0:29 Sergey Torokhov
0 siblings, 0 replies; 6+ messages in thread
From: Sergey Torokhov @ 2020-10-10 0:29 UTC (permalink / raw
To: gentoo-commits
commit: 5ab4e541e928cbd52d090ae680857ff72ee4dd2f
Author: Sergey Torokhov <torokhov-s-a <AT> yandex <DOT> ru>
AuthorDate: Sat Oct 10 00:27:53 2020 +0000
Commit: Sergey Torokhov <torokhov-s-a <AT> yandex <DOT> ru>
CommitDate: Sat Oct 10 00:27:53 2020 +0000
URL: https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=5ab4e541
sci-physics/SU2: drop old
Signed-off-by: Sergey Torokhov <torokhov-s-a <AT> yandex.ru>
sci-physics/SU2/Manifest | 6 --
sci-physics/SU2/SU2-7.0.4.ebuild | 112 -------------------------
sci-physics/SU2/SU2-7.0.5.ebuild | 115 --------------------------
sci-physics/SU2/files/SU2-7.0.4-fix-env.patch | 24 ------
sci-physics/SU2/files/SU2-7.0.5-fix-env.patch | 12 ---
5 files changed, 269 deletions(-)
diff --git a/sci-physics/SU2/Manifest b/sci-physics/SU2/Manifest
index a4481036..b68e4c98 100644
--- a/sci-physics/SU2/Manifest
+++ b/sci-physics/SU2/Manifest
@@ -1,9 +1,3 @@
-DIST SU2-7.0.4-TestCases.tar.gz 437960103 BLAKE2B 2469edc23f62589fa18be5fff6e036965f6b5f6e2be207642d318aac4d2044c07f0891568f86c1a3ab065e79afce50cc73ad0857b82093d79ac28a4d0451a4ad SHA512 f21d963815e024582e99647a21ebae0b17fc69f75bc34bb72cc3a86cc9ff8502342b31755b5da73e7088b4d0ce430bdd6b4efefc03583cbfcf5156c1849328e1
-DIST SU2-7.0.4-Tutorials.tar.gz 64282233 BLAKE2B b0d13a0988d5617868fad6098fe8110e3600415f05784ff04416cb23162fadc8c1d06d50c5200b14f65afb3e97ee766b21dfdcd4ec8ded9026baf510ca829e48 SHA512 604a05e15a8eae1c7255016261a6576a97fc364f66004ecaccaae932e3a97624c2599d354dd874562824caa8f8ea3dac2f03e0105b1c27d66ec0bf59e3a27105
-DIST SU2-7.0.4.tar.gz 20516147 BLAKE2B 21f45e4918bbc6a72bf47ad61d3301abed50a7cf569e9e8d4040201ff653e583d50a547853365302671922f023d0cc6f3735c1afcd0f3b6bf3c3fc92dc807787 SHA512 8e69f0e1d335adef0bd98666c98e29bc15ee0d7a0fcbbbc91a1ba02275ca52fda7f8f47434547f7982ce0e73a6ff78bd2ed57ca328d1e87b8afdd3b0a698d262
-DIST SU2-7.0.5-TestCases.tar.gz 444209661 BLAKE2B a7b14a3ee679a28a3a4530ddca45f39fdc2c0bc5f29fe20e6c9f04d03dee6ca08f98648989b0160ee8742621b539e2f4af9e8bdfb802c3e460a9f3b7e1be3dda SHA512 f21acbaccce66685323566a3f856ceb5fbd36764a8dea875af839520b46be8f7e26491ccb855243f93e4ae948534574080e986faafc2a84c86754fe39f74e240
-DIST SU2-7.0.5-Tutorials.tar.gz 64282246 BLAKE2B 9c8823df5bbda1be80f03dfa6d9de4b4263904acbee15f6928d3758a9b25c84ace59bf02847798838cbf3236961f51589e851e342f288961b2f2a62e28de216c SHA512 2b7c98cc4d060c850572e45418d4ddf38843ced3349a3fb807c1d32f0e5aabf578f8944918e3fc63a32161385eaf69b4742d24ef41d21b43006d5b881a89953b
-DIST SU2-7.0.5.tar.gz 20526328 BLAKE2B 0c3b16269f9d8356eaecde991853fe184188b98df62186c56780ee34b26995763adbac0acca53db1625ac9b0dfadc3348eac792e700e66be00a174ff056e017d SHA512 283da35aa4f6bac4d7451506c6b84d98a18dfe7630adfc17764511fba8171b15c36ee80807bbfc1c55e39e196f8ed304fff3eb1182dfac15aa6a86a9613e51ad
DIST SU2-7.0.6-TestCases.tar.gz 447944839 BLAKE2B 5212ef7bf69bb40120ca2af99a02f2a89ae6cc0a1e048e57409ee1d2911f930452f4a5ef668126d6b7144c3f81d50bdadd0bcc810f0472740ccbbb6605e5a07d SHA512 c976450e7e17df58b47cbf14d18c4435f977a70dc086d5b74ea584ae14b3204632ef87b2dce6a456396179f1b72ef8eba83065a42d978b8d6966d5446decbd3c
DIST SU2-7.0.6-Tutorials.tar.gz 64282244 BLAKE2B 3b0ce136c9fa5c3e0ffc585e28b1a60470eeaf2518cbef539fccc185f79cd41a889e3c3c8a0ada3f488cfe1d2d0115e2768267c6ef4502b882b07f909f6f382c SHA512 4aaf39b98cbbe4c9e12d78027b0ee2b3d30fd614d1e48092d8bfd25c312a06a1621b2192653a7d8ac767762b06ae339ab6fb77e81f833efdb419ce09f92dec2f
DIST SU2-7.0.6.tar.gz 20531872 BLAKE2B 30e59bc6876223d87429b79f101a5705f989096a1b81725aa20012567d15b08b6a8a24140cc76b35c6c3657a1d6afa85d56da699ab38dac85714e296d7ad8531 SHA512 a4619dd969c6d9cb20de1d373c8e0af9d56654f9f96d919662897db4c3c8bf52b45fb1239065d480ba1b4f05ba7a17c9540ff3fe47fb0d96864736200cda8bcc
diff --git a/sci-physics/SU2/SU2-7.0.4.ebuild b/sci-physics/SU2/SU2-7.0.4.ebuild
deleted file mode 100644
index c76208dc..00000000
--- a/sci-physics/SU2/SU2-7.0.4.ebuild
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright 1999-2020 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=7
-
-PYTHON_COMPAT=( python3_{7,8} )
-
-inherit meson python-single-r1
-
-DESCRIPTION="SU2: An Open-Source Suite for Multiphysics Simulation and Design"
-HOMEPAGE="https://su2code.github.io/"
-SRC_URI="
- https://github.com/su2code/SU2/archive/v${PV}.tar.gz -> ${P}.tar.gz
- test? ( https://github.com/su2code/TestCases/archive/v${PV}.tar.gz -> ${P}-TestCases.tar.gz )
- tutorials? ( https://github.com/su2code/Tutorials/archive/v${PV}.tar.gz -> ${P}-Tutorials.tar.gz )
-"
-
-LICENSE="LGPL-2.1"
-SLOT="0"
-KEYWORDS="~amd64"
-
-IUSE="cgns -mkl +mpi openblas tecio test tutorials"
-RESTRICT="!test? ( test )"
-REQUIRED_USE="
- ${PYTHON_REQUIRED_USE}
- mkl? ( !openblas )
-"
-
-RDEPEND="
- ${PYTHON_DEPS}
- mpi? ( virtual/mpi[cxx] )
- mkl? ( sci-libs/mkl )
- openblas? ( sci-libs/openblas )
-"
-DEPEND="
- ${RDEPEND}
- tecio? ( dev-libs/boost:= )
-"
-BDEPEND="virtual/pkgconfig"
-
-PATCHES=(
- "${FILESDIR}/${P}-fix-env.patch"
- "${FILESDIR}/${P}-unbundle_boost.patch"
- "${FILESDIR}/${P}-fix-python-optimize.patch"
-)
-
-DOCS=( "LICENSE.md" "README.md" "SU2_PY/documentation.txt" )
-
-src_unpack() {
- unpack "${P}.tar.gz"
- if use test ; then
- einfo "Unpacking ${P}-TestCases.tar.gz to /var/tmp/portage/sci-physics/${P}/work/${P}/TestCases"
- tar -C "${P}"/TestCases --strip-components=1 -xzf "${DISTDIR}/${P}-TestCases.tar.gz" || die
- fi
- if use tutorials ; then
- einfo "Unpacking ${P}-Tutorials.tar.gz to /var/tmp/portage/sci-physics/${P}/work/${P}"
- mkdir "${P}"/Tutorials
- tar -C "${P}"/Tutorials --strip-components=1 -xzf "${DISTDIR}/${P}-Tutorials.tar.gz" || die
- fi
-}
-
-src_configure() {
- local emesonargs=(
- -Denable-autodiff=false
- -Denable-directdiff=false
- -Denable-pastix=false
- -Denable-pywrapper=false
- -Dwith-omp=false
- $(meson_feature mpi with-mpi)
- $(meson_use cgns enable-cgns)
- $(meson_use mkl enable-mkl)
- $(meson_use openblas enable-openblas)
- $(meson_use tecio enable-tecio)
- $(meson_use test enable-tests)
- )
- meson_src_configure
-}
-
-src_test() {
- ln -s ../../${P}-build/SU2_CFD/src/SU2_CFD SU2_PY/SU2_CFD
- ln -s ../../${P}-build/SU2_DEF/src/SU2_DEF SU2_PY/SU2_DEF
- ln -s ../../${P}-build/SU2_DOT/src/SU2_DOT SU2_PY/SU2_DOT
- ln -s ../../${P}-build/SU2_GEO/src/SU2_GEO SU2_PY/SU2_GEO
- ln -s ../../${P}-build/SU2_MSH/src/SU2_MSH SU2_PY/SU2_MSH
- ln -s ../../${P}-build/SU2_SOL/src/SU2_SOL SU2_PY/SU2_SOL
-
- export SU2_RUN="${S}/SU2_PY"
- export SU2_HOME="${S}"
- export PATH=$PATH:$SU2_RUN
- export PYTHONPATH=$PYTHONPATH:$SU2_RUN
-
- einfo "Running UnitTests ..."
- ../${P}-build/UnitTests/test_driver
-
- pushd TestCases/
- use mpi && python parallel_regression.py
- use mpi || python serial_regression.py
- use tutorials && use mpi && python tutorials.py
- popd
-}
-
-src_install() {
- meson_src_install
- mkdir -p "${ED}$(python_get_sitedir)"
- mv "${ED}"/usr/bin/{FSI,SU2,*.py} -t "${ED}$(python_get_sitedir)"
- python_optimize "${D}/$(python_get_sitedir)"
-
- if use tutorials ; then
- insinto "/usr/share/${P}"
- doins -r Tutorials
- fi
-}
diff --git a/sci-physics/SU2/SU2-7.0.5.ebuild b/sci-physics/SU2/SU2-7.0.5.ebuild
deleted file mode 100644
index bc5b90fe..00000000
--- a/sci-physics/SU2/SU2-7.0.5.ebuild
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright 1999-2020 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=7
-
-PYTHON_COMPAT=( python3_{7,8} )
-
-inherit meson python-single-r1
-
-DESCRIPTION="SU2: An Open-Source Suite for Multiphysics Simulation and Design"
-HOMEPAGE="https://su2code.github.io/"
-SRC_URI="
- https://github.com/su2code/SU2/archive/v${PV}.tar.gz -> ${P}.tar.gz
- test? ( https://github.com/su2code/TestCases/archive/v${PV}.tar.gz -> ${P}-TestCases.tar.gz )
- tutorials? ( https://github.com/su2code/Tutorials/archive/v${PV}.tar.gz -> ${P}-Tutorials.tar.gz )
-"
-
-LICENSE="LGPL-2.1"
-SLOT="0"
-KEYWORDS="~amd64"
-
-# cgns, metis, parmetis are bundled;
-# omp is disable as it's experimental;
-# pastix is disabled as it's try to find bundled libs;
-IUSE="cgns -mkl +mpi openblas tecio test tutorials"
-RESTRICT="!test? ( test )"
-REQUIRED_USE="
- ${PYTHON_REQUIRED_USE}
- mkl? ( !openblas )
-"
-
-RDEPEND="
- ${PYTHON_DEPS}
- mpi? ( virtual/mpi[cxx] )
- mkl? ( sci-libs/mkl )
- openblas? ( sci-libs/openblas )
-"
-DEPEND="
- ${RDEPEND}
- tecio? ( dev-libs/boost:= )
-"
-BDEPEND="virtual/pkgconfig"
-
-PATCHES=(
- "${FILESDIR}/${P}-fix-env.patch"
- "${FILESDIR}/${PN}-7.0.4-unbundle_boost.patch"
- "${FILESDIR}/${PN}-7.0.4-fix-python-optimize.patch"
-)
-
-DOCS=( "LICENSE.md" "README.md" "SU2_PY/documentation.txt" )
-
-src_unpack() {
- unpack "${P}.tar.gz"
- if use test ; then
- einfo "Unpacking ${P}-TestCases.tar.gz to /var/tmp/portage/sci-physics/${P}/work/${P}/TestCases"
- tar -C "${P}"/TestCases --strip-components=1 -xzf "${DISTDIR}/${P}-TestCases.tar.gz" || die
- fi
- if use tutorials ; then
- einfo "Unpacking ${P}-Tutorials.tar.gz to /var/tmp/portage/sci-physics/${P}/work/${P}"
- mkdir "${P}"/Tutorials
- tar -C "${P}"/Tutorials --strip-components=1 -xzf "${DISTDIR}/${P}-Tutorials.tar.gz" || die
- fi
-}
-
-src_configure() {
- local emesonargs=(
- -Denable-autodiff=false
- -Denable-directdiff=false
- -Denable-pastix=false
- -Denable-pywrapper=false
- -Dwith-omp=false
- $(meson_feature mpi with-mpi)
- $(meson_use cgns enable-cgns)
- $(meson_use mkl enable-mkl)
- $(meson_use openblas enable-openblas)
- $(meson_use tecio enable-tecio)
- $(meson_use test enable-tests)
- )
- meson_src_configure
-}
-
-src_test() {
- ln -s ../../${P}-build/SU2_CFD/src/SU2_CFD SU2_PY/SU2_CFD
- ln -s ../../${P}-build/SU2_DEF/src/SU2_DEF SU2_PY/SU2_DEF
- ln -s ../../${P}-build/SU2_DOT/src/SU2_DOT SU2_PY/SU2_DOT
- ln -s ../../${P}-build/SU2_GEO/src/SU2_GEO SU2_PY/SU2_GEO
- ln -s ../../${P}-build/SU2_MSH/src/SU2_MSH SU2_PY/SU2_MSH
- ln -s ../../${P}-build/SU2_SOL/src/SU2_SOL SU2_PY/SU2_SOL
-
- export SU2_RUN="${S}/SU2_PY"
- export SU2_HOME="${S}"
- export PATH=$PATH:$SU2_RUN
- export PYTHONPATH=$PYTHONPATH:$SU2_RUN
-
- einfo "Running UnitTests ..."
- ../${P}-build/UnitTests/test_driver
-
- pushd TestCases/
- use mpi && python parallel_regression.py
- use mpi || python serial_regression.py
- use tutorials && use mpi && python tutorials.py
- popd
-}
-
-src_install() {
- meson_src_install
- mkdir -p "${ED}$(python_get_sitedir)"
- mv "${ED}"/usr/bin/{FSI,SU2,*.py} -t "${ED}$(python_get_sitedir)"
- python_optimize "${D}/$(python_get_sitedir)"
-
- if use tutorials ; then
- insinto "/usr/share/${P}"
- doins -r Tutorials
- fi
-}
diff --git a/sci-physics/SU2/files/SU2-7.0.4-fix-env.patch b/sci-physics/SU2/files/SU2-7.0.4-fix-env.patch
deleted file mode 100644
index 3f65764c..00000000
--- a/sci-physics/SU2/files/SU2-7.0.4-fix-env.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-diff -Naur old_env/SU2_CFD/include/output/tools/CWindowingTools.hpp new_env/SU2_CFD/include/output/tools/CWindowingTools.hpp
---- old_env/SU2_CFD/include/output/tools/CWindowingTools.hpp 2020-03-31 12:26:03.000000000 +0300
-+++ new_env/SU2_CFD/include/output/tools/CWindowingTools.hpp 2020-05-10 17:04:24.000000000 +0300
-@@ -28,7 +28,7 @@
- #pragma once
-
- #include <vector>
--#include "../../../Common/include/option_structure.hpp"
-+#include "../../../../Common/include/option_structure.hpp"
-
- class CWindowingTools{
- public:
-diff -Naur old_env/UnitTests/meson.build new_env/UnitTests/meson.build
---- old_env/UnitTests/meson.build 2020-05-10 17:03:43.000000000 +0300
-+++ new_env/UnitTests/meson.build 2020-05-10 17:04:35.000000000 +0300
-@@ -24,7 +24,7 @@
- test_driver = executable(
- 'test_driver',
- unit_test_files,
-- install : true,
-+ install : false,
- dependencies : [su2_cfd_dep, common_dep, su2_deps, catch2_dep],
- cpp_args: ['-fPIC', default_warning_flags, su2_cpp_args]
- )
diff --git a/sci-physics/SU2/files/SU2-7.0.5-fix-env.patch b/sci-physics/SU2/files/SU2-7.0.5-fix-env.patch
deleted file mode 100644
index b52f171b..00000000
--- a/sci-physics/SU2/files/SU2-7.0.5-fix-env.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff -Naur old_env/UnitTests/meson.build new_env/UnitTests/meson.build
---- old_env/UnitTests/meson.build 2020-06-15 17:03:43.000000000 +0300
-+++ new_env/UnitTests/meson.build 2020-06-15 17:04:35.000000000 +0300
-@@ -24,7 +24,7 @@
- test_driver = executable(
- 'test_driver',
- unit_test_files,
-- install : true,
-+ install : false,
- dependencies : [su2_cfd_dep, common_dep, su2_deps, catch2_dep],
- cpp_args: ['-fPIC', default_warning_flags, su2_cpp_args]
- )
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [gentoo-commits] repo/proj/guru:dev commit in: sci-physics/SU2/, sci-physics/SU2/files/
@ 2020-12-09 23:40 Sergey Torokhov
0 siblings, 0 replies; 6+ messages in thread
From: Sergey Torokhov @ 2020-12-09 23:40 UTC (permalink / raw
To: gentoo-commits
commit: 3f826bf9b12a22fa0e873b7818193187f999b1b5
Author: Sergey Torokhov <torokhov-s-a <AT> yandex <DOT> ru>
AuthorDate: Wed Dec 9 23:34:37 2020 +0000
Commit: Sergey Torokhov <torokhov-s-a <AT> yandex <DOT> ru>
CommitDate: Wed Dec 9 23:34:37 2020 +0000
URL: https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=3f826bf9
sci-physics/SU2: 7.0.8 version bump
There are no dedicated Test and Tutorials tarballs this time.
The tarballs from 7.0.7 are used.
Signed-off-by: Sergey Torokhov <torokhov-s-a <AT> yandex.ru>
sci-physics/SU2/Manifest | 1 +
sci-physics/SU2/SU2-7.0.8.ebuild | 114 +
sci-physics/SU2/files/SU2-7.0.8-fix-env.patch | 12 +
.../SU2/files/SU2-7.0.8-fix-python-optimize.patch | 2212 ++++++++++++++++++++
4 files changed, 2339 insertions(+)
diff --git a/sci-physics/SU2/Manifest b/sci-physics/SU2/Manifest
index 876649be..7b30d7cd 100644
--- a/sci-physics/SU2/Manifest
+++ b/sci-physics/SU2/Manifest
@@ -4,3 +4,4 @@ DIST SU2-7.0.6.tar.gz 20531872 BLAKE2B 30e59bc6876223d87429b79f101a5705f989096a1
DIST SU2-7.0.7-TestCases.tar.gz 448969006 BLAKE2B 6c886824b55d7f8516d2ea69e2f7bef36a40986f4f715da46d91f851eb59390329433c6941a280eca34ad675633b2f1b01a897d1db8d177a5c4f770b286d0625 SHA512 0884b4f750dbcfd3f2cb0e71d6005932e4edd90a50fa84eb484f6c0c523930ddebfb3ed4315161b8fdeff911a52fa72b6d79739c8e19cd634b9823e007520213
DIST SU2-7.0.7-Tutorials.tar.gz 64282235 BLAKE2B 7a6b780ee6f01d26a7a7d4751ca39798af56cfd7b99ca3e13fdff61aecd631a3aa4c98a487f48a8b2593c711ee25bd1ddc90a316bde2c287e95a383321f1d5e9 SHA512 189b5da96f08689b62ba3c42ee349edd2e145f371112895587e53497f16de3d6fdbf17308af39961775d76e3169c40872ced8e267146b6da5ae12d31a4c70fa9
DIST SU2-7.0.7.tar.gz 20618138 BLAKE2B c823ea59fd28547b78c4694d45995e83c5f2c16229c40d20b951cdd62a98e13c77c07cffa87a1ec105b29a597878c7a2342c6ac90c7c9751ed20f876194a55e1 SHA512 c5dacc8b2f4ab7eb72852d8e6ae59c0800e8126faf20641135fa31ec42915b8e3553082e328b2b158e3e337f7aab0a932b6b1f875d3c0191db538bb923affcf3
+DIST SU2-7.0.8.tar.gz 20554206 BLAKE2B ac291eeae696197f5875839defa98bde550c5250b5a5cff8049e0d1c8184c5ec6d225b77618d079a8fb3d5a7f4d0531a0b2931fd15e045299fa55c7dd03d9fd1 SHA512 26ed8c39d2dfcdb3fc4e1c986c64f683f44cd4123ce5f7a482c4557a82b04b4bd8ef1f04332ab36ad0f987cfb98053fb8b30ef81faf51c7c56aebc1a30467df9
diff --git a/sci-physics/SU2/SU2-7.0.8.ebuild b/sci-physics/SU2/SU2-7.0.8.ebuild
new file mode 100644
index 00000000..5356a05b
--- /dev/null
+++ b/sci-physics/SU2/SU2-7.0.8.ebuild
@@ -0,0 +1,114 @@
+# Copyright 1999-2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=7
+
+PYTHON_COMPAT=( python3_{6,7,8,9} )
+
+inherit meson python-single-r1
+
+DESCRIPTION="SU2: An Open-Source Suite for Multiphysics Simulation and Design"
+HOMEPAGE="https://su2code.github.io/"
+SRC_URI="
+ https://github.com/su2code/SU2/archive/v${PV}.tar.gz -> ${P}.tar.gz
+ test? ( https://github.com/su2code/TestCases/archive/v7.0.7.tar.gz -> ${PN}-7.0.7-TestCases.tar.gz )
+ tutorials? ( https://github.com/su2code/Tutorials/archive/v7.0.7.tar.gz -> ${PN}-7.0.7-Tutorials.tar.gz )
+"
+
+LICENSE="LGPL-2.1"
+SLOT="0"
+KEYWORDS="~amd64"
+
+# cgns, metis, parmetis are bundled;
+# omp is disabled as it's experimental;
+# pastix is disabled as it tries to find bundled libs;
+IUSE="cgns -mkl +mpi openblas tecio test tutorials"
+RESTRICT="!test? ( test )"
+REQUIRED_USE="
+ ${PYTHON_REQUIRED_USE}
+ mkl? ( !openblas )
+"
+
+RDEPEND="
+ ${PYTHON_DEPS}
+ mpi? ( virtual/mpi[cxx] )
+ mkl? ( sci-libs/mkl )
+ openblas? ( sci-libs/openblas )
+"
+DEPEND="
+ ${RDEPEND}
+ tecio? ( dev-libs/boost:= )
+"
+BDEPEND="virtual/pkgconfig"
+
+PATCHES=(
+ "${FILESDIR}/${P}-fix-env.patch"
+ "${FILESDIR}/${PN}-7.0.4-unbundle_boost.patch"
+ "${FILESDIR}/${P}-fix-python-optimize.patch"
+)
+
+DOCS=( "LICENSE.md" "README.md" "SU2_PY/documentation.txt" )
+
+src_unpack() {
+ unpack "${P}.tar.gz"
+ if use test ; then
+ einfo "Unpacking ${PN}-7.0.7-TestCases.tar.gz to /var/tmp/portage/sci-physics/${P}/work/${P}/TestCases"
+ tar -C "${P}"/TestCases --strip-components=1 -xzf "${DISTDIR}/${PN}-7.0.7-TestCases.tar.gz" || die
+ fi
+ if use tutorials ; then
+ einfo "Unpacking ${PN}-7.0.7-Tutorials.tar.gz to /var/tmp/portage/sci-physics/${P}/work/${P}"
+ mkdir "${P}"/Tutorials
+ tar -C "${P}"/Tutorials --strip-components=1 -xzf "${DISTDIR}/${PN}-7.0.7-Tutorials.tar.gz" || die
+ fi
+}
+
+src_configure() {
+ local emesonargs=(
+ -Denable-autodiff=false
+ -Denable-directdiff=false
+ -Denable-pastix=false
+ -Denable-pywrapper=false
+ -Dwith-omp=false
+ $(meson_feature mpi with-mpi)
+ $(meson_use cgns enable-cgns)
+ $(meson_use mkl enable-mkl)
+ $(meson_use openblas enable-openblas)
+ $(meson_use tecio enable-tecio)
+ $(meson_use test enable-tests)
+ )
+ meson_src_configure
+}
+
+src_test() {
+ ln -s ../../${P}-build/SU2_CFD/src/SU2_CFD SU2_PY/SU2_CFD
+ ln -s ../../${P}-build/SU2_DEF/src/SU2_DEF SU2_PY/SU2_DEF
+ ln -s ../../${P}-build/SU2_DOT/src/SU2_DOT SU2_PY/SU2_DOT
+ ln -s ../../${P}-build/SU2_GEO/src/SU2_GEO SU2_PY/SU2_GEO
+ ln -s ../../${P}-build/SU2_SOL/src/SU2_SOL SU2_PY/SU2_SOL
+
+ export SU2_RUN="${S}/SU2_PY"
+ export SU2_HOME="${S}"
+ export PATH=$PATH:$SU2_RUN
+ export PYTHONPATH=$PYTHONPATH:$SU2_RUN
+
+ einfo "Running UnitTests ..."
+ ../${P}-build/UnitTests/test_driver
+
+ pushd TestCases/
+ use mpi && python parallel_regression.py
+ use mpi || python serial_regression.py
+ use tutorials && use mpi && python tutorials.py
+ popd
+}
+
+src_install() {
+ meson_src_install
+ mkdir -p "${ED}$(python_get_sitedir)"
+ mv "${ED}"/usr/bin/{FSI,SU2,*.py} -t "${ED}$(python_get_sitedir)"
+ python_optimize "${D}/$(python_get_sitedir)"
+
+ if use tutorials ; then
+ insinto "/usr/share/${P}"
+ doins -r Tutorials
+ fi
+}
diff --git a/sci-physics/SU2/files/SU2-7.0.8-fix-env.patch b/sci-physics/SU2/files/SU2-7.0.8-fix-env.patch
new file mode 100644
index 00000000..baf7955d
--- /dev/null
+++ b/sci-physics/SU2/files/SU2-7.0.8-fix-env.patch
@@ -0,0 +1,12 @@
+diff -Naur old_env/UnitTests/meson.build new_env/UnitTests/meson.build
+--- old_env/UnitTests/meson.build 2020-06-15 17:03:43.000000000 +0300
++++ new_env/UnitTests/meson.build 2020-06-15 17:04:35.000000000 +0300
+@@ -26,7 +26,7 @@
+ test_driver = executable(
+ 'test_driver',
+ unit_test_files,
+- install : true,
++ install : false,
+ dependencies : [su2_cfd_dep, common_dep, su2_deps, catch2_dep],
+ cpp_args: ['-fPIC', default_warning_flags, su2_cpp_args]
+ )
diff --git a/sci-physics/SU2/files/SU2-7.0.8-fix-python-optimize.patch b/sci-physics/SU2/files/SU2-7.0.8-fix-python-optimize.patch
new file mode 100644
index 00000000..64bff0a3
--- /dev/null
+++ b/sci-physics/SU2/files/SU2-7.0.8-fix-python-optimize.patch
@@ -0,0 +1,2212 @@
+diff -Naur old/SU2_PY/FSI/FSIInterface.py new/SU2_PY/FSI/FSIInterface.py
+--- old/SU2_PY/FSI/FSIInterface.py 2020-05-01 19:09:18.000000000 +0300
++++ new/SU2_PY/FSI/FSIInterface.py 2020-05-10 16:17:07.000000000 +0300
+@@ -6,8 +6,8 @@
+ # \version 7.0.8 "Blackbird"
+ #
+ # SU2 Project Website: https://su2code.github.io
+-#
+-# The SU2 Project is maintained by the SU2 Foundation
++#
++# The SU2 Project is maintained by the SU2 Foundation
+ # (http://su2foundation.org)
+ #
+ # Copyright 2012-2020, SU2 Contributors (cf. AUTHORS.md)
+@@ -16,7 +16,7 @@
+ # modify it under the terms of the GNU Lesser General Public
+ # License as published by the Free Software Foundation; either
+ # version 2.1 of the License, or (at your option) any later version.
+-#
++#
+ # SU2 is distributed in the hope that it will be useful,
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+@@ -42,19 +42,19 @@
+ # ----------------------------------------------------------------------
+
+ class Interface:
+- """
++ """
+ FSI interface class that handles fluid/solid solvers synchronisation and communication
+ """
+-
++
+ def __init__(self, FSI_config, FluidSolver, SolidSolver, have_MPI):
+- """
+- Class constructor. Declare some variables and do some screen outputs.
+- """
+-
++ """
++ Class constructor. Declare some variables and do some screen outputs.
++ """
++
+ if have_MPI == True:
+ from mpi4py import MPI
+ self.MPI = MPI
+- self.comm = MPI.COMM_WORLD #MPI World communicator
++ self.comm = MPI.COMM_WORLD #MPI World communicator
+ self.have_MPI = True
+ myid = self.comm.Get_rank()
+ else:
+@@ -62,42 +62,42 @@
+ self.have_MPI = False
+ myid = 0
+
+- self.rootProcess = 0 #the root process is chosen to be MPI rank = 0
++ self.rootProcess = 0 #the root process is chosen to be MPI rank = 0
+
+- self.nDim = FSI_config['NDIM'] #problem dimension
++ self.nDim = FSI_config['NDIM'] #problem dimension
+
+- self.haveFluidSolver = False #True if the fluid solver is initialized on the current rank
+- self.haveSolidSolver = False #True if the solid solver is initialized on the current rank
+- self.haveFluidInterface = False #True if the current rank owns at least one fluid interface node
+- self.haveSolidInterface = False #True if the current rank owns at least one solid interface node
++ self.haveFluidSolver = False #True if the fluid solver is initialized on the current rank
++ self.haveSolidSolver = False #True if the solid solver is initialized on the current rank
++ self.haveFluidInterface = False #True if the current rank owns at least one fluid interface node
++ self.haveSolidInterface = False #True if the current rank owns at least one solid interface node
+
+- self.fluidSolverProcessors = list() #list of partitions where the fluid solver is initialized
+- self.solidSolverProcessors = list() #list of partitions where the solid solver is initialized
++ self.fluidSolverProcessors = list() #list of partitions where the fluid solver is initialized
++ self.solidSolverProcessors = list() #list of partitions where the solid solver is initialized
+ self.fluidInterfaceProcessors = list() #list of partitions where there are fluid interface nodes
+- self.solidInterfaceProcessors = list() #list of partitions where there are solid interface nodes
++ self.solidInterfaceProcessors = list() #list of partitions where there are solid interface nodes
+
+- self.fluidInterfaceIdentifier = None #object that can identify the f/s interface within the fluid solver
+- self.solidInterfaceIdentifier = None #object that can identify the f/s interface within the solid solver
++ self.fluidInterfaceIdentifier = None #object that can identify the f/s interface within the fluid solver
++ self.solidInterfaceIdentifier = None #object that can identify the f/s interface within the solid solver
+
+- self.fluidGlobalIndexRange = {} #contains the global FSI indexing of each fluid interface node for all partitions
+- self.solidGlobalIndexRange = {} #contains the global FSI indexing of each solid interface node for all partitions
++ self.fluidGlobalIndexRange = {} #contains the global FSI indexing of each fluid interface node for all partitions
++ self.solidGlobalIndexRange = {} #contains the global FSI indexing of each solid interface node for all partitions
+
+- self.FluidHaloNodeList = {} #contains the the indices (fluid solver indexing) of the halo nodes for each partition
+- self.fluidIndexing = {} #links between the fluid solver indexing and the FSI indexing for the interface nodes
+- self.SolidHaloNodeList = {} #contains the the indices (solid solver indexing) of the halo nodes for each partition
+- self.solidIndexing = {} #links between the solid solver indexing and the FSI indexing for the interface nodes
+-
+- self.nLocalFluidInterfaceNodes = 0 #number of nodes (halo nodes included) on the fluid interface, on each partition
+- self.nLocalFluidInterfaceHaloNode = 0 #number of halo nodes on the fluid intrface, on each partition
+- self.nLocalFluidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the fluid interface, on each partition
+- self.nFluidInterfaceNodes = 0 #number of nodes on the fluid interface, sum over all the partitions
+- self.nFluidInterfacePhysicalNodes = 0 #number of physical nodes on the fluid interface, sum over all partitions
+-
+- self.nLocalSolidInterfaceNodes = 0 #number of physical nodes on the solid interface, on each partition
+- self.nLocalSolidInterfaceHaloNode = 0 #number of halo nodes on the solid intrface, on each partition
+- self.nLocalSolidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the solid interface, on each partition
+- self.nSolidInterfaceNodes = 0 #number of nodes on the solid interface, sum over all partitions
+- self.nSolidInterfacePhysicalNodes = 0 #number of physical nodes on the solid interface, sum over all partitions
++ self.FluidHaloNodeList = {} #contains the the indices (fluid solver indexing) of the halo nodes for each partition
++ self.fluidIndexing = {} #links between the fluid solver indexing and the FSI indexing for the interface nodes
++ self.SolidHaloNodeList = {} #contains the the indices (solid solver indexing) of the halo nodes for each partition
++ self.solidIndexing = {} #links between the solid solver indexing and the FSI indexing for the interface nodes
++
++ self.nLocalFluidInterfaceNodes = 0 #number of nodes (halo nodes included) on the fluid interface, on each partition
++ self.nLocalFluidInterfaceHaloNode = 0 #number of halo nodes on the fluid intrface, on each partition
++ self.nLocalFluidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the fluid interface, on each partition
++ self.nFluidInterfaceNodes = 0 #number of nodes on the fluid interface, sum over all the partitions
++ self.nFluidInterfacePhysicalNodes = 0 #number of physical nodes on the fluid interface, sum over all partitions
++
++ self.nLocalSolidInterfaceNodes = 0 #number of physical nodes on the solid interface, on each partition
++ self.nLocalSolidInterfaceHaloNode = 0 #number of halo nodes on the solid intrface, on each partition
++ self.nLocalSolidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the solid interface, on each partition
++ self.nSolidInterfaceNodes = 0 #number of nodes on the solid interface, sum over all partitions
++ self.nSolidInterfacePhysicalNodes = 0 #number of physical nodes on the solid interface, sum over all partitions
+
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
+ self.MappingMatrixA = None
+@@ -106,83 +106,83 @@
+ self.MappingMatrixB_T = None
+ self.d_RBF = self.nDim+1
+ else:
+- self.MappingMatrix = None #interpolation/mapping matrix for meshes interpolation/mapping
+- self.MappingMatrix_T = None #transposed interpolation/mapping matrix for meshes interpolation/mapping
++ self.MappingMatrix = None #interpolation/mapping matrix for meshes interpolation/mapping
++ self.MappingMatrix_T = None #transposed interpolation/mapping matrix for meshes interpolation/mapping
+ self.d_RBF = 0
+
+- self.localFluidInterface_array_X_init = None #initial fluid interface position on each partition (used for the meshes mapping)
++ self.localFluidInterface_array_X_init = None #initial fluid interface position on each partition (used for the meshes mapping)
+ self.localFluidInterface_array_Y_init = None
+ self.localFluidInterface_array_Z_init = None
+
+- self.haloNodesPositionsInit = {} #initial position of the halo nodes (fluid side only)
++ self.haloNodesPositionsInit = {} #initial position of the halo nodes (fluid side only)
+
+- self.solidInterface_array_DispX = None #solid interface displacement
++ self.solidInterface_array_DispX = None #solid interface displacement
+ self.solidInterface_array_DispY = None
+ self.solidInterface_array_DispZ = None
+
+- self.solidInterfaceResidual_array_X = None #solid interface position residual
++ self.solidInterfaceResidual_array_X = None #solid interface position residual
+ self.solidInterfaceResidual_array_Y = None
+ self.solidInterfaceResidual_array_Z = None
+
+- self.solidInterfaceResidualnM1_array_X = None #solid interface position residual at the previous BGS iteration
++ self.solidInterfaceResidualnM1_array_X = None #solid interface position residual at the previous BGS iteration
+ self.solidInterfaceResidualnM1_array_Y = None
+ self.solidInterfaceResidualnM1_array_Z = None
+-
+- self.fluidInterface_array_DispX = None #fluid interface displacement
++
++ self.fluidInterface_array_DispX = None #fluid interface displacement
+ self.fluidInterface_array_DispY = None
+ self.fluidInterface_array_DispZ = None
+
+- self.fluidLoads_array_X = None #loads on the fluid side of the f/s interface
++ self.fluidLoads_array_X = None #loads on the fluid side of the f/s interface
+ self.fluidLoads_array_Y = None
+ self.fluidLoads_array_Z = None
+
+- self.solidLoads_array_X = None #loads on the solid side of the f/s interface
++ self.solidLoads_array_X = None #loads on the solid side of the f/s interface
+ self.solidLoads_array_Y = None
+ self.solidLoads_array_Z = None
+
+- self.aitkenParam = FSI_config['AITKEN_PARAM'] #relaxation parameter for the BGS method
+- self.FSIIter = 0 #current FSI iteration
+- self.unsteady = False #flag for steady or unsteady simulation (default is steady)
+-
+- # ---Some screen output ---
+- self.MPIPrint('Fluid solver : SU2_CFD')
+- self.MPIPrint('Solid solver : {}'.format(FSI_config['CSD_SOLVER']))
++ self.aitkenParam = FSI_config['AITKEN_PARAM'] #relaxation parameter for the BGS method
++ self.FSIIter = 0 #current FSI iteration
++ self.unsteady = False #flag for steady or unsteady simulation (default is steady)
++
++ # ---Some screen output ---
++ self.MPIPrint('Fluid solver : SU2_CFD')
++ self.MPIPrint('Solid solver : {}'.format(FSI_config['CSD_SOLVER']))
+
+- if FSI_config['TIME_MARCHING'] == 'YES':
++ if FSI_config['TIME_MARCHING'] == 'YES':
+ self.MPIPrint('Unsteady coupled simulation with physical time step : {} s'.format(FSI_config['UNST_TIMESTEP']))
+ self.unsteady = True
+- else:
+- self.MPIPrint('Steady coupled simulation')
++ else:
++ self.MPIPrint('Steady coupled simulation')
+
+- if FSI_config['MATCHING_MESH'] == 'YES':
+- self.MPIPrint('Matching fluid-solid interface')
+- else:
++ if FSI_config['MATCHING_MESH'] == 'YES':
++ self.MPIPrint('Matching fluid-solid interface')
++ else:
+ if FSI_config['MESH_INTERP_METHOD'] == 'TPS':
+- self.MPIPrint('Non matching fluid-solid interface with Thin Plate Spline interpolation')
++ self.MPIPrint('Non matching fluid-solid interface with Thin Plate Spline interpolation')
+ elif FSI_config['MESH_INTERP_METHOD'] == 'RBF':
+ self.MPIPrint('Non matching fluid-solid interface with Radial Basis Function interpolation')
+ self.RBF_rad = FSI_config['RBF_RADIUS']
+- self.MPIPrint('Radius value : {}'.format(self.RBF_rad))
++ self.MPIPrint('Radius value : {}'.format(self.RBF_rad))
+ else:
+- self.MPIPrint('Non matching fluid-solid interface with Nearest Neighboor interpolation')
++ self.MPIPrint('Non matching fluid-solid interface with Nearest Neighboor interpolation')
+
+- self.MPIPrint('Solid predictor : {}'.format(FSI_config['DISP_PRED']))
++ self.MPIPrint('Solid predictor : {}'.format(FSI_config['DISP_PRED']))
+
+- self.MPIPrint('Maximum number of FSI iterations : {}'.format(FSI_config['NB_FSI_ITER']))
++ self.MPIPrint('Maximum number of FSI iterations : {}'.format(FSI_config['NB_FSI_ITER']))
+
+- self.MPIPrint('FSI tolerance : {}'.format(FSI_config['FSI_TOLERANCE']))
++ self.MPIPrint('FSI tolerance : {}'.format(FSI_config['FSI_TOLERANCE']))
+
+- if FSI_config['AITKEN_RELAX'] == 'STATIC':
+- self.MPIPrint('Static Aitken under-relaxation with constant parameter {}'.format(FSI_config['AITKEN_PARAM']))
+- elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC':
+- self.MPIPrint('Dynamic Aitken under-relaxation with initial parameter {}'.format(FSI_config['AITKEN_PARAM']))
+- else:
+- self.MPIPrint('No Aitken under-relaxation')
++ if FSI_config['AITKEN_RELAX'] == 'STATIC':
++ self.MPIPrint('Static Aitken under-relaxation with constant parameter {}'.format(FSI_config['AITKEN_PARAM']))
++ elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC':
++ self.MPIPrint('Dynamic Aitken under-relaxation with initial parameter {}'.format(FSI_config['AITKEN_PARAM']))
++ else:
++ self.MPIPrint('No Aitken under-relaxation')
+
+ self.MPIPrint('FSI interface is set')
+
+ def MPIPrint(self, message):
+- """
++ """
+ Print a message on screen only from the master process.
+ """
+
+@@ -198,28 +198,28 @@
+ """
+ Perform a synchronization barrier in case of parallel run with MPI.
+ """
+-
++
+ if self.have_MPI == True:
+ self.comm.barrier()
+
+ def connect(self, FSI_config, FluidSolver, SolidSolver):
+- """
+- Connection between solvers.
+- Creates the communication support between the two solvers.
+- Gets information about f/s interfaces from the two solvers.
+- """
++ """
++ Connection between solvers.
++ Creates the communication support between the two solvers.
++ Gets information about f/s interfaces from the two solvers.
++ """
+ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+- MPIsize = self.comm.Get_size()
++ MPIsize = self.comm.Get_size()
+ else:
+ myid = 0
+ MPIsize = 1
+-
+- # --- Identify the fluid and solid interfaces and store the number of nodes on both sides (and for each partition) ---
++
++ # --- Identify the fluid and solid interfaces and store the number of nodes on both sides (and for each partition) ---
+ self.fluidInterfaceIdentifier = None
+ self.nLocalFluidInterfaceNodes = 0
+ if FluidSolver != None:
+- print('Fluid solver is initialized on process {}'.format(myid))
++ print('Fluid solver is initialized on process {}'.format(myid))
+ self.haveFluidSolver = True
+ allMovingMarkersTags = FluidSolver.GetAllMovingMarkersTag()
+ allMarkersID = FluidSolver.GetAllBoundaryMarkers()
+@@ -229,23 +229,23 @@
+ if allMovingMarkersTags[0] in allMarkersID.keys():
+ self.fluidInterfaceIdentifier = allMarkersID[allMovingMarkersTags[0]]
+ if self.fluidInterfaceIdentifier != None:
+- self.nLocalFluidInterfaceNodes = FluidSolver.GetNumberVertices(self.fluidInterfaceIdentifier)
+- if self.nLocalFluidInterfaceNodes != 0:
++ self.nLocalFluidInterfaceNodes = FluidSolver.GetNumberVertices(self.fluidInterfaceIdentifier)
++ if self.nLocalFluidInterfaceNodes != 0:
+ self.haveFluidInterface = True
+- print('Number of interface fluid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalFluidInterfaceNodes))
+- else:
+- pass
++ print('Number of interface fluid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalFluidInterfaceNodes))
++ else:
++ pass
+
+- if SolidSolver != None:
+- print('Solid solver is initialized on process {}'.format(myid))
++ if SolidSolver != None:
++ print('Solid solver is initialized on process {}'.format(myid))
+ self.haveSolidSolver = True
+- self.solidInterfaceIdentifier = SolidSolver.getFSIMarkerID()
+- self.nLocalSolidInterfaceNodes = SolidSolver.getNumberOfSolidInterfaceNodes(self.solidInterfaceIdentifier)
+- if self.nLocalSolidInterfaceNodes != 0:
++ self.solidInterfaceIdentifier = SolidSolver.getFSIMarkerID()
++ self.nLocalSolidInterfaceNodes = SolidSolver.getNumberOfSolidInterfaceNodes(self.solidInterfaceIdentifier)
++ if self.nLocalSolidInterfaceNodes != 0:
+ self.haveSolidInterface = True
+ print('Number of interface solid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalSolidInterfaceNodes))
+- else:
+- pass
++ else:
++ pass
+
+ # --- Exchange information about processors on which the solvers are defined and where the interface nodes are lying ---
+ if self.have_MPI == True:
+@@ -266,18 +266,18 @@
+ else:
+ sendBufSolidInterface = np.array(int(0))
+ rcvBufFluid = np.zeros(MPIsize, dtype = int)
+- rcvBufSolid = np.zeros(MPIsize, dtype = int)
++ rcvBufSolid = np.zeros(MPIsize, dtype = int)
+ rcvBufFluidInterface = np.zeros(MPIsize, dtype = int)
+- rcvBufSolidInterface = np.zeros(MPIsize, dtype = int)
++ rcvBufSolidInterface = np.zeros(MPIsize, dtype = int)
+ self.comm.Allgather(sendBufFluid, rcvBufFluid)
+ self.comm.Allgather(sendBufSolid, rcvBufSolid)
+ self.comm.Allgather(sendBufFluidInterface, rcvBufFluidInterface)
+ self.comm.Allgather(sendBufSolidInterface, rcvBufSolidInterface)
+ for iProc in range(MPIsize):
+- if rcvBufFluid[iProc] == 1:
++ if rcvBufFluid[iProc] == 1:
+ self.fluidSolverProcessors.append(iProc)
+ if rcvBufSolid[iProc] == 1:
+- self.solidSolverProcessors.append(iProc)
++ self.solidSolverProcessors.append(iProc)
+ if rcvBufFluidInterface[iProc] == 1:
+ self.fluidInterfaceProcessors.append(iProc)
+ if rcvBufSolidInterface[iProc] == 1:
+@@ -285,19 +285,19 @@
+ del sendBufFluid, sendBufSolid, rcvBufFluid, rcvBufSolid, sendBufFluidInterface, sendBufSolidInterface, rcvBufFluidInterface, rcvBufSolidInterface
+ else:
+ self.fluidSolverProcessors.append(0)
+- self.solidSolverProcessors.append(0)
++ self.solidSolverProcessors.append(0)
+ self.fluidInterfaceProcessors.append(0)
+ self.solidInterfaceProcessors.append(0)
+
+- self.MPIBarrier()
+-
+- # --- Calculate the total number of nodes at the fluid interface (sum over all the partitions) ---
++ self.MPIBarrier()
++
++ # --- Calculate the total number of nodes at the fluid interface (sum over all the partitions) ---
+ # Calculate the number of halo nodes on each partition
+ self.nLocalFluidInterfaceHaloNode = 0
+- for iVertex in range(self.nLocalFluidInterfaceNodes):
++ for iVertex in range(self.nLocalFluidInterfaceNodes):
+ if FluidSolver.IsAHaloNode(self.fluidInterfaceIdentifier, iVertex) == True:
+ GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
+- self.FluidHaloNodeList[GlobalIndex] = iVertex
++ self.FluidHaloNodeList[GlobalIndex] = iVertex
+ self.nLocalFluidInterfaceHaloNode += 1
+ # Calculate the number of physical (= not halo) nodes on each partition
+ self.nLocalFluidInterfacePhysicalNodes = self.nLocalFluidInterfaceNodes - self.nLocalFluidInterfaceHaloNode
+@@ -308,10 +308,10 @@
+
+ # Same thing for the solid part
+ self.nLocalSolidInterfaceHaloNode = 0
+- #for iVertex in range(self.nLocalSolidInterfaceNodes):
++ #for iVertex in range(self.nLocalSolidInterfaceNodes):
+ #if SoliddSolver.IsAHaloNode(self.fluidInterfaceIdentifier, iVertex) == True:
+ #GlobalIndex = SolidSolver.GetVertexGlobalIndex(self.solidInterfaceIdentifier, iVertex)
+- #self.SolidHaloNodeList[GlobalIndex] = iVertex
++ #self.SolidHaloNodeList[GlobalIndex] = iVertex
+ #self.nLocalSolidInterfaceHaloNode += 1
+ self.nLocalSolidInterfacePhysicalNodes = self.nLocalSolidInterfaceNodes - self.nLocalSolidInterfaceHaloNode
+ if self.have_MPI == True:
+@@ -323,11 +323,11 @@
+ # --- Calculate the total number of nodes (with and without halo) at the fluid interface (sum over all the partitions) and broadcast the number accross all processors ---
+ sendBuffHalo = np.array(int(self.nLocalFluidInterfaceNodes))
+ sendBuffPhysical = np.array(int(self.nLocalFluidInterfacePhysicalNodes))
+- rcvBuffHalo = np.zeros(1, dtype=int)
++ rcvBuffHalo = np.zeros(1, dtype=int)
+ rcvBuffPhysical = np.zeros(1, dtype=int)
+- if self.have_MPI == True:
++ if self.have_MPI == True:
+ self.comm.barrier()
+- self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM)
++ self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM)
+ self.comm.Allreduce(sendBuffPhysical,rcvBuffPhysical,op=self.MPI.SUM)
+ self.nFluidInterfaceNodes = rcvBuffHalo[0]
+ self.nFluidInterfacePhysicalNodes = rcvBuffPhysical[0]
+@@ -339,11 +339,11 @@
+ # Same thing for the solid part
+ sendBuffHalo = np.array(int(self.nLocalSolidInterfaceNodes))
+ sendBuffPhysical = np.array(int(self.nLocalSolidInterfacePhysicalNodes))
+- rcvBuffHalo = np.zeros(1, dtype=int)
++ rcvBuffHalo = np.zeros(1, dtype=int)
+ rcvBuffPhysical = np.zeros(1, dtype=int)
+ if self.have_MPI == True:
+- self.comm.barrier()
+- self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM)
++ self.comm.barrier()
++ self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM)
+ self.comm.Allreduce(sendBuffPhysical,rcvBuffPhysical,op=self.MPI.SUM)
+ self.nSolidInterfaceNodes = rcvBuffHalo[0]
+ self.nSolidInterfacePhysicalNodes = rcvBuffPhysical[0]
+@@ -375,7 +375,7 @@
+ if myid in self.fluidInterfaceProcessors:
+ globalIndexStart = 0
+ for iProc in range(myid):
+- globalIndexStart += self.fluidPhysicalInterfaceNodesDistribution[iProc]
++ globalIndexStart += self.fluidPhysicalInterfaceNodesDistribution[iProc]
+ globalIndexStop = globalIndexStart + self.nLocalFluidInterfacePhysicalNodes-1
+ else:
+ globalIndexStart = 0
+@@ -387,8 +387,8 @@
+ temp[0] = [0,self.nLocalFluidInterfacePhysicalNodes-1]
+ self.fluidGlobalIndexRange = list()
+ self.fluidGlobalIndexRange.append(temp)
+-
+- # Same thing for the solid part
++
++ # Same thing for the solid part
+ if self.have_MPI == True:
+ if myid in self.solidInterfaceProcessors:
+ globalIndexStart = 0
+@@ -404,14 +404,14 @@
+ temp = {}
+ temp[0] = [0,self.nSolidInterfacePhysicalNodes-1]
+ self.solidGlobalIndexRange = list()
+- self.solidGlobalIndexRange.append(temp)
++ self.solidGlobalIndexRange.append(temp)
+
+- self.MPIPrint('Total number of fluid interface nodes (halo nodes included) : {}'.format(self.nFluidInterfaceNodes))
+- self.MPIPrint('Total number of solid interface nodes (halo nodes included) : {}'.format(self.nSolidInterfaceNodes))
++ self.MPIPrint('Total number of fluid interface nodes (halo nodes included) : {}'.format(self.nFluidInterfaceNodes))
++ self.MPIPrint('Total number of solid interface nodes (halo nodes included) : {}'.format(self.nSolidInterfaceNodes))
+ self.MPIPrint('Total number of fluid interface nodes : {}'.format(self.nFluidInterfacePhysicalNodes))
+ self.MPIPrint('Total number of solid interface nodes : {}'.format(self.nSolidInterfacePhysicalNodes))
+
+- self.MPIBarrier()
++ self.MPIBarrier()
+
+ # --- Create all the PETSc vectors required for parallel communication and parallel mesh mapping/interpolation (working for serial too) ---
+ if self.have_MPI == True:
+@@ -432,8 +432,8 @@
+ self.solidInterface_array_DispY.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+ self.solidInterface_array_DispZ.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+ self.solidInterface_array_DispX.set(0.0)
+- self.solidInterface_array_DispY.set(0.0)
+- self.solidInterface_array_DispZ.set(0.0)
++ self.solidInterface_array_DispY.set(0.0)
++ self.solidInterface_array_DispZ.set(0.0)
+
+ if self.have_MPI == True:
+ self.fluidInterface_array_DispX = PETSc.Vec().create(self.comm)
+@@ -536,30 +536,30 @@
+ self.solidInterfaceResidualnM1_array_Z.set(0.0)
+
+ def interfaceMapping(self,FluidSolver, SolidSolver, FSI_config):
+- """
+- Creates the one-to-one mapping between interfaces in case of matching meshes.
+- Creates the interpolation rules between interfaces in case of non-matching meshes.
+- """
+- if self.have_MPI == True:
++ """
++ Creates the one-to-one mapping between interfaces in case of matching meshes.
++ Creates the interpolation rules between interfaces in case of non-matching meshes.
++ """
++ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+- MPIsize = self.comm.Get_size()
++ MPIsize = self.comm.Get_size()
+ else:
+ myid = 0
+ MPIsize = 1
+
+- # --- Get the fluid interface from fluid solver on each partition ---
+- GlobalIndex = int()
++ # --- Get the fluid interface from fluid solver on each partition ---
++ GlobalIndex = int()
+ localIndex = 0
+ fluidIndexing_temp = {}
+ self.localFluidInterface_array_X_init = np.zeros((self.nLocalFluidInterfacePhysicalNodes))
+ self.localFluidInterface_array_Y_init = np.zeros((self.nLocalFluidInterfacePhysicalNodes))
+ self.localFluidInterface_array_Z_init = np.zeros((self.nLocalFluidInterfacePhysicalNodes))
+ for iVertex in range(self.nLocalFluidInterfaceNodes):
+- GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
+- posx = FluidSolver.GetVertexCoordX(self.fluidInterfaceIdentifier, iVertex)
+- posy = FluidSolver.GetVertexCoordY(self.fluidInterfaceIdentifier, iVertex)
+- posz = FluidSolver.GetVertexCoordZ(self.fluidInterfaceIdentifier, iVertex)
+- if GlobalIndex in self.FluidHaloNodeList[myid].keys():
++ GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
++ posx = FluidSolver.GetVertexCoordX(self.fluidInterfaceIdentifier, iVertex)
++ posy = FluidSolver.GetVertexCoordY(self.fluidInterfaceIdentifier, iVertex)
++ posz = FluidSolver.GetVertexCoordZ(self.fluidInterfaceIdentifier, iVertex)
++ if GlobalIndex in self.FluidHaloNodeList[myid].keys():
+ self.haloNodesPositionsInit[GlobalIndex] = (posx, posy, posz)
+ else:
+ fluidIndexing_temp[GlobalIndex] = self.__getGlobalIndex('fluid', myid, localIndex)
+@@ -576,17 +576,17 @@
+ self.fluidIndexing = fluidIndexing_temp.copy()
+ del fluidIndexing_temp
+
+- # --- Get the solid interface from solid solver on each partition ---
++ # --- Get the solid interface from solid solver on each partition ---
+ localIndex = 0
+ solidIndexing_temp = {}
+- self.localSolidInterface_array_X = np.zeros(self.nLocalSolidInterfaceNodes)
++ self.localSolidInterface_array_X = np.zeros(self.nLocalSolidInterfaceNodes)
+ self.localSolidInterface_array_Y = np.zeros(self.nLocalSolidInterfaceNodes)
+ self.localSolidInterface_array_Z = np.zeros(self.nLocalSolidInterfaceNodes)
+ for iVertex in range(self.nLocalSolidInterfaceNodes):
+ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
+- posx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex)
+- posy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex)
+- posz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex)
++ posx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex)
++ posy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex)
++ posz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex)
+ if GlobalIndex in self.SolidHaloNodeList[myid].keys():
+ pass
+ else:
+@@ -605,14 +605,14 @@
+ del solidIndexing_temp
+
+
+- # --- Create the PETSc parallel interpolation matrix ---
++ # --- Create the PETSc parallel interpolation matrix ---
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
+ if self.have_MPI == True:
+ self.MappingMatrixA = PETSc.Mat().create(self.comm)
+ self.MappingMatrixB = PETSc.Mat().create(self.comm)
+ self.MappingMatrixA_T = PETSc.Mat().create(self.comm)
+ self.MappingMatrixB_T = PETSc.Mat().create(self.comm)
+- if FSI_config['MESH_INTERP_METHOD'] == 'RBF' :
++ if FSI_config['MESH_INTERP_METHOD'] == 'RBF' :
+ self.MappingMatrixA.setType('mpiaij')
+ self.MappingMatrixB.setType('mpiaij')
+ self.MappingMatrixA_T.setType('mpiaij')
+@@ -627,7 +627,7 @@
+ self.MappingMatrixB = PETSc.Mat().create()
+ self.MappingMatrixA_T = PETSc.Mat().create()
+ self.MappingMatrixB_T = PETSc.Mat().create()
+- if FSI_config['MESH_INTERP_METHOD'] == 'RBF' :
++ if FSI_config['MESH_INTERP_METHOD'] == 'RBF' :
+ self.MappingMatrixA.setType('aij')
+ self.MappingMatrixB.setType('aij')
+ self.MappingMatrixA_T.setType('aij')
+@@ -637,16 +637,16 @@
+ self.MappingMatrixB.setType('aij')
+ self.MappingMatrixA_T.setType('aij')
+ self.MappingMatrixB_T.setType('aij')
+- self.MappingMatrixA.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF))
++ self.MappingMatrixA.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF))
+ self.MappingMatrixA.setUp()
+ self.MappingMatrixA.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+- self.MappingMatrixB.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes+self.d_RBF))
++ self.MappingMatrixB.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes+self.d_RBF))
+ self.MappingMatrixB.setUp()
+ self.MappingMatrixB.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+- self.MappingMatrixA_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF))
++ self.MappingMatrixA_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF))
+ self.MappingMatrixA_T.setUp()
+ self.MappingMatrixA_T.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+- self.MappingMatrixB_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nFluidInterfacePhysicalNodes))
++ self.MappingMatrixB_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nFluidInterfacePhysicalNodes))
+ self.MappingMatrixB_T.setUp()
+ self.MappingMatrixB_T.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+ else:
+@@ -660,21 +660,21 @@
+ self.MappingMatrix_T = PETSc.Mat().create()
+ self.MappingMatrix.setType('aij')
+ self.MappingMatrix_T.setType('aij')
+- self.MappingMatrix.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes))
++ self.MappingMatrix.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes))
+ self.MappingMatrix.setUp()
+ self.MappingMatrix.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+- self.MappingMatrix_T.setSizes((self.nSolidInterfacePhysicalNodes, self.nFluidInterfacePhysicalNodes))
++ self.MappingMatrix_T.setSizes((self.nSolidInterfacePhysicalNodes, self.nFluidInterfacePhysicalNodes))
+ self.MappingMatrix_T.setUp()
+ self.MappingMatrix_T.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
+-
+-
++
++
+ # --- Fill the interpolation matrix in parallel (working in serial too) ---
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
+ self.MPIPrint('Building interpolation matrices...')
+ if self.have_MPI == True:
+ for iProc in self.solidInterfaceProcessors:
+ if myid == iProc:
+- for jProc in self.solidInterfaceProcessors:
++ for jProc in self.solidInterfaceProcessors:
+ self.comm.Send(self.localSolidInterface_array_X, dest=jProc, tag=1)
+ self.comm.Send(self.localSolidInterface_array_Y, dest=jProc, tag=2)
+ self.comm.Send(self.localSolidInterface_array_Z, dest=jProc, tag=3)
+@@ -726,7 +726,7 @@
+ self.TPSMeshMapping_B(solidInterfaceBuffRcv_X, solidInterfaceBuffRcv_Y, solidInterfaceBuffRcv_Z, iProc)
+ else:
+ self.NearestNeighboorMeshMapping(solidInterfaceBuffRcv_X, solidInterfaceBuffRcv_Y, solidInterfaceBuffRcv_Z, iProc)
+- else:
++ else:
+ self.matchingMeshMapping(solidInterfaceBuffRcv_X, solidInterfaceBuffRcv_Y, solidInterfaceBuffRcv_Z, iProc)
+ else:
+ if FSI_config['MATCHING_MESH'] == 'NO':
+@@ -735,10 +735,10 @@
+ elif FSI_config['MESH_INTERP_METHOD'] == 'TPS' :
+ self.TPSMeshMapping_B(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0)
+ else:
+- self.NearestNeighboorMeshMapping(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0)
+- else:
++ self.NearestNeighboorMeshMapping(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0)
++ else:
+ self.matchingMeshMapping(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0)
+-
++
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
+ self.MappingMatrixB.assemblyBegin()
+ self.MappingMatrixB.assemblyEnd()
+@@ -751,9 +751,9 @@
+ self.MappingMatrix_T.assemblyBegin()
+ self.MappingMatrix_T.assemblyEnd()
+ self.MPIPrint("Interpolation matrix is built.")
+-
++
+ self.MPIBarrier()
+-
++
+ del self.localSolidInterface_array_X
+ del self.localSolidInterface_array_Y
+ del self.localSolidInterface_array_Z
+@@ -768,20 +768,20 @@
+ myid = 0
+
+ # --- Instantiate the spatial indexing ---
+- prop_index = index.Property()
+- prop_index.dimension = self.nDim
+- SolidSpatialTree = index.Index(properties=prop_index)
+-
++ prop_index = index.Property()
++ prop_index.dimension = self.nDim
++ SolidSpatialTree = index.Index(properties=prop_index)
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for jVertex in range(nSolidNodes):
+ posX = solidInterfaceBuffRcv_X[jVertex]
+ posY = solidInterfaceBuffRcv_Y[jVertex]
+ posZ = solidInterfaceBuffRcv_Z[jVertex]
+- if self.nDim == 2 :
+- SolidSpatialTree.add(jVertex, (posX, posY))
+- else :
+- SolidSpatialTree.add(jVertex, (posX, posY, posZ))
++ if self.nDim == 2 :
++ SolidSpatialTree.add(jVertex, (posX, posY))
++ else :
++ SolidSpatialTree.add(jVertex, (posX, posY, posZ))
+
+ if self.nFluidInterfacePhysicalNodes != self.nSolidInterfacePhysicalNodes:
+ raise Exception("Fluid and solid interface must have the same number of nodes for matching meshes ! ")
+@@ -822,20 +822,20 @@
+ myid = 0
+
+ # --- Instantiate the spatial indexing ---
+- prop_index = index.Property()
+- prop_index.dimension = self.nDim
+- SolidSpatialTree = index.Index(properties=prop_index)
+-
++ prop_index = index.Property()
++ prop_index.dimension = self.nDim
++ SolidSpatialTree = index.Index(properties=prop_index)
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for jVertex in range(nSolidNodes):
+ posX = solidInterfaceBuffRcv_X[jVertex]
+ posY = solidInterfaceBuffRcv_Y[jVertex]
+ posZ = solidInterfaceBuffRcv_Z[jVertex]
+- if self.nDim == 2 :
+- SolidSpatialTree.add(jVertex, (posX, posY))
+- else :
+- SolidSpatialTree.add(jVertex, (posX, posY, posZ))
++ if self.nDim == 2 :
++ SolidSpatialTree.add(jVertex, (posX, posY))
++ else :
++ SolidSpatialTree.add(jVertex, (posX, posY, posZ))
+
+ # --- For each fluid interface node, find the nearest solid interface node and fill the boolean mapping matrix ---
+ for iVertexFluid in range(self.nLocalFluidInterfacePhysicalNodes):
+@@ -863,20 +863,20 @@
+ myid = 0
+
+ # --- Instantiate the spatial indexing ---
+- prop_index = index.Property()
+- prop_index.dimension = self.nDim
+- SolidSpatialTree = index.Index(properties=prop_index)
+-
++ prop_index = index.Property()
++ prop_index.dimension = self.nDim
++ SolidSpatialTree = index.Index(properties=prop_index)
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for jVertex in range(nSolidNodes):
+ posX = solidInterfaceBuffRcv_X[jVertex]
+ posY = solidInterfaceBuffRcv_Y[jVertex]
+ posZ = solidInterfaceBuffRcv_Z[jVertex]
+- if self.nDim == 2 :
+- SolidSpatialTree.add(jVertex, (posX, posY))
+- else :
+- SolidSpatialTree.add(jVertex, (posX, posY, posZ))
++ if self.nDim == 2 :
++ SolidSpatialTree.add(jVertex, (posX, posY))
++ else :
++ SolidSpatialTree.add(jVertex, (posX, posY, posZ))
+
+ for iVertexSolid in range(self.nLocalSolidInterfaceNodes):
+ posX = self.localSolidInterface_array_X[iVertexSolid]
+@@ -915,20 +915,20 @@
+ myid = 0
+
+ # --- Instantiate the spatial indexing ---
+- prop_index = index.Property()
+- prop_index.dimension = self.nDim
+- SolidSpatialTree = index.Index(properties=prop_index)
+-
++ prop_index = index.Property()
++ prop_index.dimension = self.nDim
++ SolidSpatialTree = index.Index(properties=prop_index)
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for jVertex in range(nSolidNodes):
+ posX = solidInterfaceBuffRcv_X[jVertex]
+ posY = solidInterfaceBuffRcv_Y[jVertex]
+ posZ = solidInterfaceBuffRcv_Z[jVertex]
+- if self.nDim == 2 :
+- SolidSpatialTree.add(jVertex, (posX, posY))
+- else :
+- SolidSpatialTree.add(jVertex, (posX, posY, posZ))
++ if self.nDim == 2 :
++ SolidSpatialTree.add(jVertex, (posX, posY))
++ else :
++ SolidSpatialTree.add(jVertex, (posX, posY, posZ))
+
+ for iVertexFluid in range(self.nLocalFluidInterfacePhysicalNodes):
+ posX = self.localFluidInterface_array_X_init[iVertexFluid]
+@@ -965,7 +965,7 @@
+ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for iVertexSolid in range(self.nLocalSolidInterfaceNodes):
+@@ -999,7 +999,7 @@
+ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
+
+ for iVertexFluid in range(self.nLocalFluidInterfacePhysicalNodes):
+@@ -1031,7 +1031,7 @@
+ """
+ phi = 0.0
+ eps = distance/rad
+-
++
+ if eps < 1:
+ phi = ((1.0-eps)**4)*(4.0*eps+1.0)
+ else:
+@@ -1044,20 +1044,20 @@
+ Description
+ """
+ phi = 0.0
+-
++
+ if distance > 0.0:
+ phi = (distance**2)*np.log10(distance)
+ else:
+ phi = 0.0
+
+- return phi
++ return phi
+
+
+ def interpolateSolidPositionOnFluidMesh(self, FSI_config):
+- """
+- Applies the one-to-one mapping or the interpolaiton rules from solid to fluid mesh.
+- """
+- if self.have_MPI == True:
++ """
++ Applies the one-to-one mapping or the interpolaiton rules from solid to fluid mesh.
++ """
++ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+ MPIsize = self.comm.Get_size()
+ else:
+@@ -1110,12 +1110,12 @@
+ del gamma_array_DispY
+ del gamma_array_DispZ
+ del KSP_solver
+- else:
++ else:
+ self.MappingMatrix.mult(self.solidInterface_array_DispX, self.fluidInterface_array_DispX)
+ self.MappingMatrix.mult(self.solidInterface_array_DispY, self.fluidInterface_array_DispY)
+ self.MappingMatrix.mult(self.solidInterface_array_DispZ, self.fluidInterface_array_DispZ)
+
+- # --- Checking conservation ---
++ # --- Checking conservation ---
+ WSX = self.solidLoads_array_X.dot(self.solidInterface_array_DispX)
+ WSY = self.solidLoads_array_Y.dot(self.solidInterface_array_DispY)
+ WSZ = self.solidLoads_array_Z.dot(self.solidInterface_array_DispZ)
+@@ -1124,11 +1124,11 @@
+ WFY = self.fluidLoads_array_Y.dot(self.fluidInterface_array_DispY)
+ WFZ = self.fluidLoads_array_Z.dot(self.fluidInterface_array_DispZ)
+
+- self.MPIPrint("Checking f/s interface conservation...")
+- self.MPIPrint('Solid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WSX, WSY, WSZ))
+- self.MPIPrint('Fluid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WFX, WFY, WFZ))
++ self.MPIPrint("Checking f/s interface conservation...")
++ self.MPIPrint('Solid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WSX, WSY, WSZ))
++ self.MPIPrint('Fluid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WFX, WFY, WFZ))
++
+
+-
+ # --- Redistribute the interpolated fluid interface according to the partitions that own the fluid interface ---
+ # Gather the fluid interface on the master process
+ if self.have_MPI == True:
+@@ -1156,7 +1156,7 @@
+ displ = tuple(displ)
+
+ del sendBuffNumber, rcvBuffNumber
+-
++
+ #print("DEBUG MESSAGE From proc {}, counts = {}".format(myid, counts))
+ #print("DEBUG MESSAGE From proc {}, displ = {}".format(myid, displ))
+
+@@ -1213,18 +1213,18 @@
+ del sendBuff
+
+ def interpolateFluidLoadsOnSolidMesh(self, FSI_config):
+- """
+- Applies the one-to-one mapping or the interpolaiton rules from fluid to solid mesh.
+- """
+- if self.have_MPI == True:
++ """
++ Applies the one-to-one mapping or the interpolaiton rules from fluid to solid mesh.
++ """
++ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+ MPIsize = self.comm.Get_size()
+ else:
+ myid = 0
+ MPIsize = 1
+-
++
+ # --- Interpolate (or map) in parallel the fluid interface loads on the solid interface ---
+- #self.MappingMatrix.transpose()
++ #self.MappingMatrix.transpose()
+ if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
+ if self.have_MPI == True:
+ gamma_array_LoadX = PETSc.Vec().create(self.comm)
+@@ -1280,10 +1280,10 @@
+ self.solidLoads_array_X_recon = None
+ self.solidLoads_array_Y_recon = None
+ self.solidLoads_array_Z_recon = None
+- if myid == self.rootProcess:
+- self.solidLoads_array_X_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+- self.solidLoads_array_Y_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+- self.solidLoads_array_Z_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
++ if myid == self.rootProcess:
++ self.solidLoads_array_X_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
++ self.solidLoads_array_Y_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
++ self.solidLoads_array_Z_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+ myNumberOfNodes = self.solidLoads_array_X.getArray().shape[0]
+ sendBuffNumber = np.array([myNumberOfNodes], dtype=int)
+ rcvBuffNumber = np.zeros(MPIsize, dtype=int)
+@@ -1293,9 +1293,9 @@
+ displ = np.zeros(MPIsize, dtype=int)
+ for ii in range(rcvBuffNumber.shape[0]):
+ displ[ii] = rcvBuffNumber[0:ii].sum()
+- displ = tuple(displ)
++ displ = tuple(displ)
+
+- del sendBuffNumber, rcvBuffNumber
++ del sendBuffNumber, rcvBuffNumber
+
+ self.comm.Gatherv(self.solidLoads_array_X.getArray(), [self.solidLoads_array_X_recon, counts, displ, self.MPI.DOUBLE], root=self.rootProcess)
+ self.comm.Gatherv(self.solidLoads_array_Y.getArray(), [self.solidLoads_array_Y_recon, counts, displ, self.MPI.DOUBLE], root=self.rootProcess)
+@@ -1336,25 +1336,25 @@
+
+
+ '''def getSolidInterfacePosition(self, SolidSolver):
+- """
+- Gets the current solid interface position from the solid solver.
+- """
++ """
++ Gets the current solid interface position from the solid solver.
++ """
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ # --- Get the solid interface position from the solid solver and directly fill the corresponding PETSc vector ---
+ GlobalIndex = int()
+ localIndex = 0
+- for iVertex in range(self.nLocalSolidInterfaceNodes):
++ for iVertex in range(self.nLocalSolidInterfaceNodes):
+ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
+ if GlobalIndex in self.SolidHaloNodeList[myid].keys():
+ pass
+ else:
+- newPosx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex)
+- newPosy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex)
+- newPosz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex)
++ newPosx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex)
++ newPosy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex)
++ newPosz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex)
+ iGlobalVertex = self.__getGlobalIndex('solid', myid, localIndex)
+ self.solidInterface_array_X.setValues([iGlobalVertex],newPosx)
+ self.solidInterface_array_Y.setValues([iGlobalVertex],newPosy)
+@@ -1375,25 +1375,25 @@
+ #print("DEBUG MESSAGE From PROC {} : array_X = {}".format(myid, self.solidInterface_array_X.getArray()))'''
+
+ def getSolidInterfaceDisplacement(self, SolidSolver):
+- """
+- Gets the current solid interface position from the solid solver.
+- """
++ """
++ Gets the current solid interface position from the solid solver.
++ """
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ # --- Get the solid interface position from the solid solver and directly fill the corresponding PETSc vector ---
+ GlobalIndex = int()
+ localIndex = 0
+- for iVertex in range(self.nLocalSolidInterfaceNodes):
++ for iVertex in range(self.nLocalSolidInterfaceNodes):
+ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
+ if GlobalIndex in self.SolidHaloNodeList[myid].keys():
+ pass
+ else:
+- newDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex)
+- newDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex)
+- newDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex)
++ newDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex)
++ newDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex)
++ newDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex)
+ iGlobalVertex = self.__getGlobalIndex('solid', myid, localIndex)
+ self.solidInterface_array_DispX.setValues([iGlobalVertex],newDispx)
+ self.solidInterface_array_DispY.setValues([iGlobalVertex],newDispy)
+@@ -1408,9 +1408,9 @@
+ self.solidInterface_array_DispZ.assemblyEnd()
+
+ def getFluidInterfaceNodalForce(self, FSI_config, FluidSolver):
+- """
+- Gets the fluid interface loads from the fluid solver.
+- """
++ """
++ Gets the fluid interface loads from the fluid solver.
++ """
+ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+ else:
+@@ -1422,17 +1422,17 @@
+ FZ = 0.0
+
+ # --- Get the fluid interface loads from the fluid solver and directly fill the corresponding PETSc vector ---
+- for iVertex in range(self.nLocalFluidInterfaceNodes):
+- halo = FluidSolver.ComputeVertexForces(self.fluidInterfaceIdentifier, iVertex) # !!we have to ignore halo node coming from mesh partitioning because they introduice non-physical forces
+- if halo==False:
+- if FSI_config['CSD_SOLVER'] == 'GETDP':
+- newFx = FluidSolver.GetVertexForceDensityX(self.fluidInterfaceIdentifier, iVertex)
+- newFy = FluidSolver.GetVertexForceDensityY(self.fluidInterfaceIdentifier, iVertex)
+- newFz = FluidSolver.GetVertexForceDensityZ(self.fluidInterfaceIdentifier, iVertex)
+- else:
+- newFx = FluidSolver.GetVertexForceX(self.fluidInterfaceIdentifier, iVertex)
+- newFy = FluidSolver.GetVertexForceY(self.fluidInterfaceIdentifier, iVertex)
+- newFz = FluidSolver.GetVertexForceZ(self.fluidInterfaceIdentifier, iVertex)
++ for iVertex in range(self.nLocalFluidInterfaceNodes):
++ halo = FluidSolver.ComputeVertexForces(self.fluidInterfaceIdentifier, iVertex) # !!we have to ignore halo node coming from mesh partitioning because they introduice non-physical forces
++ if halo==False:
++ if FSI_config['CSD_SOLVER'] == 'GETDP':
++ newFx = FluidSolver.GetVertexForceDensityX(self.fluidInterfaceIdentifier, iVertex)
++ newFy = FluidSolver.GetVertexForceDensityY(self.fluidInterfaceIdentifier, iVertex)
++ newFz = FluidSolver.GetVertexForceDensityZ(self.fluidInterfaceIdentifier, iVertex)
++ else:
++ newFx = FluidSolver.GetVertexForceX(self.fluidInterfaceIdentifier, iVertex)
++ newFy = FluidSolver.GetVertexForceY(self.fluidInterfaceIdentifier, iVertex)
++ newFz = FluidSolver.GetVertexForceZ(self.fluidInterfaceIdentifier, iVertex)
+ iGlobalVertex = self.__getGlobalIndex('fluid', myid, localIndex)
+ self.fluidLoads_array_X.setValues([iGlobalVertex], newFx)
+ self.fluidLoads_array_Y.setValues([iGlobalVertex], newFy)
+@@ -1457,22 +1457,22 @@
+ FX_b = self.fluidLoads_array_X.sum()
+ FY_b = self.fluidLoads_array_Y.sum()
+ FZ_b = self.fluidLoads_array_Z.sum()
+-
++
+
+ def setFluidInterfaceVarCoord(self, FluidSolver):
+- """
+- Communicate the change of coordinates of the fluid interface to the fluid solver.
+- Prepare the fluid solver for mesh deformation.
+- """
++ """
++ Communicate the change of coordinates of the fluid interface to the fluid solver.
++ Prepare the fluid solver for mesh deformation.
++ """
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ # --- Send the new fluid interface position to the fluid solver (on each partition, halo nodes included) ---
+ localIndex = 0
+- for iVertex in range(self.nLocalFluidInterfaceNodes):
+- GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
++ for iVertex in range(self.nLocalFluidInterfaceNodes):
++ GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
+ if GlobalIndex in self.FluidHaloNodeList[myid].keys():
+ posX0, posY0, posZ0 = self.haloNodesPositionsInit[GlobalIndex]
+ DispX, DispY, DispZ = self.haloNodesDisplacements[GlobalIndex]
+@@ -1491,32 +1491,32 @@
+ FluidSolver.SetVertexCoordZ(self.fluidInterfaceIdentifier, iVertex, posZ)
+ localIndex += 1
+ # Prepares the mesh deformation in the fluid solver
+- nodalVarCoordNorm = FluidSolver.SetVertexVarCoord(self.fluidInterfaceIdentifier, iVertex)
++ nodalVarCoordNorm = FluidSolver.SetVertexVarCoord(self.fluidInterfaceIdentifier, iVertex)
++
+
+-
+ def setSolidInterfaceLoads(self, SolidSolver, FSI_config, time):
+- """
+- Communicates the new solid interface loads to the solid solver.
+- In case of rigid body motion, calculates the new resultant forces (lift, drag, ...).
+- """
++ """
++ Communicates the new solid interface loads to the solid solver.
++ In case of rigid body motion, calculates the new resultant forces (lift, drag, ...).
++ """
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+
+- FY = 0.0 # solid-side resultant forces
++ FY = 0.0 # solid-side resultant forces
+ FX = 0.0
+ FZ = 0.0
+- FFX = 0.0 # fluid-side resultant forces
+- FFY = 0.0
+- FFZ = 0.0
++ FFX = 0.0 # fluid-side resultant forces
++ FFY = 0.0
++ FFZ = 0.0
+
+ # --- Check for total force conservation after interpolation
+ FFX = self.fluidLoads_array_X.sum()
+ FFY = self.fluidLoads_array_Y.sum()
+ FFZ = self.fluidLoads_array_Z.sum()
+
+-
++
+ for iVertex in range(self.nLocalSolidInterfaceNodes):
+ FX += self.localSolidLoads_array_X[iVertex]
+ FY += self.localSolidLoads_array_Y[iVertex]
+@@ -1527,9 +1527,9 @@
+ FY = self.comm.allreduce(FY)
+ FZ = self.comm.allreduce(FZ)
+
+- self.MPIPrint("Checking f/s interface total force...")
+- self.MPIPrint('Solid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FX, FY, FZ))
+- self.MPIPrint('Fluid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FFX, FFY, FFZ))
++ self.MPIPrint("Checking f/s interface total force...")
++ self.MPIPrint('Solid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FX, FY, FZ))
++ self.MPIPrint('Fluid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FFX, FFY, FFZ))
+
+ # --- Send the new solid interface loads to the solid solver (on each partition, halo nodes included) ---
+ GlobalIndex = int()
+@@ -1541,25 +1541,25 @@
+ pass
+ else:
+ Fx = self.localSolidLoads_array_X[localIndex]
+- Fy = self.localSolidLoads_array_Y[localIndex]
+- Fz = self.localSolidLoads_array_Z[localIndex]
++ Fy = self.localSolidLoads_array_Y[localIndex]
++ Fz = self.localSolidLoads_array_Z[localIndex]
+ SolidSolver.applyload(iVertex, Fx, Fy, Fz, time)
+ localIndex += 1
+- if FSI_config['CSD_SOLVER'] == 'NATIVE':
++ if FSI_config['CSD_SOLVER'] == 'NATIVE':
+ SolidSolver.setGeneralisedForce()
+- SolidSolver.setGeneralisedMoment()
++ SolidSolver.setGeneralisedMoment()
+
+ def computeSolidInterfaceResidual(self, SolidSolver):
+- """
+- Computes the solid interface FSI displacement residual.
+- """
++ """
++ Computes the solid interface FSI displacement residual.
++ """
+
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+
+- normInterfaceResidualSquare = 0.0
++ normInterfaceResidualSquare = 0.0
+
+ # --- Create and fill the PETSc vector for the predicted solid interface position (predicted by the solid computation) ---
+ if self.have_MPI == True:
+@@ -1575,27 +1575,27 @@
+ predDisp_array_Y = PETSc.Vec().create()
+ predDisp_array_Y.setType('seq')
+ predDisp_array_Z = PETSc.Vec().create()
+- predDisp_array_Z.setType('seq')
++ predDisp_array_Z.setType('seq')
+ predDisp_array_X.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+ predDisp_array_Y.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+ predDisp_array_Z.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
+-
+- if myid in self.solidSolverProcessors:
+- for iVertex in range(self.nLocalSolidInterfaceNodes):
+- predDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex)
+- predDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex)
+- predDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex)
++
++ if myid in self.solidSolverProcessors:
++ for iVertex in range(self.nLocalSolidInterfaceNodes):
++ predDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex)
++ predDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex)
++ predDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex)
+ iGlobalVertex = self.__getGlobalIndex('solid', myid, iVertex)
+ predDisp_array_X.setValues([iGlobalVertex], predDispx)
+ predDisp_array_Y.setValues([iGlobalVertex], predDispy)
+ predDisp_array_Z.setValues([iGlobalVertex], predDispz)
+-
+- predDisp_array_X.assemblyBegin()
+- predDisp_array_X.assemblyEnd()
+- predDisp_array_Y.assemblyBegin()
+- predDisp_array_Y.assemblyEnd()
+- predDisp_array_Z.assemblyBegin()
+- predDisp_array_Z.assemblyEnd()
++
++ predDisp_array_X.assemblyBegin()
++ predDisp_array_X.assemblyEnd()
++ predDisp_array_Y.assemblyBegin()
++ predDisp_array_Y.assemblyEnd()
++ predDisp_array_Z.assemblyBegin()
++ predDisp_array_Z.assemblyEnd()
+
+ # --- Calculate the residual (vector and norm) ---
+ self.solidInterfaceResidual_array_X = predDisp_array_X - self.solidInterface_array_DispX
+@@ -1615,45 +1615,45 @@
+ del predDisp_array_Y
+ del predDisp_array_Z
+
+- return sqrt(normInterfaceResidualSquare)
++ return sqrt(normInterfaceResidualSquare)
+
+ def relaxSolidPosition(self,FSI_config):
+- """
+- Apply solid displacement under-relaxation.
+- """
++ """
++ Apply solid displacement under-relaxation.
++ """
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+
+ # --- Set the Aitken coefficient for the relaxation ---
+- if FSI_config['AITKEN_RELAX'] == 'STATIC':
+- self.aitkenParam = FSI_config['AITKEN_PARAM']
+- elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC':
+- self.setAitkenCoefficient(FSI_config)
+- else:
+- self.aitkenParam = 1.0
++ if FSI_config['AITKEN_RELAX'] == 'STATIC':
++ self.aitkenParam = FSI_config['AITKEN_PARAM']
++ elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC':
++ self.setAitkenCoefficient(FSI_config)
++ else:
++ self.aitkenParam = 1.0
+
+- self.MPIPrint('Aitken under-relaxation step with parameter {}'.format(self.aitkenParam))
++ self.MPIPrint('Aitken under-relaxation step with parameter {}'.format(self.aitkenParam))
+
+ # --- Relax the solid interface position ---
+ self.solidInterface_array_DispX += self.aitkenParam*self.solidInterfaceResidual_array_X
+ self.solidInterface_array_DispY += self.aitkenParam*self.solidInterfaceResidual_array_Y
+ self.solidInterface_array_DispZ += self.aitkenParam*self.solidInterfaceResidual_array_Z
+-
++
+
+ def setAitkenCoefficient(self, FSI_config):
+- """
+- Computes the Aitken coefficients for solid displacement under-relaxation.
+- """
+-
+- deltaResNormSquare = 0.0
+- prodScalRes = 0.0
+-
++ """
++ Computes the Aitken coefficients for solid displacement under-relaxation.
++ """
++
++ deltaResNormSquare = 0.0
++ prodScalRes = 0.0
++
+ # --- Create the PETSc vector for the difference between the residuals (current and previous FSI iter) ---
+- if self.FSIIter == 0:
+- self.aitkenParam = max(FSI_config['AITKEN_PARAM'], self.aitkenParam)
+- else:
++ if self.FSIIter == 0:
++ self.aitkenParam = max(FSI_config['AITKEN_PARAM'], self.aitkenParam)
++ else:
+ if self.have_MPI:
+ deltaResx_array_X = PETSc.Vec().create(self.comm)
+ deltaResx_array_X.setType('mpi')
+@@ -1688,9 +1688,9 @@
+ deltaResNormSquare_X = (deltaResx_array_X.norm())**2
+ deltaResNormSquare_Y = (deltaResx_array_Y.norm())**2
+ deltaResNormSquare_Z = (deltaResx_array_Z.norm())**2
+- deltaResNormSquare = deltaResNormSquare_X + deltaResNormSquare_Y + deltaResNormSquare_Z
++ deltaResNormSquare = deltaResNormSquare_X + deltaResNormSquare_Y + deltaResNormSquare_Z
+
+- self.aitkenParam *= -prodScalRes/deltaResNormSquare
++ self.aitkenParam *= -prodScalRes/deltaResNormSquare
+
+ deltaResx_array_X.destroy()
+ deltaResx_array_Y.destroy()
+@@ -1708,27 +1708,27 @@
+ self.solidInterfaceResidual_array_Z.copy(self.solidInterfaceResidualnM1_array_Z)
+
+ def displacementPredictor(self, FSI_config , SolidSolver, deltaT):
+- """
+- Calculates a prediciton for the solid interface position for the next time step.
+- """
++ """
++ Calculates a prediciton for the solid interface position for the next time step.
++ """
+
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
++ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+
+- if FSI_config['DISP_PRED'] == 'FIRST_ORDER':
+- self.MPIPrint("First order predictor")
+- alpha_0 = 1.0
+- alpha_1 = 0.0
+- elif FSI_config['DISP_PRED'] == 'SECOND_ORDER':
+- self.MPIPrint("Second order predictor")
+- alpha_0 = 1.0
+- alpha_1 = 0.5
+- else:
+- self.MPIPrint("No predictor")
+- alpha_0 = 0.0
+- alpha_1 = 0.0
++ if FSI_config['DISP_PRED'] == 'FIRST_ORDER':
++ self.MPIPrint("First order predictor")
++ alpha_0 = 1.0
++ alpha_1 = 0.0
++ elif FSI_config['DISP_PRED'] == 'SECOND_ORDER':
++ self.MPIPrint("Second order predictor")
++ alpha_0 = 1.0
++ alpha_1 = 0.5
++ else:
++ self.MPIPrint("No predictor")
++ alpha_0 = 0.0
++ alpha_1 = 0.0
+
+ # --- Create the PETSc vectors to store the solid interface velocity ---
+ if self.have_MPI == True:
+@@ -1774,18 +1774,18 @@
+ # --- Fill the PETSc vectors ---
+ GlobalIndex = int()
+ localIndex = 0
+- for iVertex in range(self.nLocalSolidInterfaceNodes):
+- GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
++ for iVertex in range(self.nLocalSolidInterfaceNodes):
++ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
+ if GlobalIndex in self.SolidHaloNodeList[myid].keys():
+ pass
+ else:
+ iGlobalVertex = self.__getGlobalIndex('solid', myid, localIndex)
+- velx = SolidSolver.getInterfaceNodeVelX(self.solidInterfaceIdentifier, iVertex)
+- vely = SolidSolver.getInterfaceNodeVelY(self.solidInterfaceIdentifier, iVertex)
+- velz = SolidSolver.getInterfaceNodeVelZ(self.solidInterfaceIdentifier, iVertex)
+- velxNm1 = SolidSolver.getInterfaceNodeVelXNm1(self.solidInterfaceIdentifier, iVertex)
+- velyNm1 = SolidSolver.getInterfaceNodeVelYNm1(self.solidInterfaceIdentifier, iVertex)
+- velzNm1 = SolidSolver.getInterfaceNodeVelZNm1(self.solidInterfaceIdentifier, iVertex)
++ velx = SolidSolver.getInterfaceNodeVelX(self.solidInterfaceIdentifier, iVertex)
++ vely = SolidSolver.getInterfaceNodeVelY(self.solidInterfaceIdentifier, iVertex)
++ velz = SolidSolver.getInterfaceNodeVelZ(self.solidInterfaceIdentifier, iVertex)
++ velxNm1 = SolidSolver.getInterfaceNodeVelXNm1(self.solidInterfaceIdentifier, iVertex)
++ velyNm1 = SolidSolver.getInterfaceNodeVelYNm1(self.solidInterfaceIdentifier, iVertex)
++ velzNm1 = SolidSolver.getInterfaceNodeVelZNm1(self.solidInterfaceIdentifier, iVertex)
+ Vel_array_X.setValues([iGlobalVertex],velx)
+ Vel_array_Y.setValues([iGlobalVertex],vely)
+ Vel_array_Z.setValues([iGlobalVertex],velz)
+@@ -1822,27 +1822,27 @@
+ del VelnM1_array_X, VelnM1_array_Y, VelnM1_array_Z
+
+ def writeFSIHistory(self, TimeIter, time, varCoordNorm, FSIConv):
+- """
+- Write the FSI history file of the computaion.
+- """
++ """
++ Write the FSI history file of the computaion.
++ """
+
+ if self.have_MPI == True:
+ myid = self.comm.Get_rank()
+ else:
+ myid = 0
+-
++
+ if myid == self.rootProcess:
+- if self.unsteady:
+- if TimeIter == 0:
+- histFile = open('FSIhistory.dat', "w")
++ if self.unsteady:
++ if TimeIter == 0:
++ histFile = open('FSIhistory.dat', "w")
+ histFile.write("TimeIter\tTime\tFSIRes\tFSINbIter\n")
+- else:
+- histFile = open('FSIhistory.dat', "a")
+- if FSIConv:
+- histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter+1) + '\n')
+- else:
+- histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter) + '\n')
+- histFile.close()
++ else:
++ histFile = open('FSIhistory.dat', "a")
++ if FSIConv:
++ histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter+1) + '\n')
++ else:
++ histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter) + '\n')
++ histFile.close()
+ else:
+ if self.FSIIter == 0:
+ histFile = open('FSIhistory.dat', "w")
+@@ -1851,7 +1851,7 @@
+ histFile = open('FSIhistory.dat', "a")
+ histFile.write(str(self.FSIIter) + '\t' + str(varCoordNorm) + '\n')
+ histFile.close()
+-
++
+
+ self.MPIBarrier()
+
+@@ -1868,254 +1868,254 @@
+ globalIndex = globalStartIndex + iLocalVertex
+
+ return globalIndex
+-
++
+
+ def UnsteadyFSI(self,FSI_config, FluidSolver, SolidSolver):
+- """
+- Run the unsteady FSI computation by synchronizing the fluid and solid solvers.
+- F/s interface data are exchanged through interface mapping and interpolation (if non mathcing meshes).
+- """
++ """
++ Run the unsteady FSI computation by synchronizing the fluid and solid solvers.
++ F/s interface data are exchanged through interface mapping and interpolation (if non mathcing meshes).
++ """
+
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
+- numberPart = self.comm.Get_size()
++ myid = self.comm.Get_rank()
++ numberPart = self.comm.Get_size()
+ else:
+ myid = 0
+ numberPart = 1
+
+- # --- Set some general variables for the unsteady computation --- #
+- deltaT = FSI_config['UNST_TIMESTEP'] # physical time step
+- totTime = FSI_config['UNST_TIME'] # physical simulation time
+- NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step)
+- FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance
+- TimeIterTreshold = 0 # time iteration from which we allow the solid to deform
+-
+- if FSI_config['RESTART_SOL'] == 'YES':
+- startTime = FSI_config['START_TIME']
+- NbTimeIter = ((totTime)/deltaT)-1
+- time = startTime
+- TimeIter = FSI_config['RESTART_ITER']
+- else:
+- NbTimeIter = (totTime/deltaT)-1 # number of time iterations
+- time = 0.0 # initial time
+- TimeIter = 0 # initial time iteration
+-
+- NbTimeIter = int(NbTimeIter) # be sure that NbTimeIter is an integer
+-
+- varCoordNorm = 0.0 # FSI residual
+- FSIConv = False # FSI convergence flag
+-
+- self.MPIPrint('\n**********************************')
+- self.MPIPrint('* Begin unsteady FSI computation *')
+- self.MPIPrint('**********************************\n')
+-
+- # --- Initialize the coupled solution --- #
+- #If restart (DOES NOT WORK YET)
+- if FSI_config['RESTART_SOL'] == 'YES':
+- TimeIterTreshold = -1
+- FluidSolver.setTemporalIteration(TimeIter)
+- if myid == self.rootProcess:
+- SolidSolver.outputDisplacements(FluidSolver.getInterRigidDispArray(), True)
++ # --- Set some general variables for the unsteady computation --- #
++ deltaT = FSI_config['UNST_TIMESTEP'] # physical time step
++ totTime = FSI_config['UNST_TIME'] # physical simulation time
++ NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step)
++ FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance
++ TimeIterTreshold = 0 # time iteration from which we allow the solid to deform
++
++ if FSI_config['RESTART_SOL'] == 'YES':
++ startTime = FSI_config['START_TIME']
++ NbTimeIter = ((totTime)/deltaT)-1
++ time = startTime
++ TimeIter = FSI_config['RESTART_ITER']
++ else:
++ NbTimeIter = (totTime/deltaT)-1 # number of time iterations
++ time = 0.0 # initial time
++ TimeIter = 0 # initial time iteration
++
++ NbTimeIter = int(NbTimeIter) # be sure that NbTimeIter is an integer
++
++ varCoordNorm = 0.0 # FSI residual
++ FSIConv = False # FSI convergence flag
++
++ self.MPIPrint('\n**********************************')
++ self.MPIPrint('* Begin unsteady FSI computation *')
++ self.MPIPrint('**********************************\n')
++
++ # --- Initialize the coupled solution --- #
++ #If restart (DOES NOT WORK YET)
++ if FSI_config['RESTART_SOL'] == 'YES':
++ TimeIterTreshold = -1
++ FluidSolver.setTemporalIteration(TimeIter)
++ if myid == self.rootProcess:
++ SolidSolver.outputDisplacements(FluidSolver.getInterRigidDispArray(), True)
++ if self.have_MPI == True:
++ self.comm.barrier()
++ FluidSolver.setInitialMesh(True)
++ if myid == self.rootProcess:
++ SolidSolver.displacementPredictor(FluidSolver.getInterRigidDispArray())
+ if self.have_MPI == True:
+- self.comm.barrier()
+- FluidSolver.setInitialMesh(True)
+- if myid == self.rootProcess:
+- SolidSolver.displacementPredictor(FluidSolver.getInterRigidDispArray())
+- if self.have_MPI == True:
+- self.comm.barrier()
+- if myid == self.rootProcess:
+- SolidSolver.updateSolution()
+- #If no restart
+- else:
+- self.MPIPrint('Setting FSI initial conditions')
++ self.comm.barrier()
++ if myid == self.rootProcess:
++ SolidSolver.updateSolution()
++ #If no restart
++ else:
++ self.MPIPrint('Setting FSI initial conditions')
+ if myid in self.solidSolverProcessors:
+- SolidSolver.setInitialDisplacements()
++ SolidSolver.setInitialDisplacements()
+ self.getSolidInterfaceDisplacement(SolidSolver)
+- self.interpolateSolidPositionOnFluidMesh(FSI_config)
+- self.setFluidInterfaceVarCoord(FluidSolver)
+- FluidSolver.SetInitialMesh() # if there is an initial deformation in the solid, it has to be communicated to the fluid solver
+- self.MPIPrint('\nFSI initial conditions are set')
+- self.MPIPrint('Beginning time integration\n')
+-
+- # --- External temporal loop --- #
+- while TimeIter <= NbTimeIter:
+-
+- if TimeIter > TimeIterTreshold:
+- NbFSIIter = NbFSIIterMax
+- self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI on time iteration {} ***************'.format(TimeIter))
+- else:
+- NbFSIIter = 1
+-
+- self.FSIIter = 0
+- FSIConv = False
+- FluidSolver.PreprocessExtIter(TimeIter) # set some parameters before temporal fluid iteration
+-
+- # --- Internal FSI loop --- #
+- while self.FSIIter <= (NbFSIIter-1):
++ self.interpolateSolidPositionOnFluidMesh(FSI_config)
++ self.setFluidInterfaceVarCoord(FluidSolver)
++ FluidSolver.SetInitialMesh() # if there is an initial deformation in the solid, it has to be communicated to the fluid solver
++ self.MPIPrint('\nFSI initial conditions are set')
++ self.MPIPrint('Beginning time integration\n')
++
++ # --- External temporal loop --- #
++ while TimeIter <= NbTimeIter:
++
++ if TimeIter > TimeIterTreshold:
++ NbFSIIter = NbFSIIterMax
++ self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI on time iteration {} ***************'.format(TimeIter))
++ else:
++ NbFSIIter = 1
++
++ self.FSIIter = 0
++ FSIConv = False
++ FluidSolver.PreprocessExtIter(TimeIter) # set some parameters before temporal fluid iteration
+
+- self.MPIPrint("\n>>>> Time iteration {} / FSI iteration {} <<<<".format(TimeIter,self.FSIIter))
++ # --- Internal FSI loop --- #
++ while self.FSIIter <= (NbFSIIter-1):
+
+- # --- Mesh morphing step (displacements interpolation, displacements communication, and mesh morpher call) --- #
+- self.interpolateSolidPositionOnFluidMesh(FSI_config)
++ self.MPIPrint("\n>>>> Time iteration {} / FSI iteration {} <<<<".format(TimeIter,self.FSIIter))
++
++ # --- Mesh morphing step (displacements interpolation, displacements communication, and mesh morpher call) --- #
++ self.interpolateSolidPositionOnFluidMesh(FSI_config)
+ self.MPIPrint('\nPerforming dynamic mesh deformation (ALE)...\n')
+ self.setFluidInterfaceVarCoord(FluidSolver)
+ FluidSolver.DynamicMeshUpdate(TimeIter)
+-
+- # --- Fluid solver call for FSI subiteration --- #
+- self.MPIPrint('\nLaunching fluid solver for one single dual-time iteration...')
++
++ # --- Fluid solver call for FSI subiteration --- #
++ self.MPIPrint('\nLaunching fluid solver for one single dual-time iteration...')
+ self.MPIBarrier()
+- FluidSolver.ResetConvergence()
+- FluidSolver.Run()
++ FluidSolver.ResetConvergence()
++ FluidSolver.Run()
+ self.MPIBarrier()
+
+- # --- Surface fluid loads interpolation and communication --- #
+- self.MPIPrint('\nProcessing interface fluid loads...\n')
++ # --- Surface fluid loads interpolation and communication --- #
++ self.MPIPrint('\nProcessing interface fluid loads...\n')
+ self.MPIBarrier()
+- self.getFluidInterfaceNodalForce(FSI_config, FluidSolver)
++ self.getFluidInterfaceNodalForce(FSI_config, FluidSolver)
+ self.MPIBarrier()
+- if TimeIter > TimeIterTreshold:
+- self.interpolateFluidLoadsOnSolidMesh(FSI_config)
+- self.setSolidInterfaceLoads(SolidSolver, FSI_config, time)
++ if TimeIter > TimeIterTreshold:
++ self.interpolateFluidLoadsOnSolidMesh(FSI_config)
++ self.setSolidInterfaceLoads(SolidSolver, FSI_config, time)
+
+- # --- Solid solver call for FSI subiteration --- #
+- self.MPIPrint('\nLaunching solid solver for a single time iteration...\n')
++ # --- Solid solver call for FSI subiteration --- #
++ self.MPIPrint('\nLaunching solid solver for a single time iteration...\n')
+ if myid in self.solidSolverProcessors:
+- if FSI_config['CSD_SOLVER'] == 'NATIVE':
+- SolidSolver.timeIteration(time)
+- elif FSI_config['CSD_SOLVER'] == 'METAFOR' or FSI_config['CSD_SOLVER'] == 'GETDP' or FSI_config['CSD_SOLVER'] == 'TESTER':
+- SolidSolver.run(time-deltaT, time)
+-
+- # --- Compute and monitor the FSI residual --- #
+- varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver)
+- self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm))
+- if varCoordNorm < FSITolerance:
+- FSIConv = True
+- break
++ if FSI_config['CSD_SOLVER'] == 'NATIVE':
++ SolidSolver.timeIteration(time)
++ elif FSI_config['CSD_SOLVER'] == 'METAFOR' or FSI_config['CSD_SOLVER'] == 'GETDP' or FSI_config['CSD_SOLVER'] == 'TESTER':
++ SolidSolver.run(time-deltaT, time)
++
++ # --- Compute and monitor the FSI residual --- #
++ varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver)
++ self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm))
++ if varCoordNorm < FSITolerance:
++ FSIConv = True
++ break
+
+- # --- Relaxe the solid position --- #
++ # --- Relaxe the solid position --- #
+ self.MPIPrint('\nProcessing interface displacements...\n')
+- self.relaxSolidPosition(FSI_config)
+-
+- self.FSIIter += 1
+- # --- End OF FSI loop --- #
++ self.relaxSolidPosition(FSI_config)
++
++ self.FSIIter += 1
++ # --- End OF FSI loop --- #
+
+ self.MPIBarrier()
+
+- # --- Update the FSI history file --- #
+- if TimeIter > TimeIterTreshold:
+- self.MPIPrint('\nBGS is converged (strong coupling)')
+- self.writeFSIHistory(TimeIter, time, varCoordNorm, FSIConv)
+-
+- # --- Update, monitor and output the fluid solution before the next time step ---#
+- FluidSolver.Update()
+- FluidSolver.Monitor(TimeIter)
+- FluidSolver.Output(TimeIter)
+-
+- if TimeIter >= TimeIterTreshold:
+- if myid in self.solidSolverProcessors:
+- # --- Output the solid solution before thr next time step --- #
+- SolidSolver.writeSolution(time, self.FSIIter, TimeIter, NbTimeIter)
+-
+- # --- Displacement predictor for the next time step and update of the solid solution --- #
+- self.MPIPrint('\nSolid displacement prediction for next time step')
+- self.displacementPredictor(FSI_config, SolidSolver, deltaT)
++ # --- Update the FSI history file --- #
++ if TimeIter > TimeIterTreshold:
++ self.MPIPrint('\nBGS is converged (strong coupling)')
++ self.writeFSIHistory(TimeIter, time, varCoordNorm, FSIConv)
++
++ # --- Update, monitor and output the fluid solution before the next time step ---#
++ FluidSolver.Update()
++ FluidSolver.Monitor(TimeIter)
++ FluidSolver.Output(TimeIter)
++
++ if TimeIter >= TimeIterTreshold:
++ if myid in self.solidSolverProcessors:
++ # --- Output the solid solution before thr next time step --- #
++ SolidSolver.writeSolution(time, self.FSIIter, TimeIter, NbTimeIter)
++
++ # --- Displacement predictor for the next time step and update of the solid solution --- #
++ self.MPIPrint('\nSolid displacement prediction for next time step')
++ self.displacementPredictor(FSI_config, SolidSolver, deltaT)
+ if myid in self.solidSolverProcessors:
+- SolidSolver.updateSolution()
+-
+- TimeIter += 1
+- time += deltaT
+- #--- End of the temporal loop --- #
++ SolidSolver.updateSolution()
++
++ TimeIter += 1
++ time += deltaT
++ #--- End of the temporal loop --- #
+
+ self.MPIBarrier()
+
+- self.MPIPrint('\n*************************')
+- self.MPIPrint('* End FSI computation *')
+- self.MPIPrint('*************************\n')
++ self.MPIPrint('\n*************************')
++ self.MPIPrint('* End FSI computation *')
++ self.MPIPrint('*************************\n')
+
+ def SteadyFSI(self, FSI_config,FluidSolver, SolidSolver):
+- """
+- Runs the steady FSI computation by synchronizing the fluid and solid solver with data exchange at the f/s interface.
+- """
++ """
++ Runs the steady FSI computation by synchronizing the fluid and solid solver with data exchange at the f/s interface.
++ """
+
+ if self.have_MPI == True:
+- myid = self.comm.Get_rank()
+- numberPart = self.comm.Get_size()
++ myid = self.comm.Get_rank()
++ numberPart = self.comm.Get_size()
+ else:
+ myid = 0
+ numberPart = 1
+
+- # --- Set some general variables for the steady computation --- #
+- NbIter = FSI_config['NB_EXT_ITER'] # number of fluid iteration at each FSI step
+- NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step)
+- FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance
+- varCoordNorm = 0.0
+-
+- self.MPIPrint('\n********************************')
+- self.MPIPrint('* Begin steady FSI computation *')
+- self.MPIPrint('********************************\n')
+- self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI ***************')
++ # --- Set some general variables for the steady computation --- #
++ NbIter = FSI_config['NB_EXT_ITER'] # number of fluid iteration at each FSI step
++ NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step)
++ FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance
++ varCoordNorm = 0.0
++
++ self.MPIPrint('\n********************************')
++ self.MPIPrint('* Begin steady FSI computation *')
++ self.MPIPrint('********************************\n')
++ self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI ***************')
+
+ self.getSolidInterfaceDisplacement(SolidSolver)
+
+- # --- External FSI loop --- #
+- self.FSIIter = 0
+- while self.FSIIter < NbFSIIterMax:
+- self.MPIPrint("\n>>>> FSI iteration {} <<<<".format(self.FSIIter))
+- self.MPIPrint('\nLaunching fluid solver for a steady computation...')
+- # --- Fluid solver call for FSI subiteration ---#
+- Iter = 0
+- FluidSolver.ResetConvergence()
+- while Iter < NbIter:
+- FluidSolver.PreprocessExtIter(Iter)
+- FluidSolver.Run()
+- StopIntegration = FluidSolver.Monitor(Iter)
+- FluidSolver.Output(Iter)
+- if StopIntegration:
+- break;
+- Iter += 1
+-
+- # --- Surface fluid loads interpolation and communication ---#
+- self.MPIPrint('\nProcessing interface fluid loads...\n')
++ # --- External FSI loop --- #
++ self.FSIIter = 0
++ while self.FSIIter < NbFSIIterMax:
++ self.MPIPrint("\n>>>> FSI iteration {} <<<<".format(self.FSIIter))
++ self.MPIPrint('\nLaunching fluid solver for a steady computation...')
++ # --- Fluid solver call for FSI subiteration ---#
++ Iter = 0
++ FluidSolver.ResetConvergence()
++ while Iter < NbIter:
++ FluidSolver.PreprocessExtIter(Iter)
++ FluidSolver.Run()
++ StopIntegration = FluidSolver.Monitor(Iter)
++ FluidSolver.Output(Iter)
++ if StopIntegration:
++ break;
++ Iter += 1
++
++ # --- Surface fluid loads interpolation and communication ---#
++ self.MPIPrint('\nProcessing interface fluid loads...\n')
+ self.MPIBarrier()
+- self.getFluidInterfaceNodalForce(FSI_config, FluidSolver)
++ self.getFluidInterfaceNodalForce(FSI_config, FluidSolver)
+ self.MPIBarrier()
+- self.interpolateFluidLoadsOnSolidMesh(FSI_config)
+- self.setSolidInterfaceLoads(SolidSolver, FSI_config, 0.05)
+-
+- # --- Solid solver call for FSI subiteration --- #
+- self.MPIPrint('\nLaunching solid solver for a static computation...\n')
++ self.interpolateFluidLoadsOnSolidMesh(FSI_config)
++ self.setSolidInterfaceLoads(SolidSolver, FSI_config, 0.05)
++
++ # --- Solid solver call for FSI subiteration --- #
++ self.MPIPrint('\nLaunching solid solver for a static computation...\n')
+ if myid in self.solidSolverProcessors:
+- if FSI_config['CSD_SOLVER'] == 'NATIVE':
+- SolidSolver.staticComputation()
++ if FSI_config['CSD_SOLVER'] == 'NATIVE':
++ SolidSolver.staticComputation()
+ else:
+ SolidSolver.run(0.0, 0.05)
+- SolidSolver.writeSolution(0.0, self.FSIIter, Iter, NbIter)
++ SolidSolver.writeSolution(0.0, self.FSIIter, Iter, NbIter)
+
+- # --- Compute and monitor the FSI residual --- #
+- varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver)
+- self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm))
++ # --- Compute and monitor the FSI residual --- #
++ varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver)
++ self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm))
+ self.writeFSIHistory(0, 0.0, varCoordNorm, False)
+- if varCoordNorm < FSITolerance:
+- break
++ if varCoordNorm < FSITolerance:
++ break
+
+ # --- Relaxe the solid displacement and update the solid solution --- #
+ self.MPIPrint('\nProcessing interface displacements...\n')
+- self.relaxSolidPosition(FSI_config)
++ self.relaxSolidPosition(FSI_config)
+ if myid in self.solidSolverProcessors:
+ SolidSolver.updateSolution()
+-
+- # --- Mesh morphing step (displacement interpolation, displacements communication, and mesh morpher call) --- #
+- self.interpolateSolidPositionOnFluidMesh(FSI_config)
+- self.MPIPrint('\nPerforming static mesh deformation...\n')
+- self.setFluidInterfaceVarCoord(FluidSolver)
+- FluidSolver.StaticMeshUpdate()
+- self.FSIIter += 1
++
++ # --- Mesh morphing step (displacement interpolation, displacements communication, and mesh morpher call) --- #
++ self.interpolateSolidPositionOnFluidMesh(FSI_config)
++ self.MPIPrint('\nPerforming static mesh deformation...\n')
++ self.setFluidInterfaceVarCoord(FluidSolver)
++ FluidSolver.StaticMeshUpdate()
++ self.FSIIter += 1
+
+ self.MPIBarrier()
+
+- self.MPIPrint('\nBGS is converged (strong coupling)')
+- self.MPIPrint(' ')
+- self.MPIPrint('*************************')
+- self.MPIPrint('* End FSI computation *')
+- self.MPIPrint('*************************')
+- self.MPIPrint(' ')
++ self.MPIPrint('\nBGS is converged (strong coupling)')
++ self.MPIPrint(' ')
++ self.MPIPrint('*************************')
++ self.MPIPrint('* End FSI computation *')
++ self.MPIPrint('*************************')
++ self.MPIPrint(' ')
+diff -Naur old/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py new/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py
+--- old/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py 2020-05-01 19:09:18.000000000 +0300
++++ new/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py 2020-05-10 16:17:07.000000000 +0300
+@@ -174,9 +174,9 @@
+
+ with open(self.Config_file) as configfile:
+ while 1:
+- line = configfile.readline()
+- if not line:
+- break
++ line = configfile.readline()
++ if not line:
++ break
+
+ # remove line returns
+ line = line.strip('\r\n')
+@@ -189,41 +189,41 @@
+ this_value = line[1].strip()
+
+ for case in switch(this_param):
+- #integer values
+- #if case("NB_FSI_ITER") :
+- #self.Config[this_param] = int(this_value)
+- #break
+-
+- #float values
+- if case("DELTA_T") : pass
+- if case("START_TIME") : pass
+- if case("STOP_TIME") : pass
+- if case("SPRING_MASS") : pass
+- if case("INERTIA_FLEXURAL") : pass
+- if case("SPRING_STIFFNESS") : pass
+- if case("SPRING_DAMPING") : pass
+- if case("TORSIONAL_STIFFNESS") : pass
+- if case("TORSIONAL_DAMPING") : pass
+- if case("CORD") : pass
+- if case("FLEXURAL_AXIS") : pass
+- if case("GRAVITY_CENTER") : pass
+- if case("INITIAL_DISP") : pass
+- if case("INITIAL_ANGLE") : pass
+- if case("RHO") :
+- self.Config[this_param] = float(this_value)
+- break
+-
+- #string values
+- if case("TIME_MARCHING") : pass
+- if case("MESH_FILE") : pass
+- if case("CSD_SOLVER") : pass
+- if case("MOVING_MARKER") : pass
+- if case("STRUCT_TYPE") :
+- self.Config[this_param] = this_value
+- break
++ #integer values
++ #if case("NB_FSI_ITER") :
++ #self.Config[this_param] = int(this_value)
++ #break
++
++ #float values
++ if case("DELTA_T") : pass
++ if case("START_TIME") : pass
++ if case("STOP_TIME") : pass
++ if case("SPRING_MASS") : pass
++ if case("INERTIA_FLEXURAL") : pass
++ if case("SPRING_STIFFNESS") : pass
++ if case("SPRING_DAMPING") : pass
++ if case("TORSIONAL_STIFFNESS") : pass
++ if case("TORSIONAL_DAMPING") : pass
++ if case("CORD") : pass
++ if case("FLEXURAL_AXIS") : pass
++ if case("GRAVITY_CENTER") : pass
++ if case("INITIAL_DISP") : pass
++ if case("INITIAL_ANGLE") : pass
++ if case("RHO") :
++ self.Config[this_param] = float(this_value)
++ break
++
++ #string values
++ if case("TIME_MARCHING") : pass
++ if case("MESH_FILE") : pass
++ if case("CSD_SOLVER") : pass
++ if case("MOVING_MARKER") : pass
++ if case("STRUCT_TYPE") :
++ self.Config[this_param] = this_value
++ break
+
+- if case():
+- print(this_param + " is an invalid option !")
++ if case():
++ print(this_param + " is an invalid option !")
+ break
+
+ def __readSU2Mesh(self):
+@@ -233,78 +233,78 @@
+ print('Opened mesh file ' + self.Mesh_file + '.')
+ while 1:
+ line = meshfile.readline()
+- if not line:
+- break
++ if not line:
++ break
+
+- pos = line.find('NDIM')
+- if pos != -1:
+- line = line.strip('\r\n')
++ pos = line.find('NDIM')
++ if pos != -1:
++ line = line.strip('\r\n')
+ line = line.split("=",1)
+- self.nDim = int(line[1])
+- continue
+-
+- pos = line.find('NELEM')
+- if pos != -1:
+- line = line.strip('\r\n')
++ self.nDim = int(line[1])
++ continue
++
++ pos = line.find('NELEM')
++ if pos != -1:
++ line = line.strip('\r\n')
+ line = line.split("=",1)
+- self.nElem = int(line[1])
+- continue
++ self.nElem = int(line[1])
++ continue
+
+- pos = line.find('NPOIN')
+- if pos != -1:
+- line = line.strip('\r\n')
++ pos = line.find('NPOIN')
++ if pos != -1:
++ line = line.strip('\r\n')
+ line = line.split("=",1)
+- self.nPoint = int(line[1])
++ self.nPoint = int(line[1])
+ for iPoint in range(self.nPoint):
+- self.node.append(Point())
+- line = meshfile.readline()
+- line = line.strip('\r\n')
+- line = line.split(' ',self.nDim)
+- x = float(line[0])
+- y = float(line[1])
++ self.node.append(Point())
++ line = meshfile.readline()
++ line = line.strip('\r\n')
++ line = line.split(' ',self.nDim)
++ x = float(line[0])
++ y = float(line[1])
+ z = 0.0
+- if self.nDim == 3:
+- z = float(line[2])
+- self.node[iPoint].SetCoord((x,y,z))
++ if self.nDim == 3:
++ z = float(line[2])
++ self.node[iPoint].SetCoord((x,y,z))
+ self.node[iPoint].SetCoord0((x,y,z))
+- self.node[iPoint].SetCoord_n((x,y,z))
+- continue
++ self.node[iPoint].SetCoord_n((x,y,z))
++ continue
+
+- pos = line.find('NMARK')
+- if pos != -1:
+- line = line.strip('\r\n')
++ pos = line.find('NMARK')
++ if pos != -1:
++ line = line.strip('\r\n')
+ line = line.split("=",1)
+- self.nMarker = int(line[1])
+- continue
++ self.nMarker = int(line[1])
++ continue
+
+- pos = line.find('MARKER_TAG')
+- if pos != -1:
+- line = line.strip('\r\n')
+- line = line.replace(" ", "")
++ pos = line.find('MARKER_TAG')
++ if pos != -1:
++ line = line.strip('\r\n')
++ line = line.replace(" ", "")
+ line = line.split("=",1)
+- markerTag = line[1]
+- if markerTag == self.FSI_marker:
+- self.markers[markerTag] = []
+- line = meshfile.readline()
+- line = line.strip('\r\n')
+- line = line.split("=",1)
+- nElem = int(line[1])
+- for iElem in range(nElem):
+- line = meshfile.readline()
+- line = line.strip('\r\n')
+- line = line.split(' ',1)
+- elemType = int(line[0])
+- if elemType == 3:
+- nodes = line[1].split(' ', 1)
+- if not int(nodes[0]) in self.markers[markerTag]:
+- self.markers[markerTag].append(int(nodes[0]))
+- if not int(nodes[1]) in self.markers[markerTag]:
+- self.markers[markerTag].append(int(nodes[1]))
+- else:
+- print("Element type {} is not recognized !!".format(elemType))
+- continue
+- else:
+- continue
++ markerTag = line[1]
++ if markerTag == self.FSI_marker:
++ self.markers[markerTag] = []
++ line = meshfile.readline()
++ line = line.strip('\r\n')
++ line = line.split("=",1)
++ nElem = int(line[1])
++ for iElem in range(nElem):
++ line = meshfile.readline()
++ line = line.strip('\r\n')
++ line = line.split(' ',1)
++ elemType = int(line[0])
++ if elemType == 3:
++ nodes = line[1].split(' ', 1)
++ if not int(nodes[0]) in self.markers[markerTag]:
++ self.markers[markerTag].append(int(nodes[0]))
++ if not int(nodes[1]) in self.markers[markerTag]:
++ self.markers[markerTag].append(int(nodes[1]))
++ else:
++ print("Element type {} is not recognized !!".format(elemType))
++ continue
++ else:
++ continue
+
+ print("Number of dimensions: {}".format(self.nDim))
+ print("Number of elements: {}".format(self.nElem))
+@@ -441,23 +441,23 @@
+ Coord_n = self.node[iPoint].GetCoord_n()
+
+ if self.Unsteady:
+- r = Coord_n - self.centerOfRotation_n
+- else:
+- r = Coord - self.centerOfRotation
++ r = Coord_n - self.centerOfRotation_n
++ else:
++ r = Coord - self.centerOfRotation
+
+- rotCoord = rotMatrix.dot(r)
++ rotCoord = rotMatrix.dot(r)
+
+ newCoord = newCenter + rotCoord
+ newVel[0] = Centerdot[0]+psidot*(newCoord[1]-newCenter[1])
+- newVel[1] = Centerdot[1]-psidot*(newCoord[0]-newCenter[0])
+- newVel[2] = Centerdot[2]+0.0
++ newVel[1] = Centerdot[1]-psidot*(newCoord[0]-newCenter[0])
++ newVel[2] = Centerdot[2]+0.0
+
+ self.node[iPoint].SetCoord((newCoord[0], newCoord[1], newCoord[2]))
+ self.node[iPoint].SetVel((newVel[0], newVel[1], newVel[2]))
+
+- if initialize:
+- self.node[iPoint].SetCoord_n((newCoord[0], newCoord[1], newCoord[2]))
+- self.node[iPoint].SetVel_n((newVel[0], newVel[1], newVel[2]))
++ if initialize:
++ self.node[iPoint].SetCoord_n((newCoord[0], newCoord[1], newCoord[2]))
++ self.node[iPoint].SetVel_n((newVel[0], newVel[1], newVel[2]))
+
+ self.centerOfRotation = np.copy(newCenter)
+
+diff -Naur old/SU2_PY/FSI/io/FSI_config.py new/SU2_PY/FSI/io/FSI_config.py
+--- old/SU2_PY/FSI/io/FSI_config.py 2020-05-01 19:09:18.000000000 +0300
++++ new/SU2_PY/FSI/io/FSI_config.py 2020-05-10 16:17:07.000000000 +0300
+@@ -58,23 +58,23 @@
+ self.readConfig()
+
+ def __str__(self):
+- tempString = str()
+- for key, value in self._ConfigContent.items():
+- tempString += "{} = {}\n".format(key,value)
+- return tempString
++ tempString = str()
++ for key, value in self._ConfigContent.items():
++ tempString += "{} = {}\n".format(key,value)
++ return tempString
+
+ def __getitem__(self,key):
+- return self._ConfigContent[key]
++ return self._ConfigContent[key]
+
+ def __setitem__(self, key, value):
+- self._ConfigContent[key] = value
++ self._ConfigContent[key] = value
+
+ def readConfig(self):
+ input_file = open(self.ConfigFileName)
+ while 1:
+- line = input_file.readline()
+- if not line:
+- break
++ line = input_file.readline()
++ if not line:
++ break
+ # remove line returns
+ line = line.strip('\r\n')
+ # make sure it has useful data
+@@ -86,46 +86,46 @@
+ this_value = line[1].strip()
+
+ for case in switch(this_param):
+- #integer values
+- if case("NDIM") : pass
+- #if case("MESH_DEF_LIN_ITER") : pass
+- #if case("MESH_DEF_NONLIN_ITER") : pass
+- if case("RESTART_ITER") : pass
+- if case("NB_EXT_ITER") : pass
+- if case("NB_FSI_ITER") :
+- self._ConfigContent[this_param] = int(this_value)
+- break
++ #integer values
++ if case("NDIM") : pass
++ #if case("MESH_DEF_LIN_ITER") : pass
++ #if case("MESH_DEF_NONLIN_ITER") : pass
++ if case("RESTART_ITER") : pass
++ if case("NB_EXT_ITER") : pass
++ if case("NB_FSI_ITER") :
++ self._ConfigContent[this_param] = int(this_value)
++ break
+
+- #float values
++ #float values
+ if case("RBF_RADIUS") : pass
+- if case("AITKEN_PARAM") : pass
+- if case("START_TIME") : pass
+- if case("UNST_TIMESTEP") : pass
+- if case("UNST_TIME") : pass
+- if case("FSI_TOLERANCE") :
+- self._ConfigContent[this_param] = float(this_value)
+- break
+-
+- #string values
+- if case("CFD_CONFIG_FILE_NAME") : pass
+- if case("CSD_SOLVER") : pass
+- if case("CSD_CONFIG_FILE_NAME") : pass
+- if case("RESTART_SOL") : pass
+- if case("MATCHING_MESH") : pass
++ if case("AITKEN_PARAM") : pass
++ if case("START_TIME") : pass
++ if case("UNST_TIMESTEP") : pass
++ if case("UNST_TIME") : pass
++ if case("FSI_TOLERANCE") :
++ self._ConfigContent[this_param] = float(this_value)
++ break
++
++ #string values
++ if case("CFD_CONFIG_FILE_NAME") : pass
++ if case("CSD_SOLVER") : pass
++ if case("CSD_CONFIG_FILE_NAME") : pass
++ if case("RESTART_SOL") : pass
++ if case("MATCHING_MESH") : pass
+ if case("MESH_INTERP_METHOD") : pass
+- if case("DISP_PRED") : pass
+- if case("AITKEN_RELAX") : pass
+- if case("TIME_MARCHING") : pass
+- if case("INTERNAL_FLOW") :
+- #if case("MESH_DEF_METHOD") : pass
+- self._ConfigContent[this_param] = this_value
+- break
+-
+- if case():
+- print(this_param + " is an invalid option !")
+- break
+- #end for
+-
++ if case("DISP_PRED") : pass
++ if case("AITKEN_RELAX") : pass
++ if case("TIME_MARCHING") : pass
++ if case("INTERNAL_FLOW") :
++ #if case("MESH_DEF_METHOD") : pass
++ self._ConfigContent[this_param] = this_value
++ break
++
++ if case():
++ print(this_param + " is an invalid option !")
++ break
++ #end for
++
+
+
+ #def dump()
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [gentoo-commits] repo/proj/guru:dev commit in: sci-physics/SU2/, sci-physics/SU2/files/
@ 2021-01-05 15:21 Andrew Ammerlaan
0 siblings, 0 replies; 6+ messages in thread
From: Andrew Ammerlaan @ 2021-01-05 15:21 UTC (permalink / raw
To: gentoo-commits
commit: 9b382fc34a3d2eae3c7cea3b3834c18c457bf81d
Author: Andrew Ammerlaan <andrewammerlaan <AT> riseup <DOT> net>
AuthorDate: Tue Jan 5 15:21:37 2021 +0000
Commit: Andrew Ammerlaan <andrewammerlaan <AT> riseup <DOT> net>
CommitDate: Tue Jan 5 15:21:37 2021 +0000
URL: https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=9b382fc3
sci-physics/SU2: drop unreferenced patch
it is now fetched, so no need to have it here anymore
Package-Manager: Portage-3.0.12, Repoman-3.0.2
Signed-off-by: Andrew Ammerlaan <andrewammerlaan <AT> riseup.net>
sci-physics/SU2/Manifest | 2 +-
.../SU2/files/SU2-7.0.8-fix-python-optimize.patch | 2212 --------------------
2 files changed, 1 insertion(+), 2213 deletions(-)
diff --git a/sci-physics/SU2/Manifest b/sci-physics/SU2/Manifest
index 10d45c06..7a4bd55d 100644
--- a/sci-physics/SU2/Manifest
+++ b/sci-physics/SU2/Manifest
@@ -1,4 +1,4 @@
DIST SU2-7.0.7-TestCases.tar.gz 448969006 BLAKE2B 6c886824b55d7f8516d2ea69e2f7bef36a40986f4f715da46d91f851eb59390329433c6941a280eca34ad675633b2f1b01a897d1db8d177a5c4f770b286d0625 SHA512 0884b4f750dbcfd3f2cb0e71d6005932e4edd90a50fa84eb484f6c0c523930ddebfb3ed4315161b8fdeff911a52fa72b6d79739c8e19cd634b9823e007520213
DIST SU2-7.0.7-Tutorials.tar.gz 64282235 BLAKE2B 7a6b780ee6f01d26a7a7d4751ca39798af56cfd7b99ca3e13fdff61aecd631a3aa4c98a487f48a8b2593c711ee25bd1ddc90a316bde2c287e95a383321f1d5e9 SHA512 189b5da96f08689b62ba3c42ee349edd2e145f371112895587e53497f16de3d6fdbf17308af39961775d76e3169c40872ced8e267146b6da5ae12d31a4c70fa9
-DIST SU2-7.0.8.tar.gz 20554206 BLAKE2B ac291eeae696197f5875839defa98bde550c5250b5a5cff8049e0d1c8184c5ec6d225b77618d079a8fb3d5a7f4d0531a0b2931fd15e045299fa55c7dd03d9fd1 SHA512 26ed8c39d2dfcdb3fc4e1c986c64f683f44cd4123ce5f7a482c4557a82b04b4bd8ef1f04332ab36ad0f987cfb98053fb8b30ef81faf51c7c56aebc1a30467df9
DIST SU2-7.0.8-fix-python-optimize.patch 104379 BLAKE2B e2cb9f58af5f600f25d01a082b55a344d3b05c455d44cfcb3370fc77546b6821d616e80f577b7af4b19ad519f086495eb6f6bde1f1712451ac477d6dd538fa0e SHA512 f21ed5fc1e61b78f3858a162a899c552777143118f05db009247c6224b06b922b6280de5725726afe21a3283a18ad9c38fc1c65dadb709d440e10cf3fe754d8c
+DIST SU2-7.0.8.tar.gz 20554206 BLAKE2B ac291eeae696197f5875839defa98bde550c5250b5a5cff8049e0d1c8184c5ec6d225b77618d079a8fb3d5a7f4d0531a0b2931fd15e045299fa55c7dd03d9fd1 SHA512 26ed8c39d2dfcdb3fc4e1c986c64f683f44cd4123ce5f7a482c4557a82b04b4bd8ef1f04332ab36ad0f987cfb98053fb8b30ef81faf51c7c56aebc1a30467df9
diff --git a/sci-physics/SU2/files/SU2-7.0.8-fix-python-optimize.patch b/sci-physics/SU2/files/SU2-7.0.8-fix-python-optimize.patch
deleted file mode 100644
index 64bff0a3..00000000
--- a/sci-physics/SU2/files/SU2-7.0.8-fix-python-optimize.patch
+++ /dev/null
@@ -1,2212 +0,0 @@
-diff -Naur old/SU2_PY/FSI/FSIInterface.py new/SU2_PY/FSI/FSIInterface.py
---- old/SU2_PY/FSI/FSIInterface.py 2020-05-01 19:09:18.000000000 +0300
-+++ new/SU2_PY/FSI/FSIInterface.py 2020-05-10 16:17:07.000000000 +0300
-@@ -6,8 +6,8 @@
- # \version 7.0.8 "Blackbird"
- #
- # SU2 Project Website: https://su2code.github.io
--#
--# The SU2 Project is maintained by the SU2 Foundation
-+#
-+# The SU2 Project is maintained by the SU2 Foundation
- # (http://su2foundation.org)
- #
- # Copyright 2012-2020, SU2 Contributors (cf. AUTHORS.md)
-@@ -16,7 +16,7 @@
- # modify it under the terms of the GNU Lesser General Public
- # License as published by the Free Software Foundation; either
- # version 2.1 of the License, or (at your option) any later version.
--#
-+#
- # SU2 is distributed in the hope that it will be useful,
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-@@ -42,19 +42,19 @@
- # ----------------------------------------------------------------------
-
- class Interface:
-- """
-+ """
- FSI interface class that handles fluid/solid solvers synchronisation and communication
- """
--
-+
- def __init__(self, FSI_config, FluidSolver, SolidSolver, have_MPI):
-- """
-- Class constructor. Declare some variables and do some screen outputs.
-- """
--
-+ """
-+ Class constructor. Declare some variables and do some screen outputs.
-+ """
-+
- if have_MPI == True:
- from mpi4py import MPI
- self.MPI = MPI
-- self.comm = MPI.COMM_WORLD #MPI World communicator
-+ self.comm = MPI.COMM_WORLD #MPI World communicator
- self.have_MPI = True
- myid = self.comm.Get_rank()
- else:
-@@ -62,42 +62,42 @@
- self.have_MPI = False
- myid = 0
-
-- self.rootProcess = 0 #the root process is chosen to be MPI rank = 0
-+ self.rootProcess = 0 #the root process is chosen to be MPI rank = 0
-
-- self.nDim = FSI_config['NDIM'] #problem dimension
-+ self.nDim = FSI_config['NDIM'] #problem dimension
-
-- self.haveFluidSolver = False #True if the fluid solver is initialized on the current rank
-- self.haveSolidSolver = False #True if the solid solver is initialized on the current rank
-- self.haveFluidInterface = False #True if the current rank owns at least one fluid interface node
-- self.haveSolidInterface = False #True if the current rank owns at least one solid interface node
-+ self.haveFluidSolver = False #True if the fluid solver is initialized on the current rank
-+ self.haveSolidSolver = False #True if the solid solver is initialized on the current rank
-+ self.haveFluidInterface = False #True if the current rank owns at least one fluid interface node
-+ self.haveSolidInterface = False #True if the current rank owns at least one solid interface node
-
-- self.fluidSolverProcessors = list() #list of partitions where the fluid solver is initialized
-- self.solidSolverProcessors = list() #list of partitions where the solid solver is initialized
-+ self.fluidSolverProcessors = list() #list of partitions where the fluid solver is initialized
-+ self.solidSolverProcessors = list() #list of partitions where the solid solver is initialized
- self.fluidInterfaceProcessors = list() #list of partitions where there are fluid interface nodes
-- self.solidInterfaceProcessors = list() #list of partitions where there are solid interface nodes
-+ self.solidInterfaceProcessors = list() #list of partitions where there are solid interface nodes
-
-- self.fluidInterfaceIdentifier = None #object that can identify the f/s interface within the fluid solver
-- self.solidInterfaceIdentifier = None #object that can identify the f/s interface within the solid solver
-+ self.fluidInterfaceIdentifier = None #object that can identify the f/s interface within the fluid solver
-+ self.solidInterfaceIdentifier = None #object that can identify the f/s interface within the solid solver
-
-- self.fluidGlobalIndexRange = {} #contains the global FSI indexing of each fluid interface node for all partitions
-- self.solidGlobalIndexRange = {} #contains the global FSI indexing of each solid interface node for all partitions
-+ self.fluidGlobalIndexRange = {} #contains the global FSI indexing of each fluid interface node for all partitions
-+ self.solidGlobalIndexRange = {} #contains the global FSI indexing of each solid interface node for all partitions
-
-- self.FluidHaloNodeList = {} #contains the the indices (fluid solver indexing) of the halo nodes for each partition
-- self.fluidIndexing = {} #links between the fluid solver indexing and the FSI indexing for the interface nodes
-- self.SolidHaloNodeList = {} #contains the the indices (solid solver indexing) of the halo nodes for each partition
-- self.solidIndexing = {} #links between the solid solver indexing and the FSI indexing for the interface nodes
--
-- self.nLocalFluidInterfaceNodes = 0 #number of nodes (halo nodes included) on the fluid interface, on each partition
-- self.nLocalFluidInterfaceHaloNode = 0 #number of halo nodes on the fluid intrface, on each partition
-- self.nLocalFluidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the fluid interface, on each partition
-- self.nFluidInterfaceNodes = 0 #number of nodes on the fluid interface, sum over all the partitions
-- self.nFluidInterfacePhysicalNodes = 0 #number of physical nodes on the fluid interface, sum over all partitions
--
-- self.nLocalSolidInterfaceNodes = 0 #number of physical nodes on the solid interface, on each partition
-- self.nLocalSolidInterfaceHaloNode = 0 #number of halo nodes on the solid intrface, on each partition
-- self.nLocalSolidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the solid interface, on each partition
-- self.nSolidInterfaceNodes = 0 #number of nodes on the solid interface, sum over all partitions
-- self.nSolidInterfacePhysicalNodes = 0 #number of physical nodes on the solid interface, sum over all partitions
-+ self.FluidHaloNodeList = {} #contains the the indices (fluid solver indexing) of the halo nodes for each partition
-+ self.fluidIndexing = {} #links between the fluid solver indexing and the FSI indexing for the interface nodes
-+ self.SolidHaloNodeList = {} #contains the the indices (solid solver indexing) of the halo nodes for each partition
-+ self.solidIndexing = {} #links between the solid solver indexing and the FSI indexing for the interface nodes
-+
-+ self.nLocalFluidInterfaceNodes = 0 #number of nodes (halo nodes included) on the fluid interface, on each partition
-+ self.nLocalFluidInterfaceHaloNode = 0 #number of halo nodes on the fluid intrface, on each partition
-+ self.nLocalFluidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the fluid interface, on each partition
-+ self.nFluidInterfaceNodes = 0 #number of nodes on the fluid interface, sum over all the partitions
-+ self.nFluidInterfacePhysicalNodes = 0 #number of physical nodes on the fluid interface, sum over all partitions
-+
-+ self.nLocalSolidInterfaceNodes = 0 #number of physical nodes on the solid interface, on each partition
-+ self.nLocalSolidInterfaceHaloNode = 0 #number of halo nodes on the solid intrface, on each partition
-+ self.nLocalSolidInterfacePhysicalNodes = 0 #number of physical (= non halo) nodes on the solid interface, on each partition
-+ self.nSolidInterfaceNodes = 0 #number of nodes on the solid interface, sum over all partitions
-+ self.nSolidInterfacePhysicalNodes = 0 #number of physical nodes on the solid interface, sum over all partitions
-
- if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
- self.MappingMatrixA = None
-@@ -106,83 +106,83 @@
- self.MappingMatrixB_T = None
- self.d_RBF = self.nDim+1
- else:
-- self.MappingMatrix = None #interpolation/mapping matrix for meshes interpolation/mapping
-- self.MappingMatrix_T = None #transposed interpolation/mapping matrix for meshes interpolation/mapping
-+ self.MappingMatrix = None #interpolation/mapping matrix for meshes interpolation/mapping
-+ self.MappingMatrix_T = None #transposed interpolation/mapping matrix for meshes interpolation/mapping
- self.d_RBF = 0
-
-- self.localFluidInterface_array_X_init = None #initial fluid interface position on each partition (used for the meshes mapping)
-+ self.localFluidInterface_array_X_init = None #initial fluid interface position on each partition (used for the meshes mapping)
- self.localFluidInterface_array_Y_init = None
- self.localFluidInterface_array_Z_init = None
-
-- self.haloNodesPositionsInit = {} #initial position of the halo nodes (fluid side only)
-+ self.haloNodesPositionsInit = {} #initial position of the halo nodes (fluid side only)
-
-- self.solidInterface_array_DispX = None #solid interface displacement
-+ self.solidInterface_array_DispX = None #solid interface displacement
- self.solidInterface_array_DispY = None
- self.solidInterface_array_DispZ = None
-
-- self.solidInterfaceResidual_array_X = None #solid interface position residual
-+ self.solidInterfaceResidual_array_X = None #solid interface position residual
- self.solidInterfaceResidual_array_Y = None
- self.solidInterfaceResidual_array_Z = None
-
-- self.solidInterfaceResidualnM1_array_X = None #solid interface position residual at the previous BGS iteration
-+ self.solidInterfaceResidualnM1_array_X = None #solid interface position residual at the previous BGS iteration
- self.solidInterfaceResidualnM1_array_Y = None
- self.solidInterfaceResidualnM1_array_Z = None
--
-- self.fluidInterface_array_DispX = None #fluid interface displacement
-+
-+ self.fluidInterface_array_DispX = None #fluid interface displacement
- self.fluidInterface_array_DispY = None
- self.fluidInterface_array_DispZ = None
-
-- self.fluidLoads_array_X = None #loads on the fluid side of the f/s interface
-+ self.fluidLoads_array_X = None #loads on the fluid side of the f/s interface
- self.fluidLoads_array_Y = None
- self.fluidLoads_array_Z = None
-
-- self.solidLoads_array_X = None #loads on the solid side of the f/s interface
-+ self.solidLoads_array_X = None #loads on the solid side of the f/s interface
- self.solidLoads_array_Y = None
- self.solidLoads_array_Z = None
-
-- self.aitkenParam = FSI_config['AITKEN_PARAM'] #relaxation parameter for the BGS method
-- self.FSIIter = 0 #current FSI iteration
-- self.unsteady = False #flag for steady or unsteady simulation (default is steady)
--
-- # ---Some screen output ---
-- self.MPIPrint('Fluid solver : SU2_CFD')
-- self.MPIPrint('Solid solver : {}'.format(FSI_config['CSD_SOLVER']))
-+ self.aitkenParam = FSI_config['AITKEN_PARAM'] #relaxation parameter for the BGS method
-+ self.FSIIter = 0 #current FSI iteration
-+ self.unsteady = False #flag for steady or unsteady simulation (default is steady)
-+
-+ # ---Some screen output ---
-+ self.MPIPrint('Fluid solver : SU2_CFD')
-+ self.MPIPrint('Solid solver : {}'.format(FSI_config['CSD_SOLVER']))
-
-- if FSI_config['TIME_MARCHING'] == 'YES':
-+ if FSI_config['TIME_MARCHING'] == 'YES':
- self.MPIPrint('Unsteady coupled simulation with physical time step : {} s'.format(FSI_config['UNST_TIMESTEP']))
- self.unsteady = True
-- else:
-- self.MPIPrint('Steady coupled simulation')
-+ else:
-+ self.MPIPrint('Steady coupled simulation')
-
-- if FSI_config['MATCHING_MESH'] == 'YES':
-- self.MPIPrint('Matching fluid-solid interface')
-- else:
-+ if FSI_config['MATCHING_MESH'] == 'YES':
-+ self.MPIPrint('Matching fluid-solid interface')
-+ else:
- if FSI_config['MESH_INTERP_METHOD'] == 'TPS':
-- self.MPIPrint('Non matching fluid-solid interface with Thin Plate Spline interpolation')
-+ self.MPIPrint('Non matching fluid-solid interface with Thin Plate Spline interpolation')
- elif FSI_config['MESH_INTERP_METHOD'] == 'RBF':
- self.MPIPrint('Non matching fluid-solid interface with Radial Basis Function interpolation')
- self.RBF_rad = FSI_config['RBF_RADIUS']
-- self.MPIPrint('Radius value : {}'.format(self.RBF_rad))
-+ self.MPIPrint('Radius value : {}'.format(self.RBF_rad))
- else:
-- self.MPIPrint('Non matching fluid-solid interface with Nearest Neighboor interpolation')
-+ self.MPIPrint('Non matching fluid-solid interface with Nearest Neighboor interpolation')
-
-- self.MPIPrint('Solid predictor : {}'.format(FSI_config['DISP_PRED']))
-+ self.MPIPrint('Solid predictor : {}'.format(FSI_config['DISP_PRED']))
-
-- self.MPIPrint('Maximum number of FSI iterations : {}'.format(FSI_config['NB_FSI_ITER']))
-+ self.MPIPrint('Maximum number of FSI iterations : {}'.format(FSI_config['NB_FSI_ITER']))
-
-- self.MPIPrint('FSI tolerance : {}'.format(FSI_config['FSI_TOLERANCE']))
-+ self.MPIPrint('FSI tolerance : {}'.format(FSI_config['FSI_TOLERANCE']))
-
-- if FSI_config['AITKEN_RELAX'] == 'STATIC':
-- self.MPIPrint('Static Aitken under-relaxation with constant parameter {}'.format(FSI_config['AITKEN_PARAM']))
-- elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC':
-- self.MPIPrint('Dynamic Aitken under-relaxation with initial parameter {}'.format(FSI_config['AITKEN_PARAM']))
-- else:
-- self.MPIPrint('No Aitken under-relaxation')
-+ if FSI_config['AITKEN_RELAX'] == 'STATIC':
-+ self.MPIPrint('Static Aitken under-relaxation with constant parameter {}'.format(FSI_config['AITKEN_PARAM']))
-+ elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC':
-+ self.MPIPrint('Dynamic Aitken under-relaxation with initial parameter {}'.format(FSI_config['AITKEN_PARAM']))
-+ else:
-+ self.MPIPrint('No Aitken under-relaxation')
-
- self.MPIPrint('FSI interface is set')
-
- def MPIPrint(self, message):
-- """
-+ """
- Print a message on screen only from the master process.
- """
-
-@@ -198,28 +198,28 @@
- """
- Perform a synchronization barrier in case of parallel run with MPI.
- """
--
-+
- if self.have_MPI == True:
- self.comm.barrier()
-
- def connect(self, FSI_config, FluidSolver, SolidSolver):
-- """
-- Connection between solvers.
-- Creates the communication support between the two solvers.
-- Gets information about f/s interfaces from the two solvers.
-- """
-+ """
-+ Connection between solvers.
-+ Creates the communication support between the two solvers.
-+ Gets information about f/s interfaces from the two solvers.
-+ """
- if self.have_MPI == True:
- myid = self.comm.Get_rank()
-- MPIsize = self.comm.Get_size()
-+ MPIsize = self.comm.Get_size()
- else:
- myid = 0
- MPIsize = 1
--
-- # --- Identify the fluid and solid interfaces and store the number of nodes on both sides (and for each partition) ---
-+
-+ # --- Identify the fluid and solid interfaces and store the number of nodes on both sides (and for each partition) ---
- self.fluidInterfaceIdentifier = None
- self.nLocalFluidInterfaceNodes = 0
- if FluidSolver != None:
-- print('Fluid solver is initialized on process {}'.format(myid))
-+ print('Fluid solver is initialized on process {}'.format(myid))
- self.haveFluidSolver = True
- allMovingMarkersTags = FluidSolver.GetAllMovingMarkersTag()
- allMarkersID = FluidSolver.GetAllBoundaryMarkers()
-@@ -229,23 +229,23 @@
- if allMovingMarkersTags[0] in allMarkersID.keys():
- self.fluidInterfaceIdentifier = allMarkersID[allMovingMarkersTags[0]]
- if self.fluidInterfaceIdentifier != None:
-- self.nLocalFluidInterfaceNodes = FluidSolver.GetNumberVertices(self.fluidInterfaceIdentifier)
-- if self.nLocalFluidInterfaceNodes != 0:
-+ self.nLocalFluidInterfaceNodes = FluidSolver.GetNumberVertices(self.fluidInterfaceIdentifier)
-+ if self.nLocalFluidInterfaceNodes != 0:
- self.haveFluidInterface = True
-- print('Number of interface fluid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalFluidInterfaceNodes))
-- else:
-- pass
-+ print('Number of interface fluid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalFluidInterfaceNodes))
-+ else:
-+ pass
-
-- if SolidSolver != None:
-- print('Solid solver is initialized on process {}'.format(myid))
-+ if SolidSolver != None:
-+ print('Solid solver is initialized on process {}'.format(myid))
- self.haveSolidSolver = True
-- self.solidInterfaceIdentifier = SolidSolver.getFSIMarkerID()
-- self.nLocalSolidInterfaceNodes = SolidSolver.getNumberOfSolidInterfaceNodes(self.solidInterfaceIdentifier)
-- if self.nLocalSolidInterfaceNodes != 0:
-+ self.solidInterfaceIdentifier = SolidSolver.getFSIMarkerID()
-+ self.nLocalSolidInterfaceNodes = SolidSolver.getNumberOfSolidInterfaceNodes(self.solidInterfaceIdentifier)
-+ if self.nLocalSolidInterfaceNodes != 0:
- self.haveSolidInterface = True
- print('Number of interface solid nodes (halo nodes included) on proccess {} : {}'.format(myid,self.nLocalSolidInterfaceNodes))
-- else:
-- pass
-+ else:
-+ pass
-
- # --- Exchange information about processors on which the solvers are defined and where the interface nodes are lying ---
- if self.have_MPI == True:
-@@ -266,18 +266,18 @@
- else:
- sendBufSolidInterface = np.array(int(0))
- rcvBufFluid = np.zeros(MPIsize, dtype = int)
-- rcvBufSolid = np.zeros(MPIsize, dtype = int)
-+ rcvBufSolid = np.zeros(MPIsize, dtype = int)
- rcvBufFluidInterface = np.zeros(MPIsize, dtype = int)
-- rcvBufSolidInterface = np.zeros(MPIsize, dtype = int)
-+ rcvBufSolidInterface = np.zeros(MPIsize, dtype = int)
- self.comm.Allgather(sendBufFluid, rcvBufFluid)
- self.comm.Allgather(sendBufSolid, rcvBufSolid)
- self.comm.Allgather(sendBufFluidInterface, rcvBufFluidInterface)
- self.comm.Allgather(sendBufSolidInterface, rcvBufSolidInterface)
- for iProc in range(MPIsize):
-- if rcvBufFluid[iProc] == 1:
-+ if rcvBufFluid[iProc] == 1:
- self.fluidSolverProcessors.append(iProc)
- if rcvBufSolid[iProc] == 1:
-- self.solidSolverProcessors.append(iProc)
-+ self.solidSolverProcessors.append(iProc)
- if rcvBufFluidInterface[iProc] == 1:
- self.fluidInterfaceProcessors.append(iProc)
- if rcvBufSolidInterface[iProc] == 1:
-@@ -285,19 +285,19 @@
- del sendBufFluid, sendBufSolid, rcvBufFluid, rcvBufSolid, sendBufFluidInterface, sendBufSolidInterface, rcvBufFluidInterface, rcvBufSolidInterface
- else:
- self.fluidSolverProcessors.append(0)
-- self.solidSolverProcessors.append(0)
-+ self.solidSolverProcessors.append(0)
- self.fluidInterfaceProcessors.append(0)
- self.solidInterfaceProcessors.append(0)
-
-- self.MPIBarrier()
--
-- # --- Calculate the total number of nodes at the fluid interface (sum over all the partitions) ---
-+ self.MPIBarrier()
-+
-+ # --- Calculate the total number of nodes at the fluid interface (sum over all the partitions) ---
- # Calculate the number of halo nodes on each partition
- self.nLocalFluidInterfaceHaloNode = 0
-- for iVertex in range(self.nLocalFluidInterfaceNodes):
-+ for iVertex in range(self.nLocalFluidInterfaceNodes):
- if FluidSolver.IsAHaloNode(self.fluidInterfaceIdentifier, iVertex) == True:
- GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
-- self.FluidHaloNodeList[GlobalIndex] = iVertex
-+ self.FluidHaloNodeList[GlobalIndex] = iVertex
- self.nLocalFluidInterfaceHaloNode += 1
- # Calculate the number of physical (= not halo) nodes on each partition
- self.nLocalFluidInterfacePhysicalNodes = self.nLocalFluidInterfaceNodes - self.nLocalFluidInterfaceHaloNode
-@@ -308,10 +308,10 @@
-
- # Same thing for the solid part
- self.nLocalSolidInterfaceHaloNode = 0
-- #for iVertex in range(self.nLocalSolidInterfaceNodes):
-+ #for iVertex in range(self.nLocalSolidInterfaceNodes):
- #if SoliddSolver.IsAHaloNode(self.fluidInterfaceIdentifier, iVertex) == True:
- #GlobalIndex = SolidSolver.GetVertexGlobalIndex(self.solidInterfaceIdentifier, iVertex)
-- #self.SolidHaloNodeList[GlobalIndex] = iVertex
-+ #self.SolidHaloNodeList[GlobalIndex] = iVertex
- #self.nLocalSolidInterfaceHaloNode += 1
- self.nLocalSolidInterfacePhysicalNodes = self.nLocalSolidInterfaceNodes - self.nLocalSolidInterfaceHaloNode
- if self.have_MPI == True:
-@@ -323,11 +323,11 @@
- # --- Calculate the total number of nodes (with and without halo) at the fluid interface (sum over all the partitions) and broadcast the number accross all processors ---
- sendBuffHalo = np.array(int(self.nLocalFluidInterfaceNodes))
- sendBuffPhysical = np.array(int(self.nLocalFluidInterfacePhysicalNodes))
-- rcvBuffHalo = np.zeros(1, dtype=int)
-+ rcvBuffHalo = np.zeros(1, dtype=int)
- rcvBuffPhysical = np.zeros(1, dtype=int)
-- if self.have_MPI == True:
-+ if self.have_MPI == True:
- self.comm.barrier()
-- self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM)
-+ self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM)
- self.comm.Allreduce(sendBuffPhysical,rcvBuffPhysical,op=self.MPI.SUM)
- self.nFluidInterfaceNodes = rcvBuffHalo[0]
- self.nFluidInterfacePhysicalNodes = rcvBuffPhysical[0]
-@@ -339,11 +339,11 @@
- # Same thing for the solid part
- sendBuffHalo = np.array(int(self.nLocalSolidInterfaceNodes))
- sendBuffPhysical = np.array(int(self.nLocalSolidInterfacePhysicalNodes))
-- rcvBuffHalo = np.zeros(1, dtype=int)
-+ rcvBuffHalo = np.zeros(1, dtype=int)
- rcvBuffPhysical = np.zeros(1, dtype=int)
- if self.have_MPI == True:
-- self.comm.barrier()
-- self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM)
-+ self.comm.barrier()
-+ self.comm.Allreduce(sendBuffHalo,rcvBuffHalo,op=self.MPI.SUM)
- self.comm.Allreduce(sendBuffPhysical,rcvBuffPhysical,op=self.MPI.SUM)
- self.nSolidInterfaceNodes = rcvBuffHalo[0]
- self.nSolidInterfacePhysicalNodes = rcvBuffPhysical[0]
-@@ -375,7 +375,7 @@
- if myid in self.fluidInterfaceProcessors:
- globalIndexStart = 0
- for iProc in range(myid):
-- globalIndexStart += self.fluidPhysicalInterfaceNodesDistribution[iProc]
-+ globalIndexStart += self.fluidPhysicalInterfaceNodesDistribution[iProc]
- globalIndexStop = globalIndexStart + self.nLocalFluidInterfacePhysicalNodes-1
- else:
- globalIndexStart = 0
-@@ -387,8 +387,8 @@
- temp[0] = [0,self.nLocalFluidInterfacePhysicalNodes-1]
- self.fluidGlobalIndexRange = list()
- self.fluidGlobalIndexRange.append(temp)
--
-- # Same thing for the solid part
-+
-+ # Same thing for the solid part
- if self.have_MPI == True:
- if myid in self.solidInterfaceProcessors:
- globalIndexStart = 0
-@@ -404,14 +404,14 @@
- temp = {}
- temp[0] = [0,self.nSolidInterfacePhysicalNodes-1]
- self.solidGlobalIndexRange = list()
-- self.solidGlobalIndexRange.append(temp)
-+ self.solidGlobalIndexRange.append(temp)
-
-- self.MPIPrint('Total number of fluid interface nodes (halo nodes included) : {}'.format(self.nFluidInterfaceNodes))
-- self.MPIPrint('Total number of solid interface nodes (halo nodes included) : {}'.format(self.nSolidInterfaceNodes))
-+ self.MPIPrint('Total number of fluid interface nodes (halo nodes included) : {}'.format(self.nFluidInterfaceNodes))
-+ self.MPIPrint('Total number of solid interface nodes (halo nodes included) : {}'.format(self.nSolidInterfaceNodes))
- self.MPIPrint('Total number of fluid interface nodes : {}'.format(self.nFluidInterfacePhysicalNodes))
- self.MPIPrint('Total number of solid interface nodes : {}'.format(self.nSolidInterfacePhysicalNodes))
-
-- self.MPIBarrier()
-+ self.MPIBarrier()
-
- # --- Create all the PETSc vectors required for parallel communication and parallel mesh mapping/interpolation (working for serial too) ---
- if self.have_MPI == True:
-@@ -432,8 +432,8 @@
- self.solidInterface_array_DispY.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
- self.solidInterface_array_DispZ.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
- self.solidInterface_array_DispX.set(0.0)
-- self.solidInterface_array_DispY.set(0.0)
-- self.solidInterface_array_DispZ.set(0.0)
-+ self.solidInterface_array_DispY.set(0.0)
-+ self.solidInterface_array_DispZ.set(0.0)
-
- if self.have_MPI == True:
- self.fluidInterface_array_DispX = PETSc.Vec().create(self.comm)
-@@ -536,30 +536,30 @@
- self.solidInterfaceResidualnM1_array_Z.set(0.0)
-
- def interfaceMapping(self,FluidSolver, SolidSolver, FSI_config):
-- """
-- Creates the one-to-one mapping between interfaces in case of matching meshes.
-- Creates the interpolation rules between interfaces in case of non-matching meshes.
-- """
-- if self.have_MPI == True:
-+ """
-+ Creates the one-to-one mapping between interfaces in case of matching meshes.
-+ Creates the interpolation rules between interfaces in case of non-matching meshes.
-+ """
-+ if self.have_MPI == True:
- myid = self.comm.Get_rank()
-- MPIsize = self.comm.Get_size()
-+ MPIsize = self.comm.Get_size()
- else:
- myid = 0
- MPIsize = 1
-
-- # --- Get the fluid interface from fluid solver on each partition ---
-- GlobalIndex = int()
-+ # --- Get the fluid interface from fluid solver on each partition ---
-+ GlobalIndex = int()
- localIndex = 0
- fluidIndexing_temp = {}
- self.localFluidInterface_array_X_init = np.zeros((self.nLocalFluidInterfacePhysicalNodes))
- self.localFluidInterface_array_Y_init = np.zeros((self.nLocalFluidInterfacePhysicalNodes))
- self.localFluidInterface_array_Z_init = np.zeros((self.nLocalFluidInterfacePhysicalNodes))
- for iVertex in range(self.nLocalFluidInterfaceNodes):
-- GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
-- posx = FluidSolver.GetVertexCoordX(self.fluidInterfaceIdentifier, iVertex)
-- posy = FluidSolver.GetVertexCoordY(self.fluidInterfaceIdentifier, iVertex)
-- posz = FluidSolver.GetVertexCoordZ(self.fluidInterfaceIdentifier, iVertex)
-- if GlobalIndex in self.FluidHaloNodeList[myid].keys():
-+ GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
-+ posx = FluidSolver.GetVertexCoordX(self.fluidInterfaceIdentifier, iVertex)
-+ posy = FluidSolver.GetVertexCoordY(self.fluidInterfaceIdentifier, iVertex)
-+ posz = FluidSolver.GetVertexCoordZ(self.fluidInterfaceIdentifier, iVertex)
-+ if GlobalIndex in self.FluidHaloNodeList[myid].keys():
- self.haloNodesPositionsInit[GlobalIndex] = (posx, posy, posz)
- else:
- fluidIndexing_temp[GlobalIndex] = self.__getGlobalIndex('fluid', myid, localIndex)
-@@ -576,17 +576,17 @@
- self.fluidIndexing = fluidIndexing_temp.copy()
- del fluidIndexing_temp
-
-- # --- Get the solid interface from solid solver on each partition ---
-+ # --- Get the solid interface from solid solver on each partition ---
- localIndex = 0
- solidIndexing_temp = {}
-- self.localSolidInterface_array_X = np.zeros(self.nLocalSolidInterfaceNodes)
-+ self.localSolidInterface_array_X = np.zeros(self.nLocalSolidInterfaceNodes)
- self.localSolidInterface_array_Y = np.zeros(self.nLocalSolidInterfaceNodes)
- self.localSolidInterface_array_Z = np.zeros(self.nLocalSolidInterfaceNodes)
- for iVertex in range(self.nLocalSolidInterfaceNodes):
- GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
-- posx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex)
-- posy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex)
-- posz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex)
-+ posx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex)
-+ posy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex)
-+ posz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex)
- if GlobalIndex in self.SolidHaloNodeList[myid].keys():
- pass
- else:
-@@ -605,14 +605,14 @@
- del solidIndexing_temp
-
-
-- # --- Create the PETSc parallel interpolation matrix ---
-+ # --- Create the PETSc parallel interpolation matrix ---
- if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
- if self.have_MPI == True:
- self.MappingMatrixA = PETSc.Mat().create(self.comm)
- self.MappingMatrixB = PETSc.Mat().create(self.comm)
- self.MappingMatrixA_T = PETSc.Mat().create(self.comm)
- self.MappingMatrixB_T = PETSc.Mat().create(self.comm)
-- if FSI_config['MESH_INTERP_METHOD'] == 'RBF' :
-+ if FSI_config['MESH_INTERP_METHOD'] == 'RBF' :
- self.MappingMatrixA.setType('mpiaij')
- self.MappingMatrixB.setType('mpiaij')
- self.MappingMatrixA_T.setType('mpiaij')
-@@ -627,7 +627,7 @@
- self.MappingMatrixB = PETSc.Mat().create()
- self.MappingMatrixA_T = PETSc.Mat().create()
- self.MappingMatrixB_T = PETSc.Mat().create()
-- if FSI_config['MESH_INTERP_METHOD'] == 'RBF' :
-+ if FSI_config['MESH_INTERP_METHOD'] == 'RBF' :
- self.MappingMatrixA.setType('aij')
- self.MappingMatrixB.setType('aij')
- self.MappingMatrixA_T.setType('aij')
-@@ -637,16 +637,16 @@
- self.MappingMatrixB.setType('aij')
- self.MappingMatrixA_T.setType('aij')
- self.MappingMatrixB_T.setType('aij')
-- self.MappingMatrixA.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF))
-+ self.MappingMatrixA.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF))
- self.MappingMatrixA.setUp()
- self.MappingMatrixA.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
-- self.MappingMatrixB.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes+self.d_RBF))
-+ self.MappingMatrixB.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes+self.d_RBF))
- self.MappingMatrixB.setUp()
- self.MappingMatrixB.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
-- self.MappingMatrixA_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF))
-+ self.MappingMatrixA_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nSolidInterfacePhysicalNodes+self.d_RBF))
- self.MappingMatrixA_T.setUp()
- self.MappingMatrixA_T.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
-- self.MappingMatrixB_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nFluidInterfacePhysicalNodes))
-+ self.MappingMatrixB_T.setSizes((self.nSolidInterfacePhysicalNodes+self.d_RBF, self.nFluidInterfacePhysicalNodes))
- self.MappingMatrixB_T.setUp()
- self.MappingMatrixB_T.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
- else:
-@@ -660,21 +660,21 @@
- self.MappingMatrix_T = PETSc.Mat().create()
- self.MappingMatrix.setType('aij')
- self.MappingMatrix_T.setType('aij')
-- self.MappingMatrix.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes))
-+ self.MappingMatrix.setSizes((self.nFluidInterfacePhysicalNodes, self.nSolidInterfacePhysicalNodes))
- self.MappingMatrix.setUp()
- self.MappingMatrix.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
-- self.MappingMatrix_T.setSizes((self.nSolidInterfacePhysicalNodes, self.nFluidInterfacePhysicalNodes))
-+ self.MappingMatrix_T.setSizes((self.nSolidInterfacePhysicalNodes, self.nFluidInterfacePhysicalNodes))
- self.MappingMatrix_T.setUp()
- self.MappingMatrix_T.setOption(PETSc.Mat().Option.NEW_NONZERO_ALLOCATION_ERR, False)
--
--
-+
-+
- # --- Fill the interpolation matrix in parallel (working in serial too) ---
- if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
- self.MPIPrint('Building interpolation matrices...')
- if self.have_MPI == True:
- for iProc in self.solidInterfaceProcessors:
- if myid == iProc:
-- for jProc in self.solidInterfaceProcessors:
-+ for jProc in self.solidInterfaceProcessors:
- self.comm.Send(self.localSolidInterface_array_X, dest=jProc, tag=1)
- self.comm.Send(self.localSolidInterface_array_Y, dest=jProc, tag=2)
- self.comm.Send(self.localSolidInterface_array_Z, dest=jProc, tag=3)
-@@ -726,7 +726,7 @@
- self.TPSMeshMapping_B(solidInterfaceBuffRcv_X, solidInterfaceBuffRcv_Y, solidInterfaceBuffRcv_Z, iProc)
- else:
- self.NearestNeighboorMeshMapping(solidInterfaceBuffRcv_X, solidInterfaceBuffRcv_Y, solidInterfaceBuffRcv_Z, iProc)
-- else:
-+ else:
- self.matchingMeshMapping(solidInterfaceBuffRcv_X, solidInterfaceBuffRcv_Y, solidInterfaceBuffRcv_Z, iProc)
- else:
- if FSI_config['MATCHING_MESH'] == 'NO':
-@@ -735,10 +735,10 @@
- elif FSI_config['MESH_INTERP_METHOD'] == 'TPS' :
- self.TPSMeshMapping_B(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0)
- else:
-- self.NearestNeighboorMeshMapping(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0)
-- else:
-+ self.NearestNeighboorMeshMapping(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0)
-+ else:
- self.matchingMeshMapping(self.localSolidInterface_array_X, self.localSolidInterface_array_Y, self.localSolidInterface_array_Z, 0)
--
-+
- if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
- self.MappingMatrixB.assemblyBegin()
- self.MappingMatrixB.assemblyEnd()
-@@ -751,9 +751,9 @@
- self.MappingMatrix_T.assemblyBegin()
- self.MappingMatrix_T.assemblyEnd()
- self.MPIPrint("Interpolation matrix is built.")
--
-+
- self.MPIBarrier()
--
-+
- del self.localSolidInterface_array_X
- del self.localSolidInterface_array_Y
- del self.localSolidInterface_array_Z
-@@ -768,20 +768,20 @@
- myid = 0
-
- # --- Instantiate the spatial indexing ---
-- prop_index = index.Property()
-- prop_index.dimension = self.nDim
-- SolidSpatialTree = index.Index(properties=prop_index)
--
-+ prop_index = index.Property()
-+ prop_index.dimension = self.nDim
-+ SolidSpatialTree = index.Index(properties=prop_index)
-+
- nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
-
- for jVertex in range(nSolidNodes):
- posX = solidInterfaceBuffRcv_X[jVertex]
- posY = solidInterfaceBuffRcv_Y[jVertex]
- posZ = solidInterfaceBuffRcv_Z[jVertex]
-- if self.nDim == 2 :
-- SolidSpatialTree.add(jVertex, (posX, posY))
-- else :
-- SolidSpatialTree.add(jVertex, (posX, posY, posZ))
-+ if self.nDim == 2 :
-+ SolidSpatialTree.add(jVertex, (posX, posY))
-+ else :
-+ SolidSpatialTree.add(jVertex, (posX, posY, posZ))
-
- if self.nFluidInterfacePhysicalNodes != self.nSolidInterfacePhysicalNodes:
- raise Exception("Fluid and solid interface must have the same number of nodes for matching meshes ! ")
-@@ -822,20 +822,20 @@
- myid = 0
-
- # --- Instantiate the spatial indexing ---
-- prop_index = index.Property()
-- prop_index.dimension = self.nDim
-- SolidSpatialTree = index.Index(properties=prop_index)
--
-+ prop_index = index.Property()
-+ prop_index.dimension = self.nDim
-+ SolidSpatialTree = index.Index(properties=prop_index)
-+
- nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
-
- for jVertex in range(nSolidNodes):
- posX = solidInterfaceBuffRcv_X[jVertex]
- posY = solidInterfaceBuffRcv_Y[jVertex]
- posZ = solidInterfaceBuffRcv_Z[jVertex]
-- if self.nDim == 2 :
-- SolidSpatialTree.add(jVertex, (posX, posY))
-- else :
-- SolidSpatialTree.add(jVertex, (posX, posY, posZ))
-+ if self.nDim == 2 :
-+ SolidSpatialTree.add(jVertex, (posX, posY))
-+ else :
-+ SolidSpatialTree.add(jVertex, (posX, posY, posZ))
-
- # --- For each fluid interface node, find the nearest solid interface node and fill the boolean mapping matrix ---
- for iVertexFluid in range(self.nLocalFluidInterfacePhysicalNodes):
-@@ -863,20 +863,20 @@
- myid = 0
-
- # --- Instantiate the spatial indexing ---
-- prop_index = index.Property()
-- prop_index.dimension = self.nDim
-- SolidSpatialTree = index.Index(properties=prop_index)
--
-+ prop_index = index.Property()
-+ prop_index.dimension = self.nDim
-+ SolidSpatialTree = index.Index(properties=prop_index)
-+
- nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
-
- for jVertex in range(nSolidNodes):
- posX = solidInterfaceBuffRcv_X[jVertex]
- posY = solidInterfaceBuffRcv_Y[jVertex]
- posZ = solidInterfaceBuffRcv_Z[jVertex]
-- if self.nDim == 2 :
-- SolidSpatialTree.add(jVertex, (posX, posY))
-- else :
-- SolidSpatialTree.add(jVertex, (posX, posY, posZ))
-+ if self.nDim == 2 :
-+ SolidSpatialTree.add(jVertex, (posX, posY))
-+ else :
-+ SolidSpatialTree.add(jVertex, (posX, posY, posZ))
-
- for iVertexSolid in range(self.nLocalSolidInterfaceNodes):
- posX = self.localSolidInterface_array_X[iVertexSolid]
-@@ -915,20 +915,20 @@
- myid = 0
-
- # --- Instantiate the spatial indexing ---
-- prop_index = index.Property()
-- prop_index.dimension = self.nDim
-- SolidSpatialTree = index.Index(properties=prop_index)
--
-+ prop_index = index.Property()
-+ prop_index.dimension = self.nDim
-+ SolidSpatialTree = index.Index(properties=prop_index)
-+
- nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
-
- for jVertex in range(nSolidNodes):
- posX = solidInterfaceBuffRcv_X[jVertex]
- posY = solidInterfaceBuffRcv_Y[jVertex]
- posZ = solidInterfaceBuffRcv_Z[jVertex]
-- if self.nDim == 2 :
-- SolidSpatialTree.add(jVertex, (posX, posY))
-- else :
-- SolidSpatialTree.add(jVertex, (posX, posY, posZ))
-+ if self.nDim == 2 :
-+ SolidSpatialTree.add(jVertex, (posX, posY))
-+ else :
-+ SolidSpatialTree.add(jVertex, (posX, posY, posZ))
-
- for iVertexFluid in range(self.nLocalFluidInterfacePhysicalNodes):
- posX = self.localFluidInterface_array_X_init[iVertexFluid]
-@@ -965,7 +965,7 @@
- myid = self.comm.Get_rank()
- else:
- myid = 0
--
-+
- nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
-
- for iVertexSolid in range(self.nLocalSolidInterfaceNodes):
-@@ -999,7 +999,7 @@
- myid = self.comm.Get_rank()
- else:
- myid = 0
--
-+
- nSolidNodes = solidInterfaceBuffRcv_X.shape[0]
-
- for iVertexFluid in range(self.nLocalFluidInterfacePhysicalNodes):
-@@ -1031,7 +1031,7 @@
- """
- phi = 0.0
- eps = distance/rad
--
-+
- if eps < 1:
- phi = ((1.0-eps)**4)*(4.0*eps+1.0)
- else:
-@@ -1044,20 +1044,20 @@
- Description
- """
- phi = 0.0
--
-+
- if distance > 0.0:
- phi = (distance**2)*np.log10(distance)
- else:
- phi = 0.0
-
-- return phi
-+ return phi
-
-
- def interpolateSolidPositionOnFluidMesh(self, FSI_config):
-- """
-- Applies the one-to-one mapping or the interpolaiton rules from solid to fluid mesh.
-- """
-- if self.have_MPI == True:
-+ """
-+ Applies the one-to-one mapping or the interpolaiton rules from solid to fluid mesh.
-+ """
-+ if self.have_MPI == True:
- myid = self.comm.Get_rank()
- MPIsize = self.comm.Get_size()
- else:
-@@ -1110,12 +1110,12 @@
- del gamma_array_DispY
- del gamma_array_DispZ
- del KSP_solver
-- else:
-+ else:
- self.MappingMatrix.mult(self.solidInterface_array_DispX, self.fluidInterface_array_DispX)
- self.MappingMatrix.mult(self.solidInterface_array_DispY, self.fluidInterface_array_DispY)
- self.MappingMatrix.mult(self.solidInterface_array_DispZ, self.fluidInterface_array_DispZ)
-
-- # --- Checking conservation ---
-+ # --- Checking conservation ---
- WSX = self.solidLoads_array_X.dot(self.solidInterface_array_DispX)
- WSY = self.solidLoads_array_Y.dot(self.solidInterface_array_DispY)
- WSZ = self.solidLoads_array_Z.dot(self.solidInterface_array_DispZ)
-@@ -1124,11 +1124,11 @@
- WFY = self.fluidLoads_array_Y.dot(self.fluidInterface_array_DispY)
- WFZ = self.fluidLoads_array_Z.dot(self.fluidInterface_array_DispZ)
-
-- self.MPIPrint("Checking f/s interface conservation...")
-- self.MPIPrint('Solid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WSX, WSY, WSZ))
-- self.MPIPrint('Fluid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WFX, WFY, WFZ))
-+ self.MPIPrint("Checking f/s interface conservation...")
-+ self.MPIPrint('Solid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WSX, WSY, WSZ))
-+ self.MPIPrint('Fluid side (Wx, Wy, Wz) = ({}, {}, {})'.format(WFX, WFY, WFZ))
-+
-
--
- # --- Redistribute the interpolated fluid interface according to the partitions that own the fluid interface ---
- # Gather the fluid interface on the master process
- if self.have_MPI == True:
-@@ -1156,7 +1156,7 @@
- displ = tuple(displ)
-
- del sendBuffNumber, rcvBuffNumber
--
-+
- #print("DEBUG MESSAGE From proc {}, counts = {}".format(myid, counts))
- #print("DEBUG MESSAGE From proc {}, displ = {}".format(myid, displ))
-
-@@ -1213,18 +1213,18 @@
- del sendBuff
-
- def interpolateFluidLoadsOnSolidMesh(self, FSI_config):
-- """
-- Applies the one-to-one mapping or the interpolaiton rules from fluid to solid mesh.
-- """
-- if self.have_MPI == True:
-+ """
-+ Applies the one-to-one mapping or the interpolaiton rules from fluid to solid mesh.
-+ """
-+ if self.have_MPI == True:
- myid = self.comm.Get_rank()
- MPIsize = self.comm.Get_size()
- else:
- myid = 0
- MPIsize = 1
--
-+
- # --- Interpolate (or map) in parallel the fluid interface loads on the solid interface ---
-- #self.MappingMatrix.transpose()
-+ #self.MappingMatrix.transpose()
- if FSI_config['MATCHING_MESH'] == 'NO' and (FSI_config['MESH_INTERP_METHOD'] == 'RBF' or FSI_config['MESH_INTERP_METHOD'] == 'TPS'):
- if self.have_MPI == True:
- gamma_array_LoadX = PETSc.Vec().create(self.comm)
-@@ -1280,10 +1280,10 @@
- self.solidLoads_array_X_recon = None
- self.solidLoads_array_Y_recon = None
- self.solidLoads_array_Z_recon = None
-- if myid == self.rootProcess:
-- self.solidLoads_array_X_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
-- self.solidLoads_array_Y_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
-- self.solidLoads_array_Z_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
-+ if myid == self.rootProcess:
-+ self.solidLoads_array_X_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
-+ self.solidLoads_array_Y_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
-+ self.solidLoads_array_Z_recon = np.zeros(self.nSolidInterfacePhysicalNodes+self.d_RBF)
- myNumberOfNodes = self.solidLoads_array_X.getArray().shape[0]
- sendBuffNumber = np.array([myNumberOfNodes], dtype=int)
- rcvBuffNumber = np.zeros(MPIsize, dtype=int)
-@@ -1293,9 +1293,9 @@
- displ = np.zeros(MPIsize, dtype=int)
- for ii in range(rcvBuffNumber.shape[0]):
- displ[ii] = rcvBuffNumber[0:ii].sum()
-- displ = tuple(displ)
-+ displ = tuple(displ)
-
-- del sendBuffNumber, rcvBuffNumber
-+ del sendBuffNumber, rcvBuffNumber
-
- self.comm.Gatherv(self.solidLoads_array_X.getArray(), [self.solidLoads_array_X_recon, counts, displ, self.MPI.DOUBLE], root=self.rootProcess)
- self.comm.Gatherv(self.solidLoads_array_Y.getArray(), [self.solidLoads_array_Y_recon, counts, displ, self.MPI.DOUBLE], root=self.rootProcess)
-@@ -1336,25 +1336,25 @@
-
-
- '''def getSolidInterfacePosition(self, SolidSolver):
-- """
-- Gets the current solid interface position from the solid solver.
-- """
-+ """
-+ Gets the current solid interface position from the solid solver.
-+ """
- if self.have_MPI == True:
-- myid = self.comm.Get_rank()
-+ myid = self.comm.Get_rank()
- else:
- myid = 0
--
-+
- # --- Get the solid interface position from the solid solver and directly fill the corresponding PETSc vector ---
- GlobalIndex = int()
- localIndex = 0
-- for iVertex in range(self.nLocalSolidInterfaceNodes):
-+ for iVertex in range(self.nLocalSolidInterfaceNodes):
- GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
- if GlobalIndex in self.SolidHaloNodeList[myid].keys():
- pass
- else:
-- newPosx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex)
-- newPosy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex)
-- newPosz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex)
-+ newPosx = SolidSolver.getInterfaceNodePosX(self.solidInterfaceIdentifier, iVertex)
-+ newPosy = SolidSolver.getInterfaceNodePosY(self.solidInterfaceIdentifier, iVertex)
-+ newPosz = SolidSolver.getInterfaceNodePosZ(self.solidInterfaceIdentifier, iVertex)
- iGlobalVertex = self.__getGlobalIndex('solid', myid, localIndex)
- self.solidInterface_array_X.setValues([iGlobalVertex],newPosx)
- self.solidInterface_array_Y.setValues([iGlobalVertex],newPosy)
-@@ -1375,25 +1375,25 @@
- #print("DEBUG MESSAGE From PROC {} : array_X = {}".format(myid, self.solidInterface_array_X.getArray()))'''
-
- def getSolidInterfaceDisplacement(self, SolidSolver):
-- """
-- Gets the current solid interface position from the solid solver.
-- """
-+ """
-+ Gets the current solid interface position from the solid solver.
-+ """
- if self.have_MPI == True:
-- myid = self.comm.Get_rank()
-+ myid = self.comm.Get_rank()
- else:
- myid = 0
--
-+
- # --- Get the solid interface position from the solid solver and directly fill the corresponding PETSc vector ---
- GlobalIndex = int()
- localIndex = 0
-- for iVertex in range(self.nLocalSolidInterfaceNodes):
-+ for iVertex in range(self.nLocalSolidInterfaceNodes):
- GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
- if GlobalIndex in self.SolidHaloNodeList[myid].keys():
- pass
- else:
-- newDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex)
-- newDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex)
-- newDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex)
-+ newDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex)
-+ newDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex)
-+ newDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex)
- iGlobalVertex = self.__getGlobalIndex('solid', myid, localIndex)
- self.solidInterface_array_DispX.setValues([iGlobalVertex],newDispx)
- self.solidInterface_array_DispY.setValues([iGlobalVertex],newDispy)
-@@ -1408,9 +1408,9 @@
- self.solidInterface_array_DispZ.assemblyEnd()
-
- def getFluidInterfaceNodalForce(self, FSI_config, FluidSolver):
-- """
-- Gets the fluid interface loads from the fluid solver.
-- """
-+ """
-+ Gets the fluid interface loads from the fluid solver.
-+ """
- if self.have_MPI == True:
- myid = self.comm.Get_rank()
- else:
-@@ -1422,17 +1422,17 @@
- FZ = 0.0
-
- # --- Get the fluid interface loads from the fluid solver and directly fill the corresponding PETSc vector ---
-- for iVertex in range(self.nLocalFluidInterfaceNodes):
-- halo = FluidSolver.ComputeVertexForces(self.fluidInterfaceIdentifier, iVertex) # !!we have to ignore halo node coming from mesh partitioning because they introduice non-physical forces
-- if halo==False:
-- if FSI_config['CSD_SOLVER'] == 'GETDP':
-- newFx = FluidSolver.GetVertexForceDensityX(self.fluidInterfaceIdentifier, iVertex)
-- newFy = FluidSolver.GetVertexForceDensityY(self.fluidInterfaceIdentifier, iVertex)
-- newFz = FluidSolver.GetVertexForceDensityZ(self.fluidInterfaceIdentifier, iVertex)
-- else:
-- newFx = FluidSolver.GetVertexForceX(self.fluidInterfaceIdentifier, iVertex)
-- newFy = FluidSolver.GetVertexForceY(self.fluidInterfaceIdentifier, iVertex)
-- newFz = FluidSolver.GetVertexForceZ(self.fluidInterfaceIdentifier, iVertex)
-+ for iVertex in range(self.nLocalFluidInterfaceNodes):
-+ halo = FluidSolver.ComputeVertexForces(self.fluidInterfaceIdentifier, iVertex) # !!we have to ignore halo node coming from mesh partitioning because they introduice non-physical forces
-+ if halo==False:
-+ if FSI_config['CSD_SOLVER'] == 'GETDP':
-+ newFx = FluidSolver.GetVertexForceDensityX(self.fluidInterfaceIdentifier, iVertex)
-+ newFy = FluidSolver.GetVertexForceDensityY(self.fluidInterfaceIdentifier, iVertex)
-+ newFz = FluidSolver.GetVertexForceDensityZ(self.fluidInterfaceIdentifier, iVertex)
-+ else:
-+ newFx = FluidSolver.GetVertexForceX(self.fluidInterfaceIdentifier, iVertex)
-+ newFy = FluidSolver.GetVertexForceY(self.fluidInterfaceIdentifier, iVertex)
-+ newFz = FluidSolver.GetVertexForceZ(self.fluidInterfaceIdentifier, iVertex)
- iGlobalVertex = self.__getGlobalIndex('fluid', myid, localIndex)
- self.fluidLoads_array_X.setValues([iGlobalVertex], newFx)
- self.fluidLoads_array_Y.setValues([iGlobalVertex], newFy)
-@@ -1457,22 +1457,22 @@
- FX_b = self.fluidLoads_array_X.sum()
- FY_b = self.fluidLoads_array_Y.sum()
- FZ_b = self.fluidLoads_array_Z.sum()
--
-+
-
- def setFluidInterfaceVarCoord(self, FluidSolver):
-- """
-- Communicate the change of coordinates of the fluid interface to the fluid solver.
-- Prepare the fluid solver for mesh deformation.
-- """
-+ """
-+ Communicate the change of coordinates of the fluid interface to the fluid solver.
-+ Prepare the fluid solver for mesh deformation.
-+ """
- if self.have_MPI == True:
-- myid = self.comm.Get_rank()
-+ myid = self.comm.Get_rank()
- else:
- myid = 0
--
-+
- # --- Send the new fluid interface position to the fluid solver (on each partition, halo nodes included) ---
- localIndex = 0
-- for iVertex in range(self.nLocalFluidInterfaceNodes):
-- GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
-+ for iVertex in range(self.nLocalFluidInterfaceNodes):
-+ GlobalIndex = FluidSolver.GetVertexGlobalIndex(self.fluidInterfaceIdentifier, iVertex)
- if GlobalIndex in self.FluidHaloNodeList[myid].keys():
- posX0, posY0, posZ0 = self.haloNodesPositionsInit[GlobalIndex]
- DispX, DispY, DispZ = self.haloNodesDisplacements[GlobalIndex]
-@@ -1491,32 +1491,32 @@
- FluidSolver.SetVertexCoordZ(self.fluidInterfaceIdentifier, iVertex, posZ)
- localIndex += 1
- # Prepares the mesh deformation in the fluid solver
-- nodalVarCoordNorm = FluidSolver.SetVertexVarCoord(self.fluidInterfaceIdentifier, iVertex)
-+ nodalVarCoordNorm = FluidSolver.SetVertexVarCoord(self.fluidInterfaceIdentifier, iVertex)
-+
-
--
- def setSolidInterfaceLoads(self, SolidSolver, FSI_config, time):
-- """
-- Communicates the new solid interface loads to the solid solver.
-- In case of rigid body motion, calculates the new resultant forces (lift, drag, ...).
-- """
-+ """
-+ Communicates the new solid interface loads to the solid solver.
-+ In case of rigid body motion, calculates the new resultant forces (lift, drag, ...).
-+ """
- if self.have_MPI == True:
-- myid = self.comm.Get_rank()
-+ myid = self.comm.Get_rank()
- else:
- myid = 0
-
-- FY = 0.0 # solid-side resultant forces
-+ FY = 0.0 # solid-side resultant forces
- FX = 0.0
- FZ = 0.0
-- FFX = 0.0 # fluid-side resultant forces
-- FFY = 0.0
-- FFZ = 0.0
-+ FFX = 0.0 # fluid-side resultant forces
-+ FFY = 0.0
-+ FFZ = 0.0
-
- # --- Check for total force conservation after interpolation
- FFX = self.fluidLoads_array_X.sum()
- FFY = self.fluidLoads_array_Y.sum()
- FFZ = self.fluidLoads_array_Z.sum()
-
--
-+
- for iVertex in range(self.nLocalSolidInterfaceNodes):
- FX += self.localSolidLoads_array_X[iVertex]
- FY += self.localSolidLoads_array_Y[iVertex]
-@@ -1527,9 +1527,9 @@
- FY = self.comm.allreduce(FY)
- FZ = self.comm.allreduce(FZ)
-
-- self.MPIPrint("Checking f/s interface total force...")
-- self.MPIPrint('Solid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FX, FY, FZ))
-- self.MPIPrint('Fluid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FFX, FFY, FFZ))
-+ self.MPIPrint("Checking f/s interface total force...")
-+ self.MPIPrint('Solid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FX, FY, FZ))
-+ self.MPIPrint('Fluid side (Fx, Fy, Fz) = ({}, {}, {})'.format(FFX, FFY, FFZ))
-
- # --- Send the new solid interface loads to the solid solver (on each partition, halo nodes included) ---
- GlobalIndex = int()
-@@ -1541,25 +1541,25 @@
- pass
- else:
- Fx = self.localSolidLoads_array_X[localIndex]
-- Fy = self.localSolidLoads_array_Y[localIndex]
-- Fz = self.localSolidLoads_array_Z[localIndex]
-+ Fy = self.localSolidLoads_array_Y[localIndex]
-+ Fz = self.localSolidLoads_array_Z[localIndex]
- SolidSolver.applyload(iVertex, Fx, Fy, Fz, time)
- localIndex += 1
-- if FSI_config['CSD_SOLVER'] == 'NATIVE':
-+ if FSI_config['CSD_SOLVER'] == 'NATIVE':
- SolidSolver.setGeneralisedForce()
-- SolidSolver.setGeneralisedMoment()
-+ SolidSolver.setGeneralisedMoment()
-
- def computeSolidInterfaceResidual(self, SolidSolver):
-- """
-- Computes the solid interface FSI displacement residual.
-- """
-+ """
-+ Computes the solid interface FSI displacement residual.
-+ """
-
- if self.have_MPI == True:
-- myid = self.comm.Get_rank()
-+ myid = self.comm.Get_rank()
- else:
- myid = 0
-
-- normInterfaceResidualSquare = 0.0
-+ normInterfaceResidualSquare = 0.0
-
- # --- Create and fill the PETSc vector for the predicted solid interface position (predicted by the solid computation) ---
- if self.have_MPI == True:
-@@ -1575,27 +1575,27 @@
- predDisp_array_Y = PETSc.Vec().create()
- predDisp_array_Y.setType('seq')
- predDisp_array_Z = PETSc.Vec().create()
-- predDisp_array_Z.setType('seq')
-+ predDisp_array_Z.setType('seq')
- predDisp_array_X.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
- predDisp_array_Y.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
- predDisp_array_Z.setSizes(self.nSolidInterfacePhysicalNodes+self.d_RBF)
--
-- if myid in self.solidSolverProcessors:
-- for iVertex in range(self.nLocalSolidInterfaceNodes):
-- predDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex)
-- predDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex)
-- predDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex)
-+
-+ if myid in self.solidSolverProcessors:
-+ for iVertex in range(self.nLocalSolidInterfaceNodes):
-+ predDispx = SolidSolver.getInterfaceNodeDispX(self.solidInterfaceIdentifier, iVertex)
-+ predDispy = SolidSolver.getInterfaceNodeDispY(self.solidInterfaceIdentifier, iVertex)
-+ predDispz = SolidSolver.getInterfaceNodeDispZ(self.solidInterfaceIdentifier, iVertex)
- iGlobalVertex = self.__getGlobalIndex('solid', myid, iVertex)
- predDisp_array_X.setValues([iGlobalVertex], predDispx)
- predDisp_array_Y.setValues([iGlobalVertex], predDispy)
- predDisp_array_Z.setValues([iGlobalVertex], predDispz)
--
-- predDisp_array_X.assemblyBegin()
-- predDisp_array_X.assemblyEnd()
-- predDisp_array_Y.assemblyBegin()
-- predDisp_array_Y.assemblyEnd()
-- predDisp_array_Z.assemblyBegin()
-- predDisp_array_Z.assemblyEnd()
-+
-+ predDisp_array_X.assemblyBegin()
-+ predDisp_array_X.assemblyEnd()
-+ predDisp_array_Y.assemblyBegin()
-+ predDisp_array_Y.assemblyEnd()
-+ predDisp_array_Z.assemblyBegin()
-+ predDisp_array_Z.assemblyEnd()
-
- # --- Calculate the residual (vector and norm) ---
- self.solidInterfaceResidual_array_X = predDisp_array_X - self.solidInterface_array_DispX
-@@ -1615,45 +1615,45 @@
- del predDisp_array_Y
- del predDisp_array_Z
-
-- return sqrt(normInterfaceResidualSquare)
-+ return sqrt(normInterfaceResidualSquare)
-
- def relaxSolidPosition(self,FSI_config):
-- """
-- Apply solid displacement under-relaxation.
-- """
-+ """
-+ Apply solid displacement under-relaxation.
-+ """
- if self.have_MPI == True:
-- myid = self.comm.Get_rank()
-+ myid = self.comm.Get_rank()
- else:
- myid = 0
-
- # --- Set the Aitken coefficient for the relaxation ---
-- if FSI_config['AITKEN_RELAX'] == 'STATIC':
-- self.aitkenParam = FSI_config['AITKEN_PARAM']
-- elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC':
-- self.setAitkenCoefficient(FSI_config)
-- else:
-- self.aitkenParam = 1.0
-+ if FSI_config['AITKEN_RELAX'] == 'STATIC':
-+ self.aitkenParam = FSI_config['AITKEN_PARAM']
-+ elif FSI_config['AITKEN_RELAX'] == 'DYNAMIC':
-+ self.setAitkenCoefficient(FSI_config)
-+ else:
-+ self.aitkenParam = 1.0
-
-- self.MPIPrint('Aitken under-relaxation step with parameter {}'.format(self.aitkenParam))
-+ self.MPIPrint('Aitken under-relaxation step with parameter {}'.format(self.aitkenParam))
-
- # --- Relax the solid interface position ---
- self.solidInterface_array_DispX += self.aitkenParam*self.solidInterfaceResidual_array_X
- self.solidInterface_array_DispY += self.aitkenParam*self.solidInterfaceResidual_array_Y
- self.solidInterface_array_DispZ += self.aitkenParam*self.solidInterfaceResidual_array_Z
--
-+
-
- def setAitkenCoefficient(self, FSI_config):
-- """
-- Computes the Aitken coefficients for solid displacement under-relaxation.
-- """
--
-- deltaResNormSquare = 0.0
-- prodScalRes = 0.0
--
-+ """
-+ Computes the Aitken coefficients for solid displacement under-relaxation.
-+ """
-+
-+ deltaResNormSquare = 0.0
-+ prodScalRes = 0.0
-+
- # --- Create the PETSc vector for the difference between the residuals (current and previous FSI iter) ---
-- if self.FSIIter == 0:
-- self.aitkenParam = max(FSI_config['AITKEN_PARAM'], self.aitkenParam)
-- else:
-+ if self.FSIIter == 0:
-+ self.aitkenParam = max(FSI_config['AITKEN_PARAM'], self.aitkenParam)
-+ else:
- if self.have_MPI:
- deltaResx_array_X = PETSc.Vec().create(self.comm)
- deltaResx_array_X.setType('mpi')
-@@ -1688,9 +1688,9 @@
- deltaResNormSquare_X = (deltaResx_array_X.norm())**2
- deltaResNormSquare_Y = (deltaResx_array_Y.norm())**2
- deltaResNormSquare_Z = (deltaResx_array_Z.norm())**2
-- deltaResNormSquare = deltaResNormSquare_X + deltaResNormSquare_Y + deltaResNormSquare_Z
-+ deltaResNormSquare = deltaResNormSquare_X + deltaResNormSquare_Y + deltaResNormSquare_Z
-
-- self.aitkenParam *= -prodScalRes/deltaResNormSquare
-+ self.aitkenParam *= -prodScalRes/deltaResNormSquare
-
- deltaResx_array_X.destroy()
- deltaResx_array_Y.destroy()
-@@ -1708,27 +1708,27 @@
- self.solidInterfaceResidual_array_Z.copy(self.solidInterfaceResidualnM1_array_Z)
-
- def displacementPredictor(self, FSI_config , SolidSolver, deltaT):
-- """
-- Calculates a prediciton for the solid interface position for the next time step.
-- """
-+ """
-+ Calculates a prediciton for the solid interface position for the next time step.
-+ """
-
- if self.have_MPI == True:
-- myid = self.comm.Get_rank()
-+ myid = self.comm.Get_rank()
- else:
- myid = 0
-
-- if FSI_config['DISP_PRED'] == 'FIRST_ORDER':
-- self.MPIPrint("First order predictor")
-- alpha_0 = 1.0
-- alpha_1 = 0.0
-- elif FSI_config['DISP_PRED'] == 'SECOND_ORDER':
-- self.MPIPrint("Second order predictor")
-- alpha_0 = 1.0
-- alpha_1 = 0.5
-- else:
-- self.MPIPrint("No predictor")
-- alpha_0 = 0.0
-- alpha_1 = 0.0
-+ if FSI_config['DISP_PRED'] == 'FIRST_ORDER':
-+ self.MPIPrint("First order predictor")
-+ alpha_0 = 1.0
-+ alpha_1 = 0.0
-+ elif FSI_config['DISP_PRED'] == 'SECOND_ORDER':
-+ self.MPIPrint("Second order predictor")
-+ alpha_0 = 1.0
-+ alpha_1 = 0.5
-+ else:
-+ self.MPIPrint("No predictor")
-+ alpha_0 = 0.0
-+ alpha_1 = 0.0
-
- # --- Create the PETSc vectors to store the solid interface velocity ---
- if self.have_MPI == True:
-@@ -1774,18 +1774,18 @@
- # --- Fill the PETSc vectors ---
- GlobalIndex = int()
- localIndex = 0
-- for iVertex in range(self.nLocalSolidInterfaceNodes):
-- GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
-+ for iVertex in range(self.nLocalSolidInterfaceNodes):
-+ GlobalIndex = SolidSolver.getInterfaceNodeGlobalIndex(self.solidInterfaceIdentifier, iVertex)
- if GlobalIndex in self.SolidHaloNodeList[myid].keys():
- pass
- else:
- iGlobalVertex = self.__getGlobalIndex('solid', myid, localIndex)
-- velx = SolidSolver.getInterfaceNodeVelX(self.solidInterfaceIdentifier, iVertex)
-- vely = SolidSolver.getInterfaceNodeVelY(self.solidInterfaceIdentifier, iVertex)
-- velz = SolidSolver.getInterfaceNodeVelZ(self.solidInterfaceIdentifier, iVertex)
-- velxNm1 = SolidSolver.getInterfaceNodeVelXNm1(self.solidInterfaceIdentifier, iVertex)
-- velyNm1 = SolidSolver.getInterfaceNodeVelYNm1(self.solidInterfaceIdentifier, iVertex)
-- velzNm1 = SolidSolver.getInterfaceNodeVelZNm1(self.solidInterfaceIdentifier, iVertex)
-+ velx = SolidSolver.getInterfaceNodeVelX(self.solidInterfaceIdentifier, iVertex)
-+ vely = SolidSolver.getInterfaceNodeVelY(self.solidInterfaceIdentifier, iVertex)
-+ velz = SolidSolver.getInterfaceNodeVelZ(self.solidInterfaceIdentifier, iVertex)
-+ velxNm1 = SolidSolver.getInterfaceNodeVelXNm1(self.solidInterfaceIdentifier, iVertex)
-+ velyNm1 = SolidSolver.getInterfaceNodeVelYNm1(self.solidInterfaceIdentifier, iVertex)
-+ velzNm1 = SolidSolver.getInterfaceNodeVelZNm1(self.solidInterfaceIdentifier, iVertex)
- Vel_array_X.setValues([iGlobalVertex],velx)
- Vel_array_Y.setValues([iGlobalVertex],vely)
- Vel_array_Z.setValues([iGlobalVertex],velz)
-@@ -1822,27 +1822,27 @@
- del VelnM1_array_X, VelnM1_array_Y, VelnM1_array_Z
-
- def writeFSIHistory(self, TimeIter, time, varCoordNorm, FSIConv):
-- """
-- Write the FSI history file of the computaion.
-- """
-+ """
-+ Write the FSI history file of the computaion.
-+ """
-
- if self.have_MPI == True:
- myid = self.comm.Get_rank()
- else:
- myid = 0
--
-+
- if myid == self.rootProcess:
-- if self.unsteady:
-- if TimeIter == 0:
-- histFile = open('FSIhistory.dat', "w")
-+ if self.unsteady:
-+ if TimeIter == 0:
-+ histFile = open('FSIhistory.dat', "w")
- histFile.write("TimeIter\tTime\tFSIRes\tFSINbIter\n")
-- else:
-- histFile = open('FSIhistory.dat', "a")
-- if FSIConv:
-- histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter+1) + '\n')
-- else:
-- histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter) + '\n')
-- histFile.close()
-+ else:
-+ histFile = open('FSIhistory.dat', "a")
-+ if FSIConv:
-+ histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter+1) + '\n')
-+ else:
-+ histFile.write(str(TimeIter) + '\t' + str(time) + '\t' + str(varCoordNorm) + '\t' + str(self.FSIIter) + '\n')
-+ histFile.close()
- else:
- if self.FSIIter == 0:
- histFile = open('FSIhistory.dat', "w")
-@@ -1851,7 +1851,7 @@
- histFile = open('FSIhistory.dat', "a")
- histFile.write(str(self.FSIIter) + '\t' + str(varCoordNorm) + '\n')
- histFile.close()
--
-+
-
- self.MPIBarrier()
-
-@@ -1868,254 +1868,254 @@
- globalIndex = globalStartIndex + iLocalVertex
-
- return globalIndex
--
-+
-
- def UnsteadyFSI(self,FSI_config, FluidSolver, SolidSolver):
-- """
-- Run the unsteady FSI computation by synchronizing the fluid and solid solvers.
-- F/s interface data are exchanged through interface mapping and interpolation (if non mathcing meshes).
-- """
-+ """
-+ Run the unsteady FSI computation by synchronizing the fluid and solid solvers.
-+ F/s interface data are exchanged through interface mapping and interpolation (if non mathcing meshes).
-+ """
-
- if self.have_MPI == True:
-- myid = self.comm.Get_rank()
-- numberPart = self.comm.Get_size()
-+ myid = self.comm.Get_rank()
-+ numberPart = self.comm.Get_size()
- else:
- myid = 0
- numberPart = 1
-
-- # --- Set some general variables for the unsteady computation --- #
-- deltaT = FSI_config['UNST_TIMESTEP'] # physical time step
-- totTime = FSI_config['UNST_TIME'] # physical simulation time
-- NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step)
-- FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance
-- TimeIterTreshold = 0 # time iteration from which we allow the solid to deform
--
-- if FSI_config['RESTART_SOL'] == 'YES':
-- startTime = FSI_config['START_TIME']
-- NbTimeIter = ((totTime)/deltaT)-1
-- time = startTime
-- TimeIter = FSI_config['RESTART_ITER']
-- else:
-- NbTimeIter = (totTime/deltaT)-1 # number of time iterations
-- time = 0.0 # initial time
-- TimeIter = 0 # initial time iteration
--
-- NbTimeIter = int(NbTimeIter) # be sure that NbTimeIter is an integer
--
-- varCoordNorm = 0.0 # FSI residual
-- FSIConv = False # FSI convergence flag
--
-- self.MPIPrint('\n**********************************')
-- self.MPIPrint('* Begin unsteady FSI computation *')
-- self.MPIPrint('**********************************\n')
--
-- # --- Initialize the coupled solution --- #
-- #If restart (DOES NOT WORK YET)
-- if FSI_config['RESTART_SOL'] == 'YES':
-- TimeIterTreshold = -1
-- FluidSolver.setTemporalIteration(TimeIter)
-- if myid == self.rootProcess:
-- SolidSolver.outputDisplacements(FluidSolver.getInterRigidDispArray(), True)
-+ # --- Set some general variables for the unsteady computation --- #
-+ deltaT = FSI_config['UNST_TIMESTEP'] # physical time step
-+ totTime = FSI_config['UNST_TIME'] # physical simulation time
-+ NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step)
-+ FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance
-+ TimeIterTreshold = 0 # time iteration from which we allow the solid to deform
-+
-+ if FSI_config['RESTART_SOL'] == 'YES':
-+ startTime = FSI_config['START_TIME']
-+ NbTimeIter = ((totTime)/deltaT)-1
-+ time = startTime
-+ TimeIter = FSI_config['RESTART_ITER']
-+ else:
-+ NbTimeIter = (totTime/deltaT)-1 # number of time iterations
-+ time = 0.0 # initial time
-+ TimeIter = 0 # initial time iteration
-+
-+ NbTimeIter = int(NbTimeIter) # be sure that NbTimeIter is an integer
-+
-+ varCoordNorm = 0.0 # FSI residual
-+ FSIConv = False # FSI convergence flag
-+
-+ self.MPIPrint('\n**********************************')
-+ self.MPIPrint('* Begin unsteady FSI computation *')
-+ self.MPIPrint('**********************************\n')
-+
-+ # --- Initialize the coupled solution --- #
-+ #If restart (DOES NOT WORK YET)
-+ if FSI_config['RESTART_SOL'] == 'YES':
-+ TimeIterTreshold = -1
-+ FluidSolver.setTemporalIteration(TimeIter)
-+ if myid == self.rootProcess:
-+ SolidSolver.outputDisplacements(FluidSolver.getInterRigidDispArray(), True)
-+ if self.have_MPI == True:
-+ self.comm.barrier()
-+ FluidSolver.setInitialMesh(True)
-+ if myid == self.rootProcess:
-+ SolidSolver.displacementPredictor(FluidSolver.getInterRigidDispArray())
- if self.have_MPI == True:
-- self.comm.barrier()
-- FluidSolver.setInitialMesh(True)
-- if myid == self.rootProcess:
-- SolidSolver.displacementPredictor(FluidSolver.getInterRigidDispArray())
-- if self.have_MPI == True:
-- self.comm.barrier()
-- if myid == self.rootProcess:
-- SolidSolver.updateSolution()
-- #If no restart
-- else:
-- self.MPIPrint('Setting FSI initial conditions')
-+ self.comm.barrier()
-+ if myid == self.rootProcess:
-+ SolidSolver.updateSolution()
-+ #If no restart
-+ else:
-+ self.MPIPrint('Setting FSI initial conditions')
- if myid in self.solidSolverProcessors:
-- SolidSolver.setInitialDisplacements()
-+ SolidSolver.setInitialDisplacements()
- self.getSolidInterfaceDisplacement(SolidSolver)
-- self.interpolateSolidPositionOnFluidMesh(FSI_config)
-- self.setFluidInterfaceVarCoord(FluidSolver)
-- FluidSolver.SetInitialMesh() # if there is an initial deformation in the solid, it has to be communicated to the fluid solver
-- self.MPIPrint('\nFSI initial conditions are set')
-- self.MPIPrint('Beginning time integration\n')
--
-- # --- External temporal loop --- #
-- while TimeIter <= NbTimeIter:
--
-- if TimeIter > TimeIterTreshold:
-- NbFSIIter = NbFSIIterMax
-- self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI on time iteration {} ***************'.format(TimeIter))
-- else:
-- NbFSIIter = 1
--
-- self.FSIIter = 0
-- FSIConv = False
-- FluidSolver.PreprocessExtIter(TimeIter) # set some parameters before temporal fluid iteration
--
-- # --- Internal FSI loop --- #
-- while self.FSIIter <= (NbFSIIter-1):
-+ self.interpolateSolidPositionOnFluidMesh(FSI_config)
-+ self.setFluidInterfaceVarCoord(FluidSolver)
-+ FluidSolver.SetInitialMesh() # if there is an initial deformation in the solid, it has to be communicated to the fluid solver
-+ self.MPIPrint('\nFSI initial conditions are set')
-+ self.MPIPrint('Beginning time integration\n')
-+
-+ # --- External temporal loop --- #
-+ while TimeIter <= NbTimeIter:
-+
-+ if TimeIter > TimeIterTreshold:
-+ NbFSIIter = NbFSIIterMax
-+ self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI on time iteration {} ***************'.format(TimeIter))
-+ else:
-+ NbFSIIter = 1
-+
-+ self.FSIIter = 0
-+ FSIConv = False
-+ FluidSolver.PreprocessExtIter(TimeIter) # set some parameters before temporal fluid iteration
-
-- self.MPIPrint("\n>>>> Time iteration {} / FSI iteration {} <<<<".format(TimeIter,self.FSIIter))
-+ # --- Internal FSI loop --- #
-+ while self.FSIIter <= (NbFSIIter-1):
-
-- # --- Mesh morphing step (displacements interpolation, displacements communication, and mesh morpher call) --- #
-- self.interpolateSolidPositionOnFluidMesh(FSI_config)
-+ self.MPIPrint("\n>>>> Time iteration {} / FSI iteration {} <<<<".format(TimeIter,self.FSIIter))
-+
-+ # --- Mesh morphing step (displacements interpolation, displacements communication, and mesh morpher call) --- #
-+ self.interpolateSolidPositionOnFluidMesh(FSI_config)
- self.MPIPrint('\nPerforming dynamic mesh deformation (ALE)...\n')
- self.setFluidInterfaceVarCoord(FluidSolver)
- FluidSolver.DynamicMeshUpdate(TimeIter)
--
-- # --- Fluid solver call for FSI subiteration --- #
-- self.MPIPrint('\nLaunching fluid solver for one single dual-time iteration...')
-+
-+ # --- Fluid solver call for FSI subiteration --- #
-+ self.MPIPrint('\nLaunching fluid solver for one single dual-time iteration...')
- self.MPIBarrier()
-- FluidSolver.ResetConvergence()
-- FluidSolver.Run()
-+ FluidSolver.ResetConvergence()
-+ FluidSolver.Run()
- self.MPIBarrier()
-
-- # --- Surface fluid loads interpolation and communication --- #
-- self.MPIPrint('\nProcessing interface fluid loads...\n')
-+ # --- Surface fluid loads interpolation and communication --- #
-+ self.MPIPrint('\nProcessing interface fluid loads...\n')
- self.MPIBarrier()
-- self.getFluidInterfaceNodalForce(FSI_config, FluidSolver)
-+ self.getFluidInterfaceNodalForce(FSI_config, FluidSolver)
- self.MPIBarrier()
-- if TimeIter > TimeIterTreshold:
-- self.interpolateFluidLoadsOnSolidMesh(FSI_config)
-- self.setSolidInterfaceLoads(SolidSolver, FSI_config, time)
-+ if TimeIter > TimeIterTreshold:
-+ self.interpolateFluidLoadsOnSolidMesh(FSI_config)
-+ self.setSolidInterfaceLoads(SolidSolver, FSI_config, time)
-
-- # --- Solid solver call for FSI subiteration --- #
-- self.MPIPrint('\nLaunching solid solver for a single time iteration...\n')
-+ # --- Solid solver call for FSI subiteration --- #
-+ self.MPIPrint('\nLaunching solid solver for a single time iteration...\n')
- if myid in self.solidSolverProcessors:
-- if FSI_config['CSD_SOLVER'] == 'NATIVE':
-- SolidSolver.timeIteration(time)
-- elif FSI_config['CSD_SOLVER'] == 'METAFOR' or FSI_config['CSD_SOLVER'] == 'GETDP' or FSI_config['CSD_SOLVER'] == 'TESTER':
-- SolidSolver.run(time-deltaT, time)
--
-- # --- Compute and monitor the FSI residual --- #
-- varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver)
-- self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm))
-- if varCoordNorm < FSITolerance:
-- FSIConv = True
-- break
-+ if FSI_config['CSD_SOLVER'] == 'NATIVE':
-+ SolidSolver.timeIteration(time)
-+ elif FSI_config['CSD_SOLVER'] == 'METAFOR' or FSI_config['CSD_SOLVER'] == 'GETDP' or FSI_config['CSD_SOLVER'] == 'TESTER':
-+ SolidSolver.run(time-deltaT, time)
-+
-+ # --- Compute and monitor the FSI residual --- #
-+ varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver)
-+ self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm))
-+ if varCoordNorm < FSITolerance:
-+ FSIConv = True
-+ break
-
-- # --- Relaxe the solid position --- #
-+ # --- Relaxe the solid position --- #
- self.MPIPrint('\nProcessing interface displacements...\n')
-- self.relaxSolidPosition(FSI_config)
--
-- self.FSIIter += 1
-- # --- End OF FSI loop --- #
-+ self.relaxSolidPosition(FSI_config)
-+
-+ self.FSIIter += 1
-+ # --- End OF FSI loop --- #
-
- self.MPIBarrier()
-
-- # --- Update the FSI history file --- #
-- if TimeIter > TimeIterTreshold:
-- self.MPIPrint('\nBGS is converged (strong coupling)')
-- self.writeFSIHistory(TimeIter, time, varCoordNorm, FSIConv)
--
-- # --- Update, monitor and output the fluid solution before the next time step ---#
-- FluidSolver.Update()
-- FluidSolver.Monitor(TimeIter)
-- FluidSolver.Output(TimeIter)
--
-- if TimeIter >= TimeIterTreshold:
-- if myid in self.solidSolverProcessors:
-- # --- Output the solid solution before thr next time step --- #
-- SolidSolver.writeSolution(time, self.FSIIter, TimeIter, NbTimeIter)
--
-- # --- Displacement predictor for the next time step and update of the solid solution --- #
-- self.MPIPrint('\nSolid displacement prediction for next time step')
-- self.displacementPredictor(FSI_config, SolidSolver, deltaT)
-+ # --- Update the FSI history file --- #
-+ if TimeIter > TimeIterTreshold:
-+ self.MPIPrint('\nBGS is converged (strong coupling)')
-+ self.writeFSIHistory(TimeIter, time, varCoordNorm, FSIConv)
-+
-+ # --- Update, monitor and output the fluid solution before the next time step ---#
-+ FluidSolver.Update()
-+ FluidSolver.Monitor(TimeIter)
-+ FluidSolver.Output(TimeIter)
-+
-+ if TimeIter >= TimeIterTreshold:
-+ if myid in self.solidSolverProcessors:
-+ # --- Output the solid solution before thr next time step --- #
-+ SolidSolver.writeSolution(time, self.FSIIter, TimeIter, NbTimeIter)
-+
-+ # --- Displacement predictor for the next time step and update of the solid solution --- #
-+ self.MPIPrint('\nSolid displacement prediction for next time step')
-+ self.displacementPredictor(FSI_config, SolidSolver, deltaT)
- if myid in self.solidSolverProcessors:
-- SolidSolver.updateSolution()
--
-- TimeIter += 1
-- time += deltaT
-- #--- End of the temporal loop --- #
-+ SolidSolver.updateSolution()
-+
-+ TimeIter += 1
-+ time += deltaT
-+ #--- End of the temporal loop --- #
-
- self.MPIBarrier()
-
-- self.MPIPrint('\n*************************')
-- self.MPIPrint('* End FSI computation *')
-- self.MPIPrint('*************************\n')
-+ self.MPIPrint('\n*************************')
-+ self.MPIPrint('* End FSI computation *')
-+ self.MPIPrint('*************************\n')
-
- def SteadyFSI(self, FSI_config,FluidSolver, SolidSolver):
-- """
-- Runs the steady FSI computation by synchronizing the fluid and solid solver with data exchange at the f/s interface.
-- """
-+ """
-+ Runs the steady FSI computation by synchronizing the fluid and solid solver with data exchange at the f/s interface.
-+ """
-
- if self.have_MPI == True:
-- myid = self.comm.Get_rank()
-- numberPart = self.comm.Get_size()
-+ myid = self.comm.Get_rank()
-+ numberPart = self.comm.Get_size()
- else:
- myid = 0
- numberPart = 1
-
-- # --- Set some general variables for the steady computation --- #
-- NbIter = FSI_config['NB_EXT_ITER'] # number of fluid iteration at each FSI step
-- NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step)
-- FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance
-- varCoordNorm = 0.0
--
-- self.MPIPrint('\n********************************')
-- self.MPIPrint('* Begin steady FSI computation *')
-- self.MPIPrint('********************************\n')
-- self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI ***************')
-+ # --- Set some general variables for the steady computation --- #
-+ NbIter = FSI_config['NB_EXT_ITER'] # number of fluid iteration at each FSI step
-+ NbFSIIterMax = FSI_config['NB_FSI_ITER'] # maximum number of FSI iteration (for each time step)
-+ FSITolerance = FSI_config['FSI_TOLERANCE'] # f/s interface tolerance
-+ varCoordNorm = 0.0
-+
-+ self.MPIPrint('\n********************************')
-+ self.MPIPrint('* Begin steady FSI computation *')
-+ self.MPIPrint('********************************\n')
-+ self.MPIPrint('\n*************** Enter Block Gauss Seidel (BGS) method for strong coupling FSI ***************')
-
- self.getSolidInterfaceDisplacement(SolidSolver)
-
-- # --- External FSI loop --- #
-- self.FSIIter = 0
-- while self.FSIIter < NbFSIIterMax:
-- self.MPIPrint("\n>>>> FSI iteration {} <<<<".format(self.FSIIter))
-- self.MPIPrint('\nLaunching fluid solver for a steady computation...')
-- # --- Fluid solver call for FSI subiteration ---#
-- Iter = 0
-- FluidSolver.ResetConvergence()
-- while Iter < NbIter:
-- FluidSolver.PreprocessExtIter(Iter)
-- FluidSolver.Run()
-- StopIntegration = FluidSolver.Monitor(Iter)
-- FluidSolver.Output(Iter)
-- if StopIntegration:
-- break;
-- Iter += 1
--
-- # --- Surface fluid loads interpolation and communication ---#
-- self.MPIPrint('\nProcessing interface fluid loads...\n')
-+ # --- External FSI loop --- #
-+ self.FSIIter = 0
-+ while self.FSIIter < NbFSIIterMax:
-+ self.MPIPrint("\n>>>> FSI iteration {} <<<<".format(self.FSIIter))
-+ self.MPIPrint('\nLaunching fluid solver for a steady computation...')
-+ # --- Fluid solver call for FSI subiteration ---#
-+ Iter = 0
-+ FluidSolver.ResetConvergence()
-+ while Iter < NbIter:
-+ FluidSolver.PreprocessExtIter(Iter)
-+ FluidSolver.Run()
-+ StopIntegration = FluidSolver.Monitor(Iter)
-+ FluidSolver.Output(Iter)
-+ if StopIntegration:
-+ break;
-+ Iter += 1
-+
-+ # --- Surface fluid loads interpolation and communication ---#
-+ self.MPIPrint('\nProcessing interface fluid loads...\n')
- self.MPIBarrier()
-- self.getFluidInterfaceNodalForce(FSI_config, FluidSolver)
-+ self.getFluidInterfaceNodalForce(FSI_config, FluidSolver)
- self.MPIBarrier()
-- self.interpolateFluidLoadsOnSolidMesh(FSI_config)
-- self.setSolidInterfaceLoads(SolidSolver, FSI_config, 0.05)
--
-- # --- Solid solver call for FSI subiteration --- #
-- self.MPIPrint('\nLaunching solid solver for a static computation...\n')
-+ self.interpolateFluidLoadsOnSolidMesh(FSI_config)
-+ self.setSolidInterfaceLoads(SolidSolver, FSI_config, 0.05)
-+
-+ # --- Solid solver call for FSI subiteration --- #
-+ self.MPIPrint('\nLaunching solid solver for a static computation...\n')
- if myid in self.solidSolverProcessors:
-- if FSI_config['CSD_SOLVER'] == 'NATIVE':
-- SolidSolver.staticComputation()
-+ if FSI_config['CSD_SOLVER'] == 'NATIVE':
-+ SolidSolver.staticComputation()
- else:
- SolidSolver.run(0.0, 0.05)
-- SolidSolver.writeSolution(0.0, self.FSIIter, Iter, NbIter)
-+ SolidSolver.writeSolution(0.0, self.FSIIter, Iter, NbIter)
-
-- # --- Compute and monitor the FSI residual --- #
-- varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver)
-- self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm))
-+ # --- Compute and monitor the FSI residual --- #
-+ varCoordNorm = self.computeSolidInterfaceResidual(SolidSolver)
-+ self.MPIPrint('\nFSI displacement norm : {}\n'.format(varCoordNorm))
- self.writeFSIHistory(0, 0.0, varCoordNorm, False)
-- if varCoordNorm < FSITolerance:
-- break
-+ if varCoordNorm < FSITolerance:
-+ break
-
- # --- Relaxe the solid displacement and update the solid solution --- #
- self.MPIPrint('\nProcessing interface displacements...\n')
-- self.relaxSolidPosition(FSI_config)
-+ self.relaxSolidPosition(FSI_config)
- if myid in self.solidSolverProcessors:
- SolidSolver.updateSolution()
--
-- # --- Mesh morphing step (displacement interpolation, displacements communication, and mesh morpher call) --- #
-- self.interpolateSolidPositionOnFluidMesh(FSI_config)
-- self.MPIPrint('\nPerforming static mesh deformation...\n')
-- self.setFluidInterfaceVarCoord(FluidSolver)
-- FluidSolver.StaticMeshUpdate()
-- self.FSIIter += 1
-+
-+ # --- Mesh morphing step (displacement interpolation, displacements communication, and mesh morpher call) --- #
-+ self.interpolateSolidPositionOnFluidMesh(FSI_config)
-+ self.MPIPrint('\nPerforming static mesh deformation...\n')
-+ self.setFluidInterfaceVarCoord(FluidSolver)
-+ FluidSolver.StaticMeshUpdate()
-+ self.FSIIter += 1
-
- self.MPIBarrier()
-
-- self.MPIPrint('\nBGS is converged (strong coupling)')
-- self.MPIPrint(' ')
-- self.MPIPrint('*************************')
-- self.MPIPrint('* End FSI computation *')
-- self.MPIPrint('*************************')
-- self.MPIPrint(' ')
-+ self.MPIPrint('\nBGS is converged (strong coupling)')
-+ self.MPIPrint(' ')
-+ self.MPIPrint('*************************')
-+ self.MPIPrint('* End FSI computation *')
-+ self.MPIPrint('*************************')
-+ self.MPIPrint(' ')
-diff -Naur old/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py new/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py
---- old/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py 2020-05-01 19:09:18.000000000 +0300
-+++ new/SU2_PY/FSI/PitchPlungeAirfoilStructuralTester.py 2020-05-10 16:17:07.000000000 +0300
-@@ -174,9 +174,9 @@
-
- with open(self.Config_file) as configfile:
- while 1:
-- line = configfile.readline()
-- if not line:
-- break
-+ line = configfile.readline()
-+ if not line:
-+ break
-
- # remove line returns
- line = line.strip('\r\n')
-@@ -189,41 +189,41 @@
- this_value = line[1].strip()
-
- for case in switch(this_param):
-- #integer values
-- #if case("NB_FSI_ITER") :
-- #self.Config[this_param] = int(this_value)
-- #break
--
-- #float values
-- if case("DELTA_T") : pass
-- if case("START_TIME") : pass
-- if case("STOP_TIME") : pass
-- if case("SPRING_MASS") : pass
-- if case("INERTIA_FLEXURAL") : pass
-- if case("SPRING_STIFFNESS") : pass
-- if case("SPRING_DAMPING") : pass
-- if case("TORSIONAL_STIFFNESS") : pass
-- if case("TORSIONAL_DAMPING") : pass
-- if case("CORD") : pass
-- if case("FLEXURAL_AXIS") : pass
-- if case("GRAVITY_CENTER") : pass
-- if case("INITIAL_DISP") : pass
-- if case("INITIAL_ANGLE") : pass
-- if case("RHO") :
-- self.Config[this_param] = float(this_value)
-- break
--
-- #string values
-- if case("TIME_MARCHING") : pass
-- if case("MESH_FILE") : pass
-- if case("CSD_SOLVER") : pass
-- if case("MOVING_MARKER") : pass
-- if case("STRUCT_TYPE") :
-- self.Config[this_param] = this_value
-- break
-+ #integer values
-+ #if case("NB_FSI_ITER") :
-+ #self.Config[this_param] = int(this_value)
-+ #break
-+
-+ #float values
-+ if case("DELTA_T") : pass
-+ if case("START_TIME") : pass
-+ if case("STOP_TIME") : pass
-+ if case("SPRING_MASS") : pass
-+ if case("INERTIA_FLEXURAL") : pass
-+ if case("SPRING_STIFFNESS") : pass
-+ if case("SPRING_DAMPING") : pass
-+ if case("TORSIONAL_STIFFNESS") : pass
-+ if case("TORSIONAL_DAMPING") : pass
-+ if case("CORD") : pass
-+ if case("FLEXURAL_AXIS") : pass
-+ if case("GRAVITY_CENTER") : pass
-+ if case("INITIAL_DISP") : pass
-+ if case("INITIAL_ANGLE") : pass
-+ if case("RHO") :
-+ self.Config[this_param] = float(this_value)
-+ break
-+
-+ #string values
-+ if case("TIME_MARCHING") : pass
-+ if case("MESH_FILE") : pass
-+ if case("CSD_SOLVER") : pass
-+ if case("MOVING_MARKER") : pass
-+ if case("STRUCT_TYPE") :
-+ self.Config[this_param] = this_value
-+ break
-
-- if case():
-- print(this_param + " is an invalid option !")
-+ if case():
-+ print(this_param + " is an invalid option !")
- break
-
- def __readSU2Mesh(self):
-@@ -233,78 +233,78 @@
- print('Opened mesh file ' + self.Mesh_file + '.')
- while 1:
- line = meshfile.readline()
-- if not line:
-- break
-+ if not line:
-+ break
-
-- pos = line.find('NDIM')
-- if pos != -1:
-- line = line.strip('\r\n')
-+ pos = line.find('NDIM')
-+ if pos != -1:
-+ line = line.strip('\r\n')
- line = line.split("=",1)
-- self.nDim = int(line[1])
-- continue
--
-- pos = line.find('NELEM')
-- if pos != -1:
-- line = line.strip('\r\n')
-+ self.nDim = int(line[1])
-+ continue
-+
-+ pos = line.find('NELEM')
-+ if pos != -1:
-+ line = line.strip('\r\n')
- line = line.split("=",1)
-- self.nElem = int(line[1])
-- continue
-+ self.nElem = int(line[1])
-+ continue
-
-- pos = line.find('NPOIN')
-- if pos != -1:
-- line = line.strip('\r\n')
-+ pos = line.find('NPOIN')
-+ if pos != -1:
-+ line = line.strip('\r\n')
- line = line.split("=",1)
-- self.nPoint = int(line[1])
-+ self.nPoint = int(line[1])
- for iPoint in range(self.nPoint):
-- self.node.append(Point())
-- line = meshfile.readline()
-- line = line.strip('\r\n')
-- line = line.split(' ',self.nDim)
-- x = float(line[0])
-- y = float(line[1])
-+ self.node.append(Point())
-+ line = meshfile.readline()
-+ line = line.strip('\r\n')
-+ line = line.split(' ',self.nDim)
-+ x = float(line[0])
-+ y = float(line[1])
- z = 0.0
-- if self.nDim == 3:
-- z = float(line[2])
-- self.node[iPoint].SetCoord((x,y,z))
-+ if self.nDim == 3:
-+ z = float(line[2])
-+ self.node[iPoint].SetCoord((x,y,z))
- self.node[iPoint].SetCoord0((x,y,z))
-- self.node[iPoint].SetCoord_n((x,y,z))
-- continue
-+ self.node[iPoint].SetCoord_n((x,y,z))
-+ continue
-
-- pos = line.find('NMARK')
-- if pos != -1:
-- line = line.strip('\r\n')
-+ pos = line.find('NMARK')
-+ if pos != -1:
-+ line = line.strip('\r\n')
- line = line.split("=",1)
-- self.nMarker = int(line[1])
-- continue
-+ self.nMarker = int(line[1])
-+ continue
-
-- pos = line.find('MARKER_TAG')
-- if pos != -1:
-- line = line.strip('\r\n')
-- line = line.replace(" ", "")
-+ pos = line.find('MARKER_TAG')
-+ if pos != -1:
-+ line = line.strip('\r\n')
-+ line = line.replace(" ", "")
- line = line.split("=",1)
-- markerTag = line[1]
-- if markerTag == self.FSI_marker:
-- self.markers[markerTag] = []
-- line = meshfile.readline()
-- line = line.strip('\r\n')
-- line = line.split("=",1)
-- nElem = int(line[1])
-- for iElem in range(nElem):
-- line = meshfile.readline()
-- line = line.strip('\r\n')
-- line = line.split(' ',1)
-- elemType = int(line[0])
-- if elemType == 3:
-- nodes = line[1].split(' ', 1)
-- if not int(nodes[0]) in self.markers[markerTag]:
-- self.markers[markerTag].append(int(nodes[0]))
-- if not int(nodes[1]) in self.markers[markerTag]:
-- self.markers[markerTag].append(int(nodes[1]))
-- else:
-- print("Element type {} is not recognized !!".format(elemType))
-- continue
-- else:
-- continue
-+ markerTag = line[1]
-+ if markerTag == self.FSI_marker:
-+ self.markers[markerTag] = []
-+ line = meshfile.readline()
-+ line = line.strip('\r\n')
-+ line = line.split("=",1)
-+ nElem = int(line[1])
-+ for iElem in range(nElem):
-+ line = meshfile.readline()
-+ line = line.strip('\r\n')
-+ line = line.split(' ',1)
-+ elemType = int(line[0])
-+ if elemType == 3:
-+ nodes = line[1].split(' ', 1)
-+ if not int(nodes[0]) in self.markers[markerTag]:
-+ self.markers[markerTag].append(int(nodes[0]))
-+ if not int(nodes[1]) in self.markers[markerTag]:
-+ self.markers[markerTag].append(int(nodes[1]))
-+ else:
-+ print("Element type {} is not recognized !!".format(elemType))
-+ continue
-+ else:
-+ continue
-
- print("Number of dimensions: {}".format(self.nDim))
- print("Number of elements: {}".format(self.nElem))
-@@ -441,23 +441,23 @@
- Coord_n = self.node[iPoint].GetCoord_n()
-
- if self.Unsteady:
-- r = Coord_n - self.centerOfRotation_n
-- else:
-- r = Coord - self.centerOfRotation
-+ r = Coord_n - self.centerOfRotation_n
-+ else:
-+ r = Coord - self.centerOfRotation
-
-- rotCoord = rotMatrix.dot(r)
-+ rotCoord = rotMatrix.dot(r)
-
- newCoord = newCenter + rotCoord
- newVel[0] = Centerdot[0]+psidot*(newCoord[1]-newCenter[1])
-- newVel[1] = Centerdot[1]-psidot*(newCoord[0]-newCenter[0])
-- newVel[2] = Centerdot[2]+0.0
-+ newVel[1] = Centerdot[1]-psidot*(newCoord[0]-newCenter[0])
-+ newVel[2] = Centerdot[2]+0.0
-
- self.node[iPoint].SetCoord((newCoord[0], newCoord[1], newCoord[2]))
- self.node[iPoint].SetVel((newVel[0], newVel[1], newVel[2]))
-
-- if initialize:
-- self.node[iPoint].SetCoord_n((newCoord[0], newCoord[1], newCoord[2]))
-- self.node[iPoint].SetVel_n((newVel[0], newVel[1], newVel[2]))
-+ if initialize:
-+ self.node[iPoint].SetCoord_n((newCoord[0], newCoord[1], newCoord[2]))
-+ self.node[iPoint].SetVel_n((newVel[0], newVel[1], newVel[2]))
-
- self.centerOfRotation = np.copy(newCenter)
-
-diff -Naur old/SU2_PY/FSI/io/FSI_config.py new/SU2_PY/FSI/io/FSI_config.py
---- old/SU2_PY/FSI/io/FSI_config.py 2020-05-01 19:09:18.000000000 +0300
-+++ new/SU2_PY/FSI/io/FSI_config.py 2020-05-10 16:17:07.000000000 +0300
-@@ -58,23 +58,23 @@
- self.readConfig()
-
- def __str__(self):
-- tempString = str()
-- for key, value in self._ConfigContent.items():
-- tempString += "{} = {}\n".format(key,value)
-- return tempString
-+ tempString = str()
-+ for key, value in self._ConfigContent.items():
-+ tempString += "{} = {}\n".format(key,value)
-+ return tempString
-
- def __getitem__(self,key):
-- return self._ConfigContent[key]
-+ return self._ConfigContent[key]
-
- def __setitem__(self, key, value):
-- self._ConfigContent[key] = value
-+ self._ConfigContent[key] = value
-
- def readConfig(self):
- input_file = open(self.ConfigFileName)
- while 1:
-- line = input_file.readline()
-- if not line:
-- break
-+ line = input_file.readline()
-+ if not line:
-+ break
- # remove line returns
- line = line.strip('\r\n')
- # make sure it has useful data
-@@ -86,46 +86,46 @@
- this_value = line[1].strip()
-
- for case in switch(this_param):
-- #integer values
-- if case("NDIM") : pass
-- #if case("MESH_DEF_LIN_ITER") : pass
-- #if case("MESH_DEF_NONLIN_ITER") : pass
-- if case("RESTART_ITER") : pass
-- if case("NB_EXT_ITER") : pass
-- if case("NB_FSI_ITER") :
-- self._ConfigContent[this_param] = int(this_value)
-- break
-+ #integer values
-+ if case("NDIM") : pass
-+ #if case("MESH_DEF_LIN_ITER") : pass
-+ #if case("MESH_DEF_NONLIN_ITER") : pass
-+ if case("RESTART_ITER") : pass
-+ if case("NB_EXT_ITER") : pass
-+ if case("NB_FSI_ITER") :
-+ self._ConfigContent[this_param] = int(this_value)
-+ break
-
-- #float values
-+ #float values
- if case("RBF_RADIUS") : pass
-- if case("AITKEN_PARAM") : pass
-- if case("START_TIME") : pass
-- if case("UNST_TIMESTEP") : pass
-- if case("UNST_TIME") : pass
-- if case("FSI_TOLERANCE") :
-- self._ConfigContent[this_param] = float(this_value)
-- break
--
-- #string values
-- if case("CFD_CONFIG_FILE_NAME") : pass
-- if case("CSD_SOLVER") : pass
-- if case("CSD_CONFIG_FILE_NAME") : pass
-- if case("RESTART_SOL") : pass
-- if case("MATCHING_MESH") : pass
-+ if case("AITKEN_PARAM") : pass
-+ if case("START_TIME") : pass
-+ if case("UNST_TIMESTEP") : pass
-+ if case("UNST_TIME") : pass
-+ if case("FSI_TOLERANCE") :
-+ self._ConfigContent[this_param] = float(this_value)
-+ break
-+
-+ #string values
-+ if case("CFD_CONFIG_FILE_NAME") : pass
-+ if case("CSD_SOLVER") : pass
-+ if case("CSD_CONFIG_FILE_NAME") : pass
-+ if case("RESTART_SOL") : pass
-+ if case("MATCHING_MESH") : pass
- if case("MESH_INTERP_METHOD") : pass
-- if case("DISP_PRED") : pass
-- if case("AITKEN_RELAX") : pass
-- if case("TIME_MARCHING") : pass
-- if case("INTERNAL_FLOW") :
-- #if case("MESH_DEF_METHOD") : pass
-- self._ConfigContent[this_param] = this_value
-- break
--
-- if case():
-- print(this_param + " is an invalid option !")
-- break
-- #end for
--
-+ if case("DISP_PRED") : pass
-+ if case("AITKEN_RELAX") : pass
-+ if case("TIME_MARCHING") : pass
-+ if case("INTERNAL_FLOW") :
-+ #if case("MESH_DEF_METHOD") : pass
-+ self._ConfigContent[this_param] = this_value
-+ break
-+
-+ if case():
-+ print(this_param + " is an invalid option !")
-+ break
-+ #end for
-+
-
-
- #def dump()
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [gentoo-commits] repo/proj/guru:dev commit in: sci-physics/SU2/, sci-physics/SU2/files/
@ 2021-03-10 20:02 Sergey Torokhov
0 siblings, 0 replies; 6+ messages in thread
From: Sergey Torokhov @ 2021-03-10 20:02 UTC (permalink / raw
To: gentoo-commits
commit: 4f6a6e7e7832087550137e56580251d7e3f7ca58
Author: Sergey Torokhov <torokhov-s-a <AT> yandex <DOT> ru>
AuthorDate: Wed Mar 10 20:02:19 2021 +0000
Commit: Sergey Torokhov <torokhov-s-a <AT> yandex <DOT> ru>
CommitDate: Wed Mar 10 20:02:19 2021 +0000
URL: https://gitweb.gentoo.org/repo/proj/guru.git/commit/?id=4f6a6e7e
sci-physics/SU2: drop old 7.0.8
Signed-off-by: Sergey Torokhov <torokhov-s-a <AT> yandex.ru>
sci-physics/SU2/Manifest | 4 -
sci-physics/SU2/SU2-7.0.8-r1.ebuild | 115 --------------------------
sci-physics/SU2/files/SU2-7.0.8-fix-env.patch | 12 ---
3 files changed, 131 deletions(-)
diff --git a/sci-physics/SU2/Manifest b/sci-physics/SU2/Manifest
index c5fb9ec1..9fc17003 100644
--- a/sci-physics/SU2/Manifest
+++ b/sci-physics/SU2/Manifest
@@ -1,7 +1,3 @@
-DIST SU2-7.0.7-TestCases.tar.gz 448969006 BLAKE2B 6c886824b55d7f8516d2ea69e2f7bef36a40986f4f715da46d91f851eb59390329433c6941a280eca34ad675633b2f1b01a897d1db8d177a5c4f770b286d0625 SHA512 0884b4f750dbcfd3f2cb0e71d6005932e4edd90a50fa84eb484f6c0c523930ddebfb3ed4315161b8fdeff911a52fa72b6d79739c8e19cd634b9823e007520213
-DIST SU2-7.0.7-Tutorials.tar.gz 64282235 BLAKE2B 7a6b780ee6f01d26a7a7d4751ca39798af56cfd7b99ca3e13fdff61aecd631a3aa4c98a487f48a8b2593c711ee25bd1ddc90a316bde2c287e95a383321f1d5e9 SHA512 189b5da96f08689b62ba3c42ee349edd2e145f371112895587e53497f16de3d6fdbf17308af39961775d76e3169c40872ced8e267146b6da5ae12d31a4c70fa9
-DIST SU2-7.0.8-fix-python-optimize.patch 104379 BLAKE2B e2cb9f58af5f600f25d01a082b55a344d3b05c455d44cfcb3370fc77546b6821d616e80f577b7af4b19ad519f086495eb6f6bde1f1712451ac477d6dd538fa0e SHA512 f21ed5fc1e61b78f3858a162a899c552777143118f05db009247c6224b06b922b6280de5725726afe21a3283a18ad9c38fc1c65dadb709d440e10cf3fe754d8c
-DIST SU2-7.0.8.tar.gz 20554206 BLAKE2B ac291eeae696197f5875839defa98bde550c5250b5a5cff8049e0d1c8184c5ec6d225b77618d079a8fb3d5a7f4d0531a0b2931fd15e045299fa55c7dd03d9fd1 SHA512 26ed8c39d2dfcdb3fc4e1c986c64f683f44cd4123ce5f7a482c4557a82b04b4bd8ef1f04332ab36ad0f987cfb98053fb8b30ef81faf51c7c56aebc1a30467df9
DIST SU2-7.1.0-TestCases.tar.gz 449701778 BLAKE2B 8a7cb400d330b4bbf62023525e6009a8c05af5a375b7479087b3b579d7f3162a11091f09294381fcd11d7bc358e77f10f69efc0999a304135f8850e3fe005000 SHA512 a0e354b073b4cfaeb47a9bd33ac66a636b12a065ca7caff4bd50c54e2b191809fd463f980568e0b7d4d559714d2dca99fe9a3e5854aa345be24010cd1239e772
DIST SU2-7.1.0-Tutorials.tar.gz 66570840 BLAKE2B fdbc23503abd7a250464ca8da52ab87f67069d2a3fca130038c1ea83c3b18aa476fed3a21f725b142a97923b101d7906a720f6dff28341a13915ed6216660d7f SHA512 7843a7ed5b3c606ea9821745390c70bbfd3cb66a17494360b4139e8735a8e83546acd0912462b7fc3b63ff9f2097610294c1c9af25d104962f727f599cdb8ae9
DIST SU2-7.1.0.tar.gz 20542933 BLAKE2B e6e6d82a990bf4043d1c6c58a5fe7a100a923b2c6ae6a29f7bb969e02279eeb5587f7930b2372cf67e26f451c598cf81e2515eeb7e516d6ab060ac7e986a0110 SHA512 309b4e06c5fc084ea2a06797ae400392b924c3654d354170c3159327c9e022081745bee28b1537ed3ad4e7c1333afb8c38a1ca033ff28662b8d5f5badfdc8465
diff --git a/sci-physics/SU2/SU2-7.0.8-r1.ebuild b/sci-physics/SU2/SU2-7.0.8-r1.ebuild
deleted file mode 100644
index f7e5b83f..00000000
--- a/sci-physics/SU2/SU2-7.0.8-r1.ebuild
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright 1999-2020 Gentoo Authors
-# Distributed under the terms of the GNU General Public License v2
-
-EAPI=7
-
-PYTHON_COMPAT=( python3_{7,8,9} )
-
-inherit meson python-single-r1
-
-DESCRIPTION="SU2: An Open-Source Suite for Multiphysics Simulation and Design"
-HOMEPAGE="https://su2code.github.io/"
-SRC_URI="
- https://github.com/su2code/SU2/archive/v${PV}.tar.gz -> ${P}.tar.gz
- https://github.com/band-a-prend/gentoo-overlay/raw/master/sci-physics/${PN}/files/${P}-fix-python-optimize.patch
- test? ( https://github.com/su2code/TestCases/archive/v7.0.7.tar.gz -> ${PN}-7.0.7-TestCases.tar.gz )
- tutorials? ( https://github.com/su2code/Tutorials/archive/v7.0.7.tar.gz -> ${PN}-7.0.7-Tutorials.tar.gz )
-"
-
-LICENSE="LGPL-2.1"
-SLOT="0"
-KEYWORDS="~amd64"
-
-# cgns, metis, parmetis are bundled;
-# omp is disable as it's experimental;
-# pastix is disabled as it's try to find bundled libs;
-IUSE="cgns -mkl +mpi openblas tecio test tutorials"
-RESTRICT="!test? ( test )"
-REQUIRED_USE="
- ${PYTHON_REQUIRED_USE}
- mkl? ( !openblas )
-"
-
-RDEPEND="
- ${PYTHON_DEPS}
- mpi? ( virtual/mpi[cxx] )
- mkl? ( sci-libs/mkl )
- openblas? ( sci-libs/openblas )
-"
-DEPEND="
- ${RDEPEND}
- tecio? ( dev-libs/boost:= )
-"
-BDEPEND="virtual/pkgconfig"
-
-PATCHES=(
- "${FILESDIR}/${PN}-7.0.4-unbundle_boost.patch"
- "${FILESDIR}/${P}-fix-env.patch"
- "${DISTDIR}/${P}-fix-python-optimize.patch"
-)
-
-DOCS=( "README.md" "SU2_PY/documentation.txt" )
-
-src_unpack() {
- unpack "${P}.tar.gz"
- if use test ; then
- einfo "Unpacking ${PN}-7.0.7-TestCases.tar.gz to /var/tmp/portage/sci-physics/${P}/work/${P}/TestCases"
- tar -C "${P}"/TestCases --strip-components=1 -xzf "${DISTDIR}/${PN}-7.0.7-TestCases.tar.gz" || die
- fi
- if use tutorials ; then
- einfo "Unpacking ${PN}-7.0.7-Tutorials.tar.gz to /var/tmp/portage/sci-physics/${P}/work/${P}"
- mkdir "${P}"/Tutorials || die
- tar -C "${P}"/Tutorials --strip-components=1 -xzf "${DISTDIR}/${PN}-7.0.7-Tutorials.tar.gz" || die
- fi
-}
-
-src_configure() {
- local emesonargs=(
- -Denable-autodiff=false
- -Denable-directdiff=false
- -Denable-pastix=false
- -Denable-pywrapper=false
- -Dwith-omp=false
- $(meson_feature mpi with-mpi)
- $(meson_use cgns enable-cgns)
- $(meson_use mkl enable-mkl)
- $(meson_use openblas enable-openblas)
- $(meson_use tecio enable-tecio)
- $(meson_use test enable-tests)
- )
- meson_src_configure
-}
-
-src_test() {
- ln -s ../../${P}-build/SU2_CFD/src/SU2_CFD SU2_PY/SU2_CFD
- ln -s ../../${P}-build/SU2_DEF/src/SU2_DEF SU2_PY/SU2_DEF
- ln -s ../../${P}-build/SU2_DOT/src/SU2_DOT SU2_PY/SU2_DOT
- ln -s ../../${P}-build/SU2_GEO/src/SU2_GEO SU2_PY/SU2_GEO
- ln -s ../../${P}-build/SU2_SOL/src/SU2_SOL SU2_PY/SU2_SOL
-
- export SU2_RUN="${S}/SU2_PY"
- export SU2_HOME="${S}"
- export PATH=$PATH:$SU2_RUN
- export PYTHONPATH=$PYTHONPATH:$SU2_RUN
-
- einfo "Running UnitTests ..."
- ../${P}-build/UnitTests/test_driver
-
- pushd TestCases/
- use mpi && python parallel_regression.py
- use mpi || python serial_regression.py
- use tutorials && use mpi && python tutorials.py
- popd
-}
-
-src_install() {
- meson_src_install
- mkdir -p "${ED}$(python_get_sitedir)" || die
- mv "${ED}"/usr/bin/{FSI,SU2,*.py} -t "${ED}$(python_get_sitedir)" || die
- python_optimize "${D}/$(python_get_sitedir)"
-
- if use tutorials ; then
- insinto "/usr/share/${P}"
- doins -r Tutorials
- fi
-}
diff --git a/sci-physics/SU2/files/SU2-7.0.8-fix-env.patch b/sci-physics/SU2/files/SU2-7.0.8-fix-env.patch
deleted file mode 100644
index baf7955d..00000000
--- a/sci-physics/SU2/files/SU2-7.0.8-fix-env.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff -Naur old_env/UnitTests/meson.build new_env/UnitTests/meson.build
---- old_env/UnitTests/meson.build 2020-06-15 17:03:43.000000000 +0300
-+++ new_env/UnitTests/meson.build 2020-06-15 17:04:35.000000000 +0300
-@@ -26,7 +26,7 @@
- test_driver = executable(
- 'test_driver',
- unit_test_files,
-- install : true,
-+ install : false,
- dependencies : [su2_cfd_dep, common_dep, su2_deps, catch2_dep],
- cpp_args: ['-fPIC', default_warning_flags, su2_cpp_args]
- )
^ permalink raw reply related [flat|nested] 6+ messages in thread
end of thread, other threads:[~2021-03-10 20:02 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2020-05-11 23:04 [gentoo-commits] repo/proj/guru:dev commit in: sci-physics/SU2/, sci-physics/SU2/files/ Sergey Torokhov
-- strict thread matches above, loose matches on Subject: below --
2020-06-16 14:34 Sergey Torokhov
2020-10-10 0:29 Sergey Torokhov
2020-12-09 23:40 Sergey Torokhov
2021-01-05 15:21 Andrew Ammerlaan
2021-03-10 20:02 Sergey Torokhov
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox