From bbced1533982f46f4e3ab6ca9e9f8b4c2d8577d5 Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Thu, 1 Jan 2026 22:06:20 -0800 Subject: [PATCH 1/7] build(fpm): update to Julienne 3.6.0 This commit adopts a newer version of the Julienne dependency for a fix in reporting test execution times when compiling with LLVM flang. --- fpm.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fpm.toml b/fpm.toml index b79772c6..a4cb6874 100644 --- a/fpm.toml +++ b/fpm.toml @@ -4,7 +4,7 @@ name = "MOLE" armadillo-code = {git = "https://gitlab.com/rouson/armadillo-code.git", tag = "fpm"} [dev-dependencies] -julienne = {git = "https://github.com/berkeleylab/julienne.git", tag = "3.3.0"} +julienne = {git = "https://github.com/berkeleylab/julienne.git", tag = "3.6.0"} [install] library = true From 5a03c9b47c0c475e306877273062d7dd0a505598 Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Thu, 1 Jan 2026 22:33:22 -0800 Subject: [PATCH 2/7] feat(quadrature): Ext Gauss Div, compiler workarounds Given properly initialized `vector_1D_t v` and `scalar_1D_t f` objects, this commit adds types, type-bound functions, and operators supporting the evaluation of the following residual formed from the extended Gauss Divergence Theorem of Castillo & Miranda (2013): ```fortran associate(dV => f%dV, dA => v%dA()) residual = .SSS. (v .dot. .grad. f)*dV + .SSS. (f * .div. v)*dV - .SS. (f .x. (v .dot. dA)) end associate ``` where `.SSS.` and `.SS.` represent triple and double integrals over the problem domain and its bounding surface, respectively. feat(tensors_1D_t): add dV, dA calculators This commit 1. Adds a dV generic binding to tensor_1D_t for computing differential volumes for volume integration. In 1D, volume integrals are calculated on a per-unit-of-normal- surface-area basis so dV = dx*dy*dz = dx(1)(1). 2. Adds a dA procedure binding to vector_1D_t for computing surface-area differentials for surface integrals involving the dot product of a vector with a surface-normal. 
In 1D, surface integrals are calculated on a per-unit-of-normal- surface basis so dA = dy*dz = 1. build(ifx): work around ifx bugs 1. Symptom: crashing surface-integral unit test at grid resolutions above ~413 points. Fix: when ifx is detected, reduce grid points to 400 and slightly increase the error tolerance. 2. Symptom: test hangs if the `gradient_1D_t` type has a `weights` generic binding associated with the grandparent `tensor_1D_t` type's `gradient_1D_weights` binding. Fix: when ifx is detected, move the generic binding up to the parent `vector_1D_t` type. build(gfortran): work around gfortran issues 1. To eliminate gfortran 13-14 internal compiler errors on Ubuntu in CI, replace two `weights` generic binding invocations with invocations of the corresponding type-bound functions: `gradient_1D_weights` and `divergence_1D_weights`. 2. To work around missing featuers in gfortran 13-14, remove do-concurrent type-specs and locality specifiers when gfortran is detected. --- src/fortran/divergence_1D_s.F90 | 44 +++++- src/fortran/divergence_operator_1D_s.F90 | 2 +- src/fortran/gradient_1D_s.F90 | 60 ++++++++ src/fortran/mimetic_operators_1D_m.F90 | 3 +- src/fortran/mole_m.f90 | 17 ++- src/fortran/scalar_1D_s.F90 | 32 ++++- src/fortran/scalar_x_divergence_1D_s.F90 | 14 ++ src/fortran/tensor_1D_s.f90 | 5 + src/fortran/tensors_1D_m.F90 | 168 ++++++++++++++++++++--- src/fortran/vector_1D_s.F90 | 125 ++++++++++++++++- src/fortran/vector_dot_gradient_1D_s.F90 | 14 ++ src/fortran/weighted_product_1D_s.F90 | 10 ++ 12 files changed, 457 insertions(+), 37 deletions(-) create mode 100644 src/fortran/gradient_1D_s.F90 create mode 100644 src/fortran/scalar_x_divergence_1D_s.F90 create mode 100644 src/fortran/vector_dot_gradient_1D_s.F90 create mode 100644 src/fortran/weighted_product_1D_s.F90 diff --git a/src/fortran/divergence_1D_s.F90 b/src/fortran/divergence_1D_s.F90 index 254948b2..1c3e762a 100644 --- a/src/fortran/divergence_1D_s.F90 +++ b/src/fortran/divergence_1D_s.F90 
@@ -1,4 +1,10 @@ +#include "julienne-assert-macros.h" + submodule(tensors_1D_m) divergence_1D_s + use julienne_m, only : & + call_julienne_assert_ & + ,operator(.equalsExpected.) & + ,operator(.isAtLeast.) implicit none contains @@ -18,8 +24,20 @@ pure function cell_center_locations(x_min, x_max, cells) result(x) #endif - module procedure construct_from_tensor - divergence_1D%tensor_1D_t = tensor_1D + module procedure premultiply_scalar_1D + call_julienne_assert(size(scalar_1D%values_) .equalsExpected. size(divergence_1D%values_) + 2) + scalar_x_divergence_1D%tensor_1D_t = & + tensor_1D_t(scalar_1D%values_(2:size(scalar_1D%values_)-1) * divergence_1D%values_, scalar_1D%x_min_, scalar_1D%x_max_, scalar_1D%cells_, scalar_1D%order_) +#ifndef __GFORTRAN__ + scalar_x_divergence_1D%weights_ = divergence_1D%weights() +#else + scalar_x_divergence_1D%weights_ = divergence_1D%divergence_1D_weights() +#endif + call_julienne_assert(size(scalar_x_divergence_1D%weights_) .equalsExpected. size(divergence_1D%values_)+2) + end procedure + + module procedure postmultiply_scalar_1D + scalar_x_divergence_1D = premultiply_scalar_1D(scalar_1D, divergence_1D) end procedure module procedure divergence_1D_values @@ -30,4 +48,26 @@ pure function cell_center_locations(x_min, x_max, cells) result(x) cell_centers = cell_center_locations(self%x_min_, self%x_max_, self%cells_) end procedure + module procedure divergence_1D_weights + integer c + + double precision, allocatable :: skin(:) + + select case(self%order_) + case(2) + skin = [double precision::] + case(4) + skin = [1D0, 2186/1943D0, 1992/2651D0, 1993/1715D0, 649/674D0, 699/700D0, 18170/18171D0, 471744/471745D0] + case default + error stop "unsupported order" + end select + + associate(depth => size(skin)) + weights = [skin, [(1D0, c = depth+1, self%cells_+2-depth)], skin(depth:1:-1) ] ! m+2 values, where m = self%cells_ + end associate ! cf. Corbino & Castillo (2020) Eqs. 14-15 & 19 + + call_julienne_assert(self%cells_ .isAtLeast. 
2*size(skin)) + call_julienne_assert(size(weights) .equalsExpected. self%cells_+2) + end procedure + end submodule divergence_1D_s \ No newline at end of file diff --git a/src/fortran/divergence_operator_1D_s.F90 b/src/fortran/divergence_operator_1D_s.F90 index ba019d9e..b7aeaf3b 100644 --- a/src/fortran/divergence_operator_1D_s.F90 +++ b/src/fortran/divergence_operator_1D_s.F90 @@ -164,7 +164,7 @@ pure function M(k, dx) result(row) block integer col do concurrent(col=1:cols) - D(:,col) = divergence_matrix_multiply(self, e(dir=col, length=rows)) + D(:,col) = divergence_matrix_multiply(self, e(dir=col, length=cols)) ! work around gfortran 13-14 on Ubuntu (https://github.com/rouson/mole/actions/runs/19996603010/job/57345130923) end do end block diff --git a/src/fortran/gradient_1D_s.F90 b/src/fortran/gradient_1D_s.F90 new file mode 100644 index 00000000..f984ba13 --- /dev/null +++ b/src/fortran/gradient_1D_s.F90 @@ -0,0 +1,60 @@ +#include "julienne-assert-macros.h" + +submodule(tensors_1D_m) gradient_1D_s + use julienne_m, only : & + call_julienne_assert_ & + ,operator(.approximates.) & + ,operator(.equalsExpected.) & + ,operator(.isAtLeast.) & + ,operator(.within.) + implicit none + + double precision, parameter :: double_equivalence = 1D-15 + +contains + + module procedure gradient_1D_weights + + integer face + double precision, allocatable :: skin(:) + + select case(self%order_) + case(2) + skin = [3/8D0, 9/8D0] + case(4) + skin = [227/641D0, 941/766D0, 811/903D0, 1373/1348D0, 1401/1400D0, 36343/36342D0, 943491/943490D0] + case default + error stop "unsupported order" + end select + + associate(depth => size(skin)) + call_julienne_assert(self%cells_ .isAtLeast. 2*depth) + weights = [skin, [(1D0, face = 1, self%cells_ + 1 - 2*depth)], skin(depth:1:-1) ] + end associate + + call_julienne_assert(size(weights) .equalsExpected. self%cells_ + 1) + + end procedure + + module procedure dot + call_julienne_assert(size(gradient_1D%values_) .equalsExpected. 
size(vector_1D%values_)) + call_julienne_assert(gradient_1D%order_ .equalsExpected. vector_1D%order_) + call_julienne_assert(gradient_1D%cells_ .equalsExpected. vector_1D%cells_) + call_julienne_assert(gradient_1D%x_min_ .approximates. vector_1D%x_min_ .within. double_equivalence) + call_julienne_assert(gradient_1D%x_max_ .approximates. vector_1D%x_max_ .within. double_equivalence) + + vector_dot_gradient_1D%tensor_1D_t = tensor_1D_t( & + values = gradient_1D%values_ * vector_1D%values_ & + ,x_min = gradient_1D%x_min_ & + ,x_max = gradient_1D%x_max_ & + ,cells = gradient_1D%cells_ & + ,order = gradient_1D%order_ & + ) +#ifndef __GFORTRAN__ + vector_dot_gradient_1D%weights_ = gradient_1D%weights() +#else + vector_dot_gradient_1D%weights_ = gradient_1D%gradient_1D_weights() +#endif + end procedure + +end submodule gradient_1D_s diff --git a/src/fortran/mimetic_operators_1D_m.F90 b/src/fortran/mimetic_operators_1D_m.F90 index 279be943..47f9ddd2 100644 --- a/src/fortran/mimetic_operators_1D_m.F90 +++ b/src/fortran/mimetic_operators_1D_m.F90 @@ -1,7 +1,8 @@ #include "mole-language-support.F90" module mimetic_operators_1D_m - !! Define 1D scalar and vector abstractions and associated mimetic gradient and divergence operators. + !! Define sparse matrix storage formats and operators tailored to the one-dimensional (1D) mimetic discretizations + !! detaild by Corbino & Castillo (2020) https://doi.org/10.1016/j.cam.2019.06.042. use julienne_m, only : file_t implicit none diff --git a/src/fortran/mole_m.f90 b/src/fortran/mole_m.f90 index 1eda112a..16ae7a19 100644 --- a/src/fortran/mole_m.f90 +++ b/src/fortran/mole_m.f90 @@ -1,13 +1,16 @@ module mole_m - !! All public MOLE Fortran entities: + !! This module contains all public MOLE Fortran entities. For descriptions of the public procedures bound to the derived types + !! below, see the interface bodies in the corresponding module (e.g., tensors_1D_m). Please see the programs in the `example` + !! 
subdirectory for demonstrations of how to use the entities in this module. use tensors_1D_m, only : & - scalar_1D_t & ! discrete 1D scalar abstraction supporting mimetic gradient (.grad.) and Laplacian (.laplacian.) operators - ,vector_1D_t & ! discrete 1D vector abstraction supporting a mimetic divergence (.div.). operator - ,divergence_1D_t & ! result of applying the unary .div. operator to a vector_1D_t operand - ,laplacian_1D_t & ! result of applying the unary .laplacian. operator to a scalar_1D_t operand - ,scalar_1D_initializer_i & ! abstract interface for a scalar_1D_t initialization function - ,vector_1D_initializer_i ! abstract interface for a vector_1D_t initialization function + scalar_1D_t & ! discrete 1D scalar field abstraction + ,vector_1D_t & ! discrete 1D vector field abstraction + ,gradient_1D_t & ! result of an expression such as `.grad. s` for a scalar_1D_t s + ,divergence_1D_t & ! result of an expression such as `.div. v` for a vector_1D_t v + ,laplacian_1D_t & ! result of an expression such as `.laplacian. s` for a scalar_1D_t s + ,scalar_1D_initializer_i & ! abstract interface for a scalar_1D_t initialization function + ,vector_1D_initializer_i ! abstract interface for a vector_1D_t initialization function implicit none diff --git a/src/fortran/scalar_1D_s.F90 b/src/fortran/scalar_1D_s.F90 index 9eae4be5..19247138 100644 --- a/src/fortran/scalar_1D_s.F90 +++ b/src/fortran/scalar_1D_s.F90 @@ -1,7 +1,18 @@ #include "julienne-assert-macros.h" submodule(tensors_1D_m) scalar_1D_s - use julienne_m, only : call_julienne_assert_, operator(.greaterThan.), operator(.isAtLeast.) + use julienne_m, only : & + call_julienne_assert_ & + ,julienne_assert & + ,operator(//) & + ,operator(.all.) & + ,operator(.approximates.) & + ,operator(.equalsExpected.) & + ,operator(.csv.) & + ,operator(.isAtLeast.) & + ,operator(.greaterThan.) & + ,operator(.within.) 
& + ,string_t implicit none contains @@ -51,12 +62,21 @@ pure function cell_center_locations(x_min, x_max, cells) result(x) #endif - module procedure grad - gradient_1D = vector_1D_t( & - tensor_1D_t(self%gradient_operator_1D_ .x. self%values_, self%x_min_, self%x_max_, self%cells_, self%order_) & - ,divergence_operator_1D_t(self%order_, (self%x_max_-self%x_min_)/self%cells_, self%cells_) & - ) + + integer c + + associate(dx => (self%x_max_ - self%x_min_)/self%cells_) + associate(G => gradient_operator_1D_t(self%order_, dx, self%cells_)) + gradient_1D%tensor_1D_t = tensor_1D_t(G .x. self%values_, self%x_min_, self%x_max_, cells=self%cells_, order=self%order_) + gradient_1D%divergence_operator_1D_ = divergence_operator_1D_t(self%order_, dx, self%cells_) + check_corbino_castillo_eq_17: & + associate(p => gradient_1D%weights(), b => [-1D0, [(0D0, c = 1, self%cells_)], 1D0]) + call_julienne_assert((.all. (matmul(transpose(G%assemble()), p) .approximates. b/dx .within. 2D-3))) + end associate check_corbino_castillo_eq_17 + end associate + end associate + end procedure module procedure laplacian diff --git a/src/fortran/scalar_x_divergence_1D_s.F90 b/src/fortran/scalar_x_divergence_1D_s.F90 new file mode 100644 index 00000000..5ac431e2 --- /dev/null +++ b/src/fortran/scalar_x_divergence_1D_s.F90 @@ -0,0 +1,14 @@ +#include "julienne-assert-macros.h" + +submodule(tensors_1D_m) scalar_x_divergence_1D_s + use julienne_m, only : call_julienne_assert_, operator(.equalsExpected.) + implicit none + +contains + + module procedure volume_integrate_scalar_x_divergence_1D + call_julienne_assert(size(integrand%weights_ ) .equalsExpected. 
size(integrand%values_)+2) + integral = sum(integrand%weights_ * [0D0, integrand%values_, 0D0]) + end procedure + +end submodule scalar_x_divergence_1D_s diff --git a/src/fortran/tensor_1D_s.f90 b/src/fortran/tensor_1D_s.f90 index 15630a1a..08de9329 100644 --- a/src/fortran/tensor_1D_s.f90 +++ b/src/fortran/tensor_1D_s.f90 @@ -10,4 +10,9 @@ tensor_1D%order_ = order end procedure + module procedure dx + dx = (self%x_max_ - self%x_min_)/self%cells_ + end procedure + + end submodule tensor_1D_s diff --git a/src/fortran/tensors_1D_m.F90 b/src/fortran/tensors_1D_m.F90 index d9e76219..f0af63e6 100644 --- a/src/fortran/tensors_1D_m.F90 +++ b/src/fortran/tensors_1D_m.F90 @@ -2,7 +2,8 @@ module tensors_1D_m !! Define public 1D scalar and vector abstractions and associated mimetic gradient, - !! divergence, and Laplacian operators. + !! divergence, and Laplacian operators as detailed by Corbino & Castillo (2020) + !! https://doi.org/10.1016/j.cam.2019.06.042. use julienne_m, only : file_t use mimetic_operators_1D_m, only : divergence_operator_1D_t, gradient_operator_1D_t @@ -12,6 +13,7 @@ module tensors_1D_m public :: scalar_1D_t public :: vector_1D_t + public :: gradient_1D_t public :: laplacian_1D_t public :: divergence_1D_t public :: scalar_1D_initializer_i @@ -45,6 +47,11 @@ pure function vector_1D_initializer_i(x) result(v) integer cells_ !! number of grid cells spanning the domain integer order_ !! order of accuracy of mimetic discretization double precision, allocatable :: values_(:) !! tensor components at spatial locations + contains + procedure, non_overridable, private :: gradient_1D_weights + procedure, non_overridable, private :: divergence_1D_weights + generic :: dV => dx + procedure, non_overridable :: dx end type interface tensor_1D_t @@ -93,18 +100,32 @@ pure module function construct_1D_scalar_from_function(initializer, order, cells end interface type, extends(tensor_1D_t) :: vector_1D_t - !! 
Encapsulate 1D vector values at cell faces (nodes in 1D) and corresponding operators + !! Encapsulate 1D vector values at cell faces (of unit area for 1D) and corresponding operators private type(divergence_operator_1D_t) divergence_operator_1D_ contains + generic :: operator(.x.) => weighted_premultiply generic :: operator(.div.) => div + generic :: operator(.dot.) => dot_surface_normal generic :: grid => vector_1D_grid generic :: values => vector_1D_values +#ifdef __INTEL_COMPILER + generic :: weights => gradient_1D_weights +#endif + procedure, non_overridable :: dA + procedure, non_overridable, pass(vector_1D) :: weighted_premultiply procedure, non_overridable, private :: div + procedure, non_overridable, private :: dot_surface_normal procedure, non_overridable, private :: vector_1D_grid procedure, non_overridable, private :: vector_1D_values end type + type, extends(tensor_1D_t) :: weighted_product_1D_t + contains + generic :: operator(.SS.) => surface_integrate_vector_x_scalar_1D + procedure, non_overridable, private :: surface_integrate_vector_x_scalar_1D + end type + interface vector_1D_t pure module function construct_1D_vector_from_function(initializer, order, cells, x_min, x_max) result(vector_1D) @@ -128,25 +149,46 @@ pure module function construct_from_components(tensor_1D, divergence_operator_1D end interface + type, extends(vector_1D_t) :: gradient_1D_t + !! A 1D mimetic gradient vector field abstraction with a public method that produces corresponding numerical quadrature weights + contains + generic :: operator(.dot.) => dot +#ifndef __INTEL_COMPILER + generic :: weights => gradient_1D_weights +#endif + procedure, non_overridable, private, pass(gradient_1D) :: dot + end type + + type, extends(tensor_1D_t) :: vector_dot_gradient_1D_t + !! Result is the dot product of a 1D vector field and a 1D gradient field + private + double precision, allocatable :: weights_(:) + contains + generic :: operator(.SSS.) 
=> volume_integrate_vector_dot_grad_scalar_1D + procedure, non_overridable, private, pass(integrand) ::volume_integrate_vector_dot_grad_scalar_1D + end type + type, extends(tensor_1D_t) :: divergence_1D_t !! Encapsulate divergences at cell centers contains generic :: grid => divergence_1D_grid generic :: values => divergence_1D_values + generic :: weights => divergence_1D_weights + generic :: operator(*) => premultiply_scalar_1D, postmultiply_scalar_1D + procedure, non_overridable, private, pass(divergence_1D) :: premultiply_scalar_1D + procedure, non_overridable, private :: postmultiply_scalar_1D procedure, non_overridable, private :: divergence_1D_values procedure, non_overridable, private :: divergence_1D_grid end type - interface divergence_1D_t - - pure module function construct_from_tensor(tensor_1D) result(divergence_1D) - !! Result is a 1D divergence with the provided parent component - implicit none - type(tensor_1D_t), intent(in) :: tensor_1D - type(divergence_1D_t) divergence_1D - end function - - end interface + type, extends(tensor_1D_t) :: scalar_x_divergence_1D_t + !! product of a 1D scalar field and a 1D divergence field + private + double precision, allocatable :: weights_(:) + contains + generic :: operator(.SSS.) => volume_integrate_scalar_x_divergence_1D + procedure, non_overridable, private, pass(integrand) :: volume_integrate_scalar_x_divergence_1D + end type type, extends(divergence_1D_t) :: laplacian_1D_t private @@ -157,6 +199,21 @@ pure module function construct_from_tensor(tensor_1D) result(divergence_1D) interface + pure module function dA(self) + !! Result is the grid's discrete surface-area differential for use in surface integrals of the form + !! .SS. (f .x. (v .dot. dA)) + implicit none + class(vector_1D_t), intent(in) :: self + double precision dA + end function + + pure module function dx(self) + !! 
Result is the uniform cell width + implicit none + class(tensor_1D_t), intent(in) :: self + double precision dx + end function + pure module function scalar_1D_grid(self) result(cell_centers_extended) !! Result is the array of locations at which 1D scalars are defined: cell centers agumented by spatial boundaries implicit none @@ -165,7 +222,7 @@ pure module function scalar_1D_grid(self) result(cell_centers_extended) end function pure module function vector_1D_grid(self) result(cell_faces) - !! Result is the array of cell face locations (nodes in 1D) at which 1D vectors are defined + !! Result is the array of cell face locations (of unit area for 1D) at which 1D vectors are defined implicit none class(vector_1D_t), intent(in) :: self double precision, allocatable :: cell_faces(:) @@ -186,7 +243,7 @@ pure module function scalar_1D_values(self) result(cell_centers_extended_values) end function pure module function vector_1D_values(self) result(face_centered_values) - !! Result is an array of the 1D vector values at cell faces (nodes in 1D) + !! Result is an array of the 1D vector values at cell faces (of unit area 1D) implicit none class(vector_1D_t), intent(in) :: self double precision, allocatable :: face_centered_values(:) @@ -203,14 +260,14 @@ pure module function grad(self) result(gradient_1D) !! Result is mimetic gradient of the scalar_1D_t "self" implicit none class(scalar_1D_t), intent(in) :: self - type(vector_1D_t) gradient_1D !! discrete gradient + type(gradient_1D_t) gradient_1D end function pure module function laplacian(self) result(laplacian_1D) !! Result is mimetic Laplacian of the scalar_1D_t "self" implicit none class(scalar_1D_t), intent(in) :: self - type(laplacian_1D_t) laplacian_1D !! discrete gradient + type(laplacian_1D_t) laplacian_1D end function pure module function reduced_order_boundary_depth(self) result(num_nodes) @@ -227,6 +284,85 @@ pure module function div(self) result(divergence_1D) type(divergence_1D_t) divergence_1D !! 
discrete divergence end function + pure module function volume_integrate_vector_dot_grad_scalar_1D(integrand) result(integral) + !! Result is the mimetic quadrature corresponding to a volume integral of a vector-gradient dot product + implicit none + class(vector_dot_gradient_1D_t), intent(in) :: integrand + double precision integral + end function + + pure module function volume_integrate_scalar_x_divergence_1D(integrand) result(integral) + !! Result is the mimetic quadrature corresponding to a volume integral of a scalar-divergence product + implicit none + class(scalar_x_divergence_1D_t), intent(in) :: integrand + double precision integral + end function + + pure module function surface_integrate_vector_x_scalar_1D(integrand) result(integral) + !! Result is the mimetic quadrature corresponding to a surface integral of a scalar-vector product + implicit none + class(weighted_product_1D_t), intent(in) :: integrand + double precision integral + end function + + pure module function dot(vector_1D, gradient_1D) result(vector_dot_gradient_1D) + !! Result is the dot product of the vector_1D_t operand and the gradient_1D_t operand + implicit none + class(gradient_1D_t), intent(in) :: gradient_1D + type(vector_1D_t), intent(in) :: vector_1D + type(vector_dot_gradient_1D_t) vector_dot_gradient_1D + end function + + pure module function dot_surface_normal(vector_1D, dS) result(v_dot_dS) + !! Result is magnitude of a vector/surface-normal dot product for use in surface integrals of the form + !! `.SS. (f .x. (v .dot. dA))` + !! The sign of the dot-product is incorporated into the weights in the weighted multiplication operator(.x.). + implicit none + class(vector_1D_t), intent(in) :: vector_1D + double precision, intent(in) :: dS + type(vector_1D_t) v_dot_dS + end function + + pure module function weighted_premultiply(scalar_1D, vector_1D) result(weighted_product_1D) + !! 
Result is the product of a boundary-weighted vector_1D_t with a scalar_1D_t + implicit none + type(scalar_1D_t), intent(in) :: scalar_1D + class(vector_1D_t), intent(in) :: vector_1D + type(weighted_product_1D_t) weighted_product_1D + end function + + pure module function gradient_1D_weights(self) result(weights) + !! Result is an array of quadrature coefficients that can be used to compute a weighted + !! inner product of a vector_1D_t object and a gradient_1D_t object. + implicit none + class(tensor_1D_t), intent(in) :: self + double precision, allocatable :: weights(:) + end function + + pure module function divergence_1D_weights(self) result(weights) + !! Result is an array of quadrature coefficients that can be used to compute a weighted + !! inner product of a vector_1D_t object and a gradient_1D_t object. + implicit none + class(tensor_1D_t), intent(in) :: self + double precision, allocatable :: weights(:) + end function + + pure module function premultiply_scalar_1D(scalar_1D, divergence_1D) result(scalar_x_divergence_1D) + !! Result is the point-wise product of a 1D scalar field and the divergence of a 1D vector field + implicit none + type(scalar_1D_t), intent(in) :: scalar_1D + class(divergence_1D_t), intent(in) :: divergence_1D + type(scalar_x_divergence_1D_t) scalar_x_divergence_1D + end function + + pure module function postmultiply_scalar_1D(divergence_1D, scalar_1D) result(scalar_x_divergence_1D) + !! 
Result is the point-wise product of a 1D scalar field and the divergence of a 1D vector field + implicit none + class(divergence_1D_t), intent(in) :: divergence_1D + type(scalar_1D_t), intent(in) :: scalar_1D + type(scalar_x_divergence_1D_t) scalar_x_divergence_1D + end function + end interface #ifndef __GFORTRAN__ diff --git a/src/fortran/vector_1D_s.F90 b/src/fortran/vector_1D_s.F90 index 3099248f..11c7a061 100644 --- a/src/fortran/vector_1D_s.F90 +++ b/src/fortran/vector_1D_s.F90 @@ -1,11 +1,29 @@ #include "julienne-assert-macros.h" submodule(tensors_1D_m) vector_1D_s - use julienne_m, only : call_julienne_assert_, operator(.greaterThan.), operator(.isAtLeast.), operator(.equalsExpected.) + use julienne_m, only : & + call_julienne_assert_ & + ,operator(//) & + ,operator(.all.) & + ,operator(.approximates.) & + ,operator(.csv.) & + ,operator(.cat.) & + ,operator(.sv.) & + ,operator(.equalsExpected.) & + ,operator(.isAtLeast.) & + ,operator(.greaterThan.) & + ,operator(.within.) implicit none + double precision, parameter :: double_equivalence = 2D-4 + contains + module procedure dot_surface_normal + v_dot_dS%tensor_1D_t = tensor_1D_t(vector_1D%values_*dS, vector_1D%x_min_, vector_1D%x_max_, vector_1D%cells_, vector_1D%order_) + v_dot_dS%divergence_operator_1D_ = vector_1D%divergence_operator_1D_ + end procedure + #ifndef __GFORTRAN__ module procedure construct_1D_vector_from_function @@ -45,10 +63,24 @@ pure module function construct_1D_vector_from_function(initializer, order, cells end procedure module procedure div - associate(Dv => self%divergence_operator_1D_ .x. self%values_) - call_julienne_assert(size(Dv) .equalsExpected. self%cells_ + 2) - divergence_1D = divergence_1D_t( tensor_1D_t(Dv(2:size(Dv)-1), self%x_min_, self%x_max_, self%cells_, self%order_) ) + + integer center + + associate(D => (self%divergence_operator_1D_)) + associate(Dv => D .x. 
self%values_) + divergence_1D%tensor_1D_t = tensor_1D_t(Dv(2:size(Dv)-1), self%x_min_, self%x_max_, self%cells_, self%order_) + associate( & + q => divergence_1D%weights() & + ,dx => (self%x_max_ - self%x_min_)/self%cells_ & + ,b => [-1D0, [(0D0, center = 1, self%cells_-1)], 1D0] & + ) + call_julienne_assert(.all. ([size(Dv), size(q)] .equalsExpected. self%cells_+2)) + call_julienne_assert((.all. (matmul(transpose(D%assemble()), q) .approximates. b/dx .within. double_equivalence))) + ! Check D^T * a = b_{m+1}, Eq. (19), Corbino & Castillo (2020) + end associate + end associate end associate + end procedure module procedure vector_1D_values @@ -70,4 +102,89 @@ pure function faces(x_min, x_max, cells) result(x) cell_faces = faces(self%x_min_, self%x_max_, self%cells_) end procedure + module procedure weighted_premultiply + + !! vector values at faces scalar values at cell centers + boundaries + call_julienne_assert(size(vector_1D%values_) .equalsExpected. size(scalar_1D%values_)-1) + call_julienne_assert( vector_1D%cells_ .equalsExpected. scalar_1D%cells_ ) + call_julienne_assert( vector_1D%order_ .equalsExpected. scalar_1D%order_ ) + call_julienne_assert(vector_1D%x_min_ .approximates. scalar_1D%x_min_ .within. double_equivalence) + call_julienne_assert(vector_1D%x_max_ .approximates. scalar_1D%x_max_ .within. double_equivalence) + + associate( & + q => vector_1D%divergence_1D_weights() & + ,p => vector_1D%gradient_1D_weights() & + ,D => vector_1D%divergence_operator_1D_%assemble() & + ,G => scalar_1D%gradient_operator_1D_%assemble() & + ,m => scalar_1D%cells_ & + ) + call_julienne_assert(.all. (shape(G) .equalsExpected. [m+1,m+2])) + call_julienne_assert(.all. (shape(D) .equalsExpected. [m+2,m+1])) + associate( & + QD => premultiply_diagonal(q,D) & + ,GTP => postmultiply_diagonal(transpose(G),p) & + ,dx => vector_1D%dx() & + ) + call_julienne_assert(.all. (shape(QD) .equalsExpected. shape(GTP))) + associate(B => QD + GTP) ! Eq. 
(7), Corbino & Castillo (2020) + weighted_product_1D%tensor_1D_t = & + tensor_1D_t(dx * matmul(B,vector_1D%values_) * scalar_1D%values_, vector_1D%x_min_, vector_1D%x_max_, vector_1D%cells_, vector_1D%order_) + end associate + end associate + end associate + + contains + + pure function premultiply_diagonal(d,A) result(DA) + double precision, intent(in) :: d(:), A(:,:) + double precision, allocatable :: DA(:,:) + + call_julienne_assert(size(d) .equalsExpected. size(A,1)) + + allocate(DA, mold=A) + +#if defined(HAVE_DO_CONCURRENT_TYPE_SPEC_SUPPORT) && defined(HAVE_LOCALITY_SPECIFIER_SUPPORT) + do concurrent(integer :: row = 1 : size(A,1)) default(none) shared(d, A, DA) + DA(row,:) = d(row) * A(row,:) + end do +#else + block + integer row + do concurrent(row = 1 : size(A,1)) + DA(row,:) = d(row) * A(row,:) + end do + end block +#endif + + end function + + pure function postmultiply_diagonal(A,d) result(AD) + double precision, intent(in) :: A(:,:), d(:) + double precision, allocatable :: AD(:,:) + + call_julienne_assert(size(d) .equalsExpected. size(A,2)) + + allocate(AD, mold=A) + +#if defined(HAVE_DO_CONCURRENT_TYPE_SPEC_SUPPORT) && defined(HAVE_LOCALITY_SPECIFIER_SUPPORT) + do concurrent(integer :: column = 1 : size(A,2)) default(none) shared(d, A, AD) + AD(:,column) = A(:,column) * d(column) + end do +#else + block + integer column + do concurrent(column = 1 : size(A,2)) + AD(:,column) = A(:,column) * d(column) + end do + end block +#endif + + end function + + end procedure + + module procedure dA + dA = 1D0 + end procedure + end submodule vector_1D_s \ No newline at end of file diff --git a/src/fortran/vector_dot_gradient_1D_s.F90 b/src/fortran/vector_dot_gradient_1D_s.F90 new file mode 100644 index 00000000..32ea0b7d --- /dev/null +++ b/src/fortran/vector_dot_gradient_1D_s.F90 @@ -0,0 +1,14 @@ +#include "julienne-assert-macros.h" + +submodule(tensors_1D_m) vector_dot_gradient_1D_s + use julienne_m, only: call_julienne_assert_, operator(.equalsExpected.) 
+ implicit none + +contains + + module procedure volume_integrate_vector_dot_grad_scalar_1D + call_julienne_assert(size(integrand%weights_ ) .equalsExpected. size(integrand%values_)) + integral = sum(integrand%weights_ * integrand%values_) + end procedure + +end submodule vector_dot_gradient_1D_s diff --git a/src/fortran/weighted_product_1D_s.F90 b/src/fortran/weighted_product_1D_s.F90 new file mode 100644 index 00000000..b821959a --- /dev/null +++ b/src/fortran/weighted_product_1D_s.F90 @@ -0,0 +1,10 @@ +submodule(tensors_1D_m) weighted_product_s + implicit none + +contains + + module procedure surface_integrate_vector_x_scalar_1D + integral = sum(integrand%values_) + end procedure + +end submodule weighted_product_s From 100702217edb0cd584cc178e7a026b64302fd451 Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Thu, 1 Jan 2026 22:40:41 -0800 Subject: [PATCH 3/7] doc(README): update ifx build/test command --- src/fortran/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fortran/README.md b/src/fortran/README.md index ebdb131b..ed62634f 100644 --- a/src/fortran/README.md +++ b/src/fortran/README.md @@ -15,7 +15,7 @@ and placing the resulting executable program in your `PATH` suffices. 
| Vendor | Compiler | Version | Build/Test Command | |--------|-------------|----------|-------------------------------------------------------| | GCC | `gfortran` | 13 | fpm test --compiler gfortran --profile release | -| Intel | `ifx` | 2025.1.2 | fpm test --compiler ifx --profile release --flag -fpp | +| Intel | `ifx` | 2025.1.2 | FOR_COARRAY_NUM_IMAGES=1 fpm test --compiler ifx --flag "-fpp -O3 -coarray" --profile release | | LLVM | `flang-new` | 19 | fpm test --compiler flang-new --flag "-O3" | | NAG | `nagfor` | 7.2 | fpm test --compiler nagfor --flag "-O3 -fpp" | From c241dca51682593e7b4e7de0aa16f03e033415b4 Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Thu, 1 Jan 2026 22:42:28 -0800 Subject: [PATCH 4/7] doc(UML): add newest entities to class diagram --- doc/fortran-classes.md | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/doc/fortran-classes.md b/doc/fortran-classes.md index 76e4ea30..c10d957a 100644 --- a/doc/fortran-classes.md +++ b/doc/fortran-classes.md @@ -10,6 +10,7 @@ classDiagram class tensor_1D_t class scalar_1D_t class vector_1D_t +class gradient_1D_t class divergence_1D_t class laplacian_1D_t class gradient_operator_1D_t @@ -19,14 +20,20 @@ class mimetic_matrix_1D_t tensor_1D_t <|-- scalar_1D_t : is a tensor_1D_t <|-- vector_1D_t : is a tensor_1D_t <|-- divergence_1D_t : is a + +tensor_1D_t <|-- weighted_product_1D_t +tensor_1D_t <|-- vector_dot_gradient_1D_t +tensor_1D_t <|-- scalar_x_divergence_1D_t + divergence_1D_t <|-- laplacian_1D_t : is a +vector_1D_t <|-- gradient_1D_t : is a mimetic_matrix_1D_t <|-- gradient_operator_1D_t : is a mimetic_matrix_1D_t <|-- divergence_operator_1D_t : is a class scalar_1D_t{ - gradient_operator_1D_ : gradient_operator_1D_t - + operator(.grad.) vector_1D_t - + operator(.laplacian.) scalar_1D_t + + operator(.grad.) gradient_1D_t + + operator(.laplacian.) laplacian_1D_t } class vector_1D_t{ @@ -34,10 +41,14 @@ class vector_1D_t{ + operator(.div.) 
divergence_1D_t } +class gradient_1D_t{ + - weights : double precision[] +} + class mimetic_matrix_1D_t{ - - upper_ :: double precision - - inner_ :: double precision - - lower_ :: double precision + - upper_ :: double precision[] + - inner_ :: double precision[] + - lower_ :: double precision[] } class gradient_operator_1D_t{ From fea591a4cf311db24f6d62fb952eb3d64dd61280 Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Thu, 1 Jan 2026 23:01:16 -0800 Subject: [PATCH 5/7] test(quadrature): ext Gauss div thm terms, residual The tests exhibit the following numerical integration behaviors: 1. At 500-cell resolution, a. 2nd-order quadrature converges quadratically: O(dx^2), b. 4th-order quadrature converges linearly: O(dx), and c. 4th-order quadrature yields maximum absolute errors roughly three orders of magnitude smaller than the 2nd-order scheme. 2. At even just a 20-cell resolution, 2nd- and 4th-order schemes satisfy the discrete Extended Gauss Divergence Theorem down to a residual near machine zero: ~10^-14. This commit adds passing tests that check the 1D equivalents of the following three integral terms in the extended Gauss divergence theorem using 2nd & 4th-order mimetic quadratures. refact(tensors_1D): absorb child procs This commit moves the gradient_1D_weights and divergence_1D_weights type-bound procedures up to the parent tensor_1D_t type, making these procedures accessible via any tensor_1D_t child type. This facilitates assembly of the B boundary matrix required for inner products corresponding to surface integrals of the form .SS. (v*f) = for a vector_1D_t v and a scalar_1D_t f. Previously, the weights functions were bound to the gradient_1D_t and divergence_1D_t types and were thus inaccessible in a calculation not involving those types. Now the multiplication operator above can query its operands for the weights and use them to construct B via B = QD + (G^T)*P, Eq.
(6) of Corbino & Castillo (2020) --- test/divergence_operator_1D_test_m.F90 | 37 ++-- test/driver.f90 | 2 + test/gradient_operator_1D_test_m.F90 | 73 +++--- test/integration_operators_1D_test_m.F90 | 271 +++++++++++++++++++++++ test/laplacian_operator_1D_test_m.F90 | 26 ++- 5 files changed, 357 insertions(+), 52 deletions(-) create mode 100644 test/integration_operators_1D_test_m.F90 diff --git a/test/divergence_operator_1D_test_m.F90 b/test/divergence_operator_1D_test_m.F90 index 8ed321bb..ce65ddc1 100644 --- a/test/divergence_operator_1D_test_m.F90 +++ b/test/divergence_operator_1D_test_m.F90 @@ -7,8 +7,8 @@ module divergence_operator_1D_test_m ,operator(.all.) & ,operator(.also.) & ,operator(.approximates.) & - ,operator(.csv.) & ,operator(.within.) & + ,passing_test & ,string_t & ,test_t & ,test_description_t & @@ -27,7 +27,7 @@ module divergence_operator_1D_test_m procedure, nopass :: results end type - double precision, parameter :: tight_tolerance = 1D-14, loose_tolerance = 1D-08, rough_tolerance = 1D-02, crude_tolerance = 2D-02 + double precision, parameter :: tight_tolerance = 5D-14, loose_tolerance = 1D-08, rough_tolerance = 1D-02, crude_tolerance = 2D-02 contains @@ -68,12 +68,15 @@ function check_2nd_order_div_grad_parabola() result(test_diagnosis) double precision, parameter :: expected_divergence = 1D0 #ifdef __GFORTRAN__ type(divergence_1D_t) div_grad_scalar - div_grad_scalar = .div. (.grad. scalar_1D_t(scalar_1D_initializer, order=2, cells=5, x_min=0D0, x_max=5D0)) + div_grad_scalar = .div. (.grad. scalar_1D_t(scalar_1D_initializer, order=2, cells=16, x_min=0D0, x_max=5D0)) #else - associate(div_grad_scalar => .div. (.grad. scalar_1D_t(scalar_1D_initializer, order=2, cells=5, x_min=0D0, x_max=5D0))) + associate(div_grad_scalar => .div. (.grad. scalar_1D_t(scalar_1D_initializer, order=2, cells=16, x_min=0D0, x_max=5D0))) #endif - test_diagnosis = .all. (div_grad_scalar%values() .approximates. expected_divergence .within. 
tight_tolerance) & + + test_diagnosis = passing_test() + test_diagnosis = test_diagnosis .also. (.all. (div_grad_scalar%values() .approximates. expected_divergence .within. tight_tolerance)) & // " (2nd-order .div. (.grad. (x**2)/2))" + #ifndef __GFORTRAN__ end associate #endif @@ -85,30 +88,31 @@ function check_4th_order_div_grad_parabola() result(test_diagnosis) double precision, parameter :: expected_divergence = 1D0 #ifdef __GFORTRAN__ type(divergence_1D_t) div_grad_scalar - div_grad_scalar = .div. (.grad. scalar_1D_t(scalar_1D_initializer, order=4, cells=9, x_min=0D0, x_max=9D0)) + div_grad_scalar = .div. (.grad. scalar_1D_t(scalar_1D_initializer, order=4, cells=16, x_min=0D0, x_max=9D0)) #else - associate(div_grad_scalar => .div. (.grad. scalar_1D_t(scalar_1D_initializer, order=4, cells=9, x_min=0D0, x_max=9D0))) + associate(div_grad_scalar => .div. (.grad. scalar_1D_t(scalar_1D_initializer, order=4, cells=16, x_min=0D0, x_max=9D0))) #endif - test_diagnosis = .all. (div_grad_scalar%values() .approximates. expected_divergence .within. tight_tolerance) & + + test_diagnosis = passing_test() + test_diagnosis = test_diagnosis .also. (.all. (div_grad_scalar%values() .approximates. expected_divergence .within. tight_tolerance)) & // " (4th-order .div. (.grad. (x**2)/2))" + #ifndef __GFORTRAN__ end associate #endif end function - pure function sinusoid(x) result(y) double precision, intent(in) :: x(:) double precision, allocatable :: y(:) y = sin(x) + cos(x) end function - function check_2nd_order_div_sinusoid_convergence() result(test_diagnosis) type(test_diagnosis_t) test_diagnosis procedure(vector_1D_initializer_i), pointer :: vector_1D_initializer => sinusoid double precision, parameter :: pi = 3.141592653589793D0 - integer, parameter :: order_desired = 2, coarse_cells=100, fine_cells=200 + integer, parameter :: order_desired = 2, coarse_cells=100, fine_cells=coarse_cells+1 #ifdef __GFORTRAN__ type(divergence_1D_t) div_coarse, div_fine div_coarse = .div. 
vector_1D_t(vector_1D_initializer , order=order_desired, cells=coarse_cells, x_min=0D0, x_max=2*pi) @@ -128,7 +132,8 @@ function check_2nd_order_div_sinusoid_convergence() result(test_diagnosis) ,div_coarse_values => div_coarse%values() & ,div_fine_values => div_fine%values() & ) - test_diagnosis = .all. (div_coarse_values .approximates. grad_coarse .within. rough_tolerance) & + test_diagnosis = passing_test() + test_diagnosis = test_diagnosis .also. (.all. (div_coarse_values .approximates. grad_coarse .within. rough_tolerance)) & // " (coarse-grid 2nd-order .div. [sin(x) + cos(x)])" test_diagnosis = test_diagnosis .also. (.all. (div_fine_values .approximates. grad_fine .within. rough_tolerance)) & // " (fine-grid 2nd-order .div. [sin(x) + cos(x)])" @@ -148,12 +153,11 @@ function check_2nd_order_div_sinusoid_convergence() result(test_diagnosis) #endif end function - function check_4th_order_div_sinusoid_convergence() result(test_diagnosis) type(test_diagnosis_t) test_diagnosis procedure(vector_1D_initializer_i), pointer :: vector_1D_initializer => sinusoid double precision, parameter :: pi = 3.141592653589793D0 - integer, parameter :: order_desired = 4, coarse_cells=300, fine_cells=1500 + integer, parameter :: order_desired = 4, coarse_cells=500, fine_cells=coarse_cells+1 #ifdef __GFORTRAN__ type(divergence_1D_t) div_coarse, div_fine div_coarse = .div. vector_1D_t(vector_1D_initializer , order=order_desired, cells=coarse_cells, x_min=0D0, x_max=2*pi) @@ -174,10 +178,13 @@ function check_4th_order_div_sinusoid_convergence() result(test_diagnosis) ,div_coarse_values => div_coarse%values() & ,div_fine_values => div_fine%values() & ) - test_diagnosis = .all. (div_coarse_values .approximates. div_coarse_expected .within. loose_tolerance) & + + test_diagnosis = passing_test() + test_diagnosis = test_diagnosis .also. (.all. (div_coarse_values .approximates. div_coarse_expected .within. loose_tolerance)) & // " (coarse-grid 4th-order .div. 
[sin(x) + cos(x)])" test_diagnosis = test_diagnosis .also. (.all. (div_fine_values .approximates. div_fine_expected .within. loose_tolerance)) & // " (fine-grid 4th-order .div. [sin(x) + cos(x)])" + associate( & error_coarse_max => maxval(abs(div_coarse_values - div_coarse_expected)) & ,error_fine_max => maxval(abs(div_fine_values - div_fine_expected)) & diff --git a/test/driver.f90 b/test/driver.f90 index 408507bd..53e3759a 100644 --- a/test/driver.f90 +++ b/test/driver.f90 @@ -3,12 +3,14 @@ program test_suite_driver use gradient_operator_1D_test_m, only : gradient_operator_1D_test_t use divergence_operator_1D_test_m, only : divergence_operator_1D_test_t use laplacian_operator_1D_test_m, only : laplacian_operator_1D_test_t + use integration_operators_1D_test_m, only : integration_operators_1D_test_t implicit none associate(test_harness => test_harness_t([ & test_fixture_t(gradient_operator_1D_test_t()) & ,test_fixture_t(divergence_operator_1D_test_t()) & ,test_fixture_t(laplacian_operator_1D_test_t()) & + ,test_fixture_t(integration_operators_1D_test_t()) & ])) call test_harness%report_results end associate diff --git a/test/gradient_operator_1D_test_m.F90 b/test/gradient_operator_1D_test_m.F90 index fbab341f..35b62126 100644 --- a/test/gradient_operator_1D_test_m.F90 +++ b/test/gradient_operator_1D_test_m.F90 @@ -8,6 +8,7 @@ module gradient_operator_1D_test_m ,operator(.also.) & ,operator(.approximates.) & ,operator(.within.) 
& + ,passing_test & ,string_t & ,test_t & ,test_description_t & @@ -16,7 +17,7 @@ module gradient_operator_1D_test_m ,usher use mole_m, only : scalar_1D_t, scalar_1D_initializer_i #ifdef __GFORTRAN__ - use mole_m, only : vector_1D_t, vector_1D_initializer_i + use mole_m, only : vector_1D_t, vector_1D_initializer_i, gradient_1D_t #endif type, extends(test_t) :: gradient_operator_1D_test_t @@ -25,7 +26,7 @@ module gradient_operator_1D_test_m procedure, nopass :: results end type - double precision, parameter :: tight_tolerance = 1D-14, loose_tolerance = 1D-12, rough_tolerance = 2D-02 + double precision, parameter :: tight_tolerance = 1D-14, loose_tolerance = 1D-12, rough_tolerance = 5D-02 contains @@ -64,26 +65,32 @@ function check_grad_const() result(test_diagnosis) type(test_diagnosis_t) test_diagnosis double precision, parameter :: grad_expected = 0. procedure(scalar_1D_initializer_i), pointer :: scalar_1D_initializer => const + #ifdef __GFORTRAN__ - type(vector_1D_t) grad + type(gradient_1D_t) grad - grad = .grad. scalar_1D_t(scalar_1D_initializer, order=2, cells=5, x_min=0D0, x_max=4D0) + grad = .grad. scalar_1D_t(scalar_1D_initializer, order=2, cells=16, x_min=0D0, x_max=4D0) #else - associate(grad => .grad. scalar_1D_t(scalar_1D_initializer, order=2, cells=5, x_min=0D0, x_max=4D0)) + associate(grad => .grad. scalar_1D_t(scalar_1D_initializer, order=2, cells=16, x_min=0D0, x_max=4D0)) #endif - test_diagnosis = .all. (grad%values() .approximates. grad_expected .within. loose_tolerance) & + + test_diagnosis = passing_test() + test_diagnosis = test_diagnosis .also. (.all. (grad%values() .approximates. grad_expected .within. loose_tolerance)) & // " (2nd-order .grad.(5))" + #ifndef __GFORTRAN__ end associate #endif #ifdef __GFORTRAN__ - grad = .grad. scalar_1D_t(scalar_1D_initializer, order=4, cells=9, x_min=0D0, x_max=8D0) + grad = .grad. scalar_1D_t(scalar_1D_initializer, order=4, cells=16, x_min=0D0, x_max=8D0) #else - associate(grad => .grad. 
scalar_1D_t(scalar_1D_initializer, order=4, cells=9, x_min=0D0, x_max=8D0)) + associate(grad => .grad. scalar_1D_t(scalar_1D_initializer, order=4, cells=16, x_min=0D0, x_max=8D0)) #endif + test_diagnosis = test_diagnosis .also. (.all. (grad%values() .approximates. grad_expected .within. loose_tolerance)) & // " (4th-order .grad.(5))" + #ifndef __GFORTRAN__ end associate #endif @@ -100,30 +107,33 @@ function check_grad_line() result(test_diagnosis) double precision, parameter :: grad_expected = 14D0 procedure(scalar_1D_initializer_i), pointer :: scalar_1D_initializer => line #ifdef __GFORTRAN__ - type(vector_1D_t) grad + type(gradient_1D_t) grad - grad = .grad. scalar_1D_t(scalar_1D_initializer, order=2, cells=5, x_min=0D0, x_max=4D0) + grad = .grad. scalar_1D_t(scalar_1D_initializer, order=2, cells=16, x_min=0D0, x_max=4D0) #else - associate(grad => .grad. scalar_1D_t(scalar_1D_initializer, order=2, cells=5, x_min=0D0, x_max=4D0)) + associate(grad => .grad. scalar_1D_t(scalar_1D_initializer, order=2, cells=16, x_min=0D0, x_max=4D0)) #endif - test_diagnosis = .all. (grad%values() .approximates. grad_expected .within. loose_tolerance) & + test_diagnosis = passing_test() + test_diagnosis = test_diagnosis .also. (.all. (grad%values() .approximates. grad_expected .within. loose_tolerance)) & // " (2nd-order .grad.(14*x + 3))" + #ifndef __GFORTRAN__ end associate #endif #ifdef __GFORTRAN__ - grad = .grad. scalar_1D_t(scalar_1D_initializer, order=4, cells=9, x_min=0D0, x_max=8D0) + grad = .grad. scalar_1D_t(scalar_1D_initializer, order=4, cells=16, x_min=0D0, x_max=8D0) #else - associate(grad => .grad. scalar_1D_t(scalar_1D_initializer, order=4, cells=9, x_min=0D0, x_max=8D0)) + associate(grad => .grad. scalar_1D_t(scalar_1D_initializer, order=4, cells=16, x_min=0D0, x_max=8D0)) #endif + test_diagnosis = test_diagnosis .also. (.all. (grad%values() .approximates. grad_expected .within. 
loose_tolerance)) & // " (4th-order .grad.(14*x + 3))" + #ifndef __GFORTRAN__ end associate #endif - end function pure function parabola(x) result(y) @@ -137,33 +147,39 @@ function check_grad_parabola() result(test_diagnosis) type(test_diagnosis_t) test_diagnosis procedure(scalar_1D_initializer_i), pointer :: scalar_1D_initializer => parabola #ifdef __GFORTRAN__ - type(vector_1D_t) grad + type(gradient_1D_t) grad - grad = .grad. scalar_1D_t(scalar_1D_initializer , order=2, cells=5, x_min=0D0, x_max=4D0) + grad = .grad. scalar_1D_t(scalar_1D_initializer , order=2, cells=16, x_min=0D0, x_max=4D0) #else - associate(grad => .grad. scalar_1D_t(scalar_1D_initializer , order=2, cells=5, x_min=0D0, x_max=4D0)) + associate(grad => .grad. scalar_1D_t(scalar_1D_initializer , order=2, cells=16, x_min=0D0, x_max=4D0)) #endif + + test_diagnosis = passing_test() + associate(x => grad%grid()) associate(grad_expected => 14*x + 3) - test_diagnosis = .all. (grad%values() .approximates. grad_expected .within. loose_tolerance) & + test_diagnosis = test_diagnosis .also. (.all. (grad%values() .approximates. grad_expected .within. loose_tolerance)) & // " (2nd-order .grad.(7*x**2 + 3*x + 5))" end associate end associate + #ifndef __GFORTRAN__ end associate #endif #ifdef __GFORTRAN__ - grad = .grad. scalar_1D_t(scalar_1D_initializer , order=4, cells=9, x_min=0D0, x_max=8D0) + grad = .grad. scalar_1D_t(scalar_1D_initializer , order=4, cells=16, x_min=0D0, x_max=8D0) #else - associate(grad => .grad. scalar_1D_t(scalar_1D_initializer , order=4, cells=9, x_min=0D0, x_max=8D0)) + associate(grad => .grad. scalar_1D_t(scalar_1D_initializer , order=4, cells=16, x_min=0D0, x_max=8D0)) #endif + associate(x => grad%grid()) associate(grad_expected => 14*x + 3) test_diagnosis = test_diagnosis .also. (.all. (grad%values() .approximates. grad_expected .within. 
loose_tolerance)) & // " (4th-order .grad.(7*x**2 + 3*x + 5))" end associate end associate + #ifndef __GFORTRAN__ end associate #endif @@ -180,9 +196,9 @@ function check_2nd_order_grad_convergence() result(test_diagnosis) type(test_diagnosis_t) test_diagnosis procedure(scalar_1D_initializer_i), pointer :: scalar_1D_initializer => sinusoid double precision, parameter :: pi = 3.141592653589793D0 - integer, parameter :: order_desired = 2, coarse_cells=500, fine_cells=1500 + integer, parameter :: order_desired = 2, coarse_cells=200, fine_cells=coarse_cells+1 #ifdef __GFORTRAN__ - type(vector_1D_t) grad_coarse, grad_fine + type(gradient_1D_t) grad_coarse, grad_fine grad_coarse = .grad. scalar_1D_t(scalar_1D_initializer , order=order_desired, cells=coarse_cells, x_min=0D0, x_max=2*pi) grad_fine = .grad. scalar_1D_t(scalar_1D_initializer , order=order_desired, cells=fine_cells , x_min=0D0, x_max=2*pi) @@ -202,7 +218,8 @@ function check_2nd_order_grad_convergence() result(test_diagnosis) ,grad_coarse_values => grad_coarse%values() & ,grad_fine_values => grad_fine%values() & ) - test_diagnosis = .all. (grad_coarse_values .approximates. grad_coarse_expected .within. rough_tolerance) & + test_diagnosis = passing_test() + test_diagnosis = test_diagnosis .also. (.all. (grad_coarse_values .approximates. grad_coarse_expected .within. rough_tolerance)) & // " (coarse-grid 2nd-order .grad. [sin(x) + cos(x)])" test_diagnosis = test_diagnosis .also. (.all. (grad_fine_values .approximates. grad_fine_expected .within. rough_tolerance)) & // " (fine-grid 4th-order .grad. 
[sin(x) + cos(x)])" @@ -226,13 +243,14 @@ function check_4th_order_grad_convergence() result(test_diagnosis) type(test_diagnosis_t) test_diagnosis procedure(scalar_1D_initializer_i), pointer :: scalar_1D_initializer => sinusoid double precision, parameter :: pi = 3.141592653589793D0 - integer, parameter :: order_desired = 4, coarse_cells=100, fine_cells=1600 #ifdef __GFORTRAN__ - type(vector_1D_t) grad_coarse, grad_fine + integer, parameter :: order_desired = 4, coarse_cells=300, fine_cells=coarse_cells+1 + type(gradient_1D_t) grad_coarse, grad_fine grad_coarse = .grad. scalar_1D_t(scalar_1D_initializer , order=order_desired, cells=coarse_cells, x_min=0D0, x_max=2*pi) grad_fine = .grad. scalar_1D_t(scalar_1D_initializer , order=order_desired, cells=fine_cells , x_min=0D0, x_max=2*pi) #else + integer, parameter :: order_desired = 4, coarse_cells=400, fine_cells=coarse_cells+1 associate( & grad_coarse => .grad. scalar_1D_t(scalar_1D_initializer , order=order_desired, cells=coarse_cells, x_min=0D0, x_max=2*pi) & ,grad_fine => .grad. scalar_1D_t(scalar_1D_initializer , order=order_desired, cells=fine_cells , x_min=0D0, x_max=2*pi) & @@ -248,7 +266,8 @@ function check_4th_order_grad_convergence() result(test_diagnosis) ,grad_coarse_values => grad_coarse%values() & ,grad_fine_values => grad_fine%values() & ) - test_diagnosis = .all. (grad_coarse_values .approximates. grad_coarse_expected .within. rough_tolerance) & + test_diagnosis = passing_test() + test_diagnosis = test_diagnosis .also. (.all. (grad_coarse_values .approximates. grad_coarse_expected .within. rough_tolerance)) & // " (4th-order d(sinusoid)/dx point-wise errors)" test_diagnosis = test_diagnosis .also. (.all. (grad_fine_values .approximates. grad_fine_expected .within. 
rough_tolerance)) & // " (4th-order d(sinusoid)/dx point-wise)" diff --git a/test/integration_operators_1D_test_m.F90 b/test/integration_operators_1D_test_m.F90 new file mode 100644 index 00000000..02a3174a --- /dev/null +++ b/test/integration_operators_1D_test_m.F90 @@ -0,0 +1,271 @@ +#include "language-support.F90" + !! include Julienne preprocessor macros + +module integration_operators_1D_test_m + use julienne_m, only : & + operator(//) & + ,operator(.also.) & + ,operator(.approximates.) & + ,operator(.isAtMost.) & + ,operator(.withinPercentage.) & + ,passing_test & + ,string_t & + ,test_t & + ,test_description_t & + ,test_diagnosis_t & + ,test_result_t & + ,usher + use mole_m, only : scalar_1D_t, scalar_1D_initializer_i, vector_1D_t, vector_1D_initializer_i + implicit none + + type, extends(test_t) :: integration_operators_1D_test_t + contains + procedure, nopass :: subject + procedure, nopass :: results + end type + + character(len=*), parameter, dimension(*) :: ordinal = [" ", "2nd", " ", "4th"] + double precision, parameter :: residual_tolerance = 1D-15 + +contains + + pure function subject() result(test_subject) + character(len=:), allocatable :: test_subject + test_subject = 'The set of 2nd- and 4th-order 1D mimetic integration operators' + end function + + function results() result(test_results) + type(integration_operators_1D_test_t) integration_operators_1D_test + type(test_result_t), allocatable :: test_results(:) + + test_results = integration_operators_1D_test%run([ & + test_description_t( & + 'computing the volume integral .SSS. (v .dot. .grad. f)*dV' & + ,usher(check_volume_integral_of_v_dot_grad_f)) & + ,test_description_t( & + 'computing the volume integral .SSS. (f * .div. v)*dV' & + ,usher(check_volume_integral_of_f_div_v)) & + ,test_description_t( & + 'computing the surface integral .SS. (f .x. (v .dot. 
dA))' & + ,usher(check_surface_integral_of_vf)) & + ,test_description_t( & + 'satisfying the extended Gauss Divergence Theorem within a residual tolerance of ' // string_t(residual_tolerance) & + ,usher(check_gauss_divergence_theorem)) & + ]) + end function + + pure function parabola(x) result(f) + double precision, intent(in) :: x(:) + double precision, allocatable :: f(:) + f = (x**2)/2 + end function + + pure function line(x) result(v) + double precision, intent(in) :: x(:) + double precision, allocatable :: v(:) + v = x + end function + + pure function SSS_v_dot_grad_f(x) result(integral) + double precision, intent(in) :: x + double precision integral + integral = (x**3)/3 + end function + + pure function SSS_f_div_v(x) result(integral) + double precision, intent(in) :: x + double precision integral + integral = (x**3)/6 + end function + + function check_volume_integral_of_v_dot_grad_f() result(test_diagnosis) + type(test_diagnosis_t) test_diagnosis + procedure(scalar_1D_initializer_i), pointer :: scalar_1D_initializer + procedure(vector_1D_initializer_i), pointer :: vector_1D_initializer + double precision, parameter :: x_min = 0D0, x_max = 1D0 + integer, parameter :: cells = 500, cells_ = cells + 1 + double precision, parameter, dimension(*) :: expected = [0, 2, 0, 1] + double precision, parameter, dimension(*) :: order_tolerance = [0, 1, 0, 1] + double precision, parameter, dimension(*) :: solution_tolerance = [0D0, 5D-7, 0D0, 5D-10] + integer order + + scalar_1D_initializer => parabola + vector_1D_initializer => line + + test_diagnosis = passing_test() + + do order = 2, 4, 2 + associate( & + f => scalar_1D_t(scalar_1D_initializer, order, cells , x_min, x_max) & + ,f_ => scalar_1D_t(scalar_1D_initializer, order, cells_, x_min, x_max) & + ,v => vector_1D_t(vector_1D_initializer, order, cells , x_min, x_max) & + ,v_ => vector_1D_t(vector_1D_initializer, order, cells_, x_min, x_max) & + ,expected_integral => SSS_v_dot_grad_f(x_max) - SSS_v_dot_grad_f(x_min) & + 
) + associate( & + dV => f%dV() & + ,dV_ => f_%dV() & + ) + associate( & + lo_res => abs((.SSS. (v .dot. .grad. f ) * dV - expected_integral)) & + ,hi_res => abs((.SSS. (v_ .dot. .grad. f_) * dV_ - expected_integral)) & + ) + test_diagnosis = test_diagnosis .also. & + (hi_res .isAtMost. solution_tolerance(order)) & + // " for " // ordinal(order) // "-order discretization of .SSS. (v .dot. .grad. f) * dV" + associate(calculated_order => log(lo_res/hi_res)/log(dble(cells_)/cells)) + test_diagnosis = test_diagnosis .also. & + (calculated_order .approximates. expected(order) .withinPercentage. order_tolerance(order)) & + // " for convergence rate of " // ordinal(order) // "-order discretization of .SSS. (v .dot. .grad. f) * dV" + end associate + end associate + end associate + end associate + end do + + end function + + function check_volume_integral_of_f_div_v() result(test_diagnosis) + type(test_diagnosis_t) test_diagnosis + procedure(scalar_1D_initializer_i), pointer :: scalar_1D_initializer + procedure(vector_1D_initializer_i), pointer :: vector_1D_initializer + double precision, parameter :: x_min = 0D0, x_max = 1D0 + integer, parameter :: cells = 500, cells_ = cells + 1 + double precision, parameter, dimension(*) :: expected = [0, 2, 0, 1] + double precision, parameter, dimension(*) :: order_tolerance = [0, 1, 0, 2] + double precision, parameter, dimension(*) :: solution_tolerance = [0D0, 2D-7, 0D0, 4D-10] + integer order + + scalar_1D_initializer => parabola + vector_1D_initializer => line + + test_diagnosis = passing_test() + + do order = 2, 4, 2 + associate( & + f => scalar_1D_t(scalar_1D_initializer, order, cells , x_min, x_max) & + ,f_ => scalar_1D_t(scalar_1D_initializer, order, cells_, x_min, x_max) & + ,v => vector_1D_t(vector_1D_initializer, order, cells , x_min, x_max) & + ,v_ => vector_1D_t(vector_1D_initializer, order, cells_, x_min, x_max) & + ,expected_integral => SSS_f_div_v(x_max) - SSS_f_div_v(x_min) & + ) + associate( & + dV => f%dV() & + ,dV_ 
=> f_%dV() & + ) + associate( & + lo_res => abs( (.SSS. (f * .div. v ) * dV ) - expected_integral) & + ,hi_res => abs( (.SSS. (f_ * .div. v_) * dV_) - expected_integral) & + ) + test_diagnosis = test_diagnosis .also. & + (hi_res .isAtMost. solution_tolerance(order)) & + // " for " // ordinal(order) // "-order discretization of .SSS. (f .div. v) * dV" + associate(calculated_order => log(lo_res/hi_res)/log(dble(cells_)/cells)) + test_diagnosis = test_diagnosis .also. & + (calculated_order .approximates. expected(order) .withinPercentage. order_tolerance(order)) & + // " for convergence rate of " // ordinal(order) // "-order discretization of .SSS. (f * .div. v) * dV" + end associate + end associate + end associate + end associate + end do + + end function + + function check_surface_integral_of_vf() result(test_diagnosis) + type(test_diagnosis_t) test_diagnosis + procedure(scalar_1D_initializer_i), pointer :: scalar_1D_initializer + procedure(vector_1D_initializer_i), pointer :: vector_1D_initializer + double precision, parameter :: x_min = 0D0, x_max = 1D0 +#ifndef __INTEL_COMPILER + integer, parameter :: cells = 500, cells_ = cells+1 + double precision, parameter, dimension(*) :: order_tolerance = [0, 1, 0, 4] +#else + integer, parameter :: cells = 400, cells_ = cells+1 + double precision, parameter, dimension(*) :: order_tolerance = [0, 1, 0, 5] +#endif + double precision, parameter, dimension(*) :: expected = [0, 2, 0, 1] + double precision, parameter, dimension(*) :: solution_tolerance = [0D0, 2D-6, 0D0, 2D-9] + integer order + + scalar_1D_initializer => parabola + vector_1D_initializer => line + + test_diagnosis = passing_test() + + do order = 2, 4, 2 + associate( & + f => scalar_1D_t(scalar_1D_initializer, order, cells , x_min, x_max) & + ,f_ => scalar_1D_t(scalar_1D_initializer, order, cells_, x_min, x_max) & + ,v => vector_1D_t(vector_1D_initializer, order, cells , x_min, x_max) & + ,v_ => vector_1D_t(vector_1D_initializer, order, cells_, x_min, x_max) & + 
,expected_integral => parabola([x_max])*line([x_max]) - parabola([x_min])*line([x_min]) & + ) + associate( & + dA => v%dA() & + ,dA_ => v_%dA() & + ) + associate( & + lo_res => abs(.SS. (f .x. (v .dot. dA )) - expected_integral(1)) & + ,hi_res => abs(.SS. (f_ .x. (v_ .dot. dA_)) - expected_integral(1)) & + ) + test_diagnosis = test_diagnosis .also. & + (hi_res .isAtMost. solution_tolerance(order)) & + // " for " // ordinal(order) // "-order discretization of .SS. (f .x. (v .dot. dA))" + associate(calculated_order => log(lo_res/hi_res)/log(dble(cells_)/cells)) + test_diagnosis = test_diagnosis .also. & + (calculated_order .approximates. expected(order) .withinPercentage. order_tolerance(order)) & + // " for convergence rate of " // ordinal(order) // "-order discretization of .SS. (f .x. (v .dot. dA)))" + end associate + end associate + end associate + end associate + end do + + end function + + pure function quartic(x) result(f) + double precision, intent(in) :: x(:) + double precision, allocatable :: f(:) + f = (x**4)/4 + end function + + pure function exponential(x) result(v) + double precision, intent(in) :: x(:) + double precision, allocatable :: v(:) + v = exp(x) + end function + + function check_gauss_divergence_theorem() result(test_diagnosis) + type(test_diagnosis_t) test_diagnosis + procedure(scalar_1D_initializer_i), pointer :: scalar_1D_initializer + procedure(vector_1D_initializer_i), pointer :: vector_1D_initializer + integer, parameter :: cells=20 + double precision, parameter :: x_min = 0D0, x_max = 1D0 + integer order + + scalar_1D_initializer => quartic + vector_1D_initializer => exponential + + test_diagnosis = passing_test() + + do order = 2, 4, 2 + associate( & + f => scalar_1D_t(scalar_1D_initializer, order, cells , x_min, x_max) & + ,v => vector_1D_t(vector_1D_initializer, order, cells , x_min, x_max) & + ) + associate( & + dA => v%dA() & + ,dV => f%dV() & + ) + associate(residual => (.SSS. (v .dot. .grad. f )*dV) + (.SSS. (f * .div. 
v )*dV) - .SS. (f .x. (v .dot. dA))) + test_diagnosis = test_diagnosis .also. (abs(residual) .isAtMost. residual_tolerance) & + // " for " // ordinal(order) // "-order Extended Gauss Divergence Theorem residual" + end associate + end associate + end associate + end do + + end function + +end module integration_operators_1D_test_m diff --git a/test/laplacian_operator_1D_test_m.F90 b/test/laplacian_operator_1D_test_m.F90 index a6e96af7..efb0f5e0 100644 --- a/test/laplacian_operator_1D_test_m.F90 +++ b/test/laplacian_operator_1D_test_m.F90 @@ -7,6 +7,7 @@ module laplacian_operator_1D_test_m ,operator(.approximates.) & ,operator(.separatedBy.) & ,operator(.within.) & + ,passing_test & ,string_t & ,test_t & ,test_description_t & @@ -25,7 +26,7 @@ module laplacian_operator_1D_test_m procedure, nopass :: results end type - double precision, parameter :: tight_tolerance = 1D-14, loose_tolerance = 1D-09, crude_tolerance = 1D-02 + double precision, parameter :: tight_tolerance = 5D-14, loose_tolerance = 1D-09, crude_tolerance = 1D-02 contains @@ -66,12 +67,15 @@ function check_2nd_order_laplacian_parabola() result(test_diagnosis) double precision, parameter :: expected_laplacian = 1D0 #ifdef __GFORTRAN__ type(laplacian_1D_t) laplacian_scalar - laplacian_scalar = .laplacian. scalar_1D_t(scalar_1D_initializer, order=2, cells=5, x_min=0D0, x_max=5D0) + laplacian_scalar = .laplacian. scalar_1D_t(scalar_1D_initializer, order=2, cells=16, x_min=0D0, x_max=5D0) #else - associate(laplacian_scalar => .laplacian. scalar_1D_t(scalar_1D_initializer, order=2, cells=5, x_min=0D0, x_max=5D0)) + associate(laplacian_scalar => .laplacian. scalar_1D_t(scalar_1D_initializer, order=2, cells=16, x_min=0D0, x_max=5D0)) #endif - test_diagnosis = .all. (laplacian_scalar%values() .approximates. expected_laplacian .within. tight_tolerance) & + + test_diagnosis = passing_test() + test_diagnosis = test_diagnosis .also. (.all. (laplacian_scalar%values() .approximates. expected_laplacian .within. 
tight_tolerance)) & // " (2nd-order .laplacian. [(x**2)/2]" + #ifndef __GFORTRAN__ end associate #endif @@ -88,14 +92,15 @@ function check_4th_order_laplacian_of_quartic() result(test_diagnosis) procedure(scalar_1D_initializer_i), pointer :: scalar_1D_initializer => quartic #ifndef __GFORTRAN__ - associate(laplacian_quartic => .laplacian. scalar_1D_t(scalar_1D_initializer, order=4, cells=20, x_min=0D0, x_max=40D0)) + associate(laplacian_quartic => .laplacian. scalar_1D_t(scalar_1D_initializer, order=4, cells=16, x_min=0D0, x_max=40D0)) #else type(laplacian_1D_t) laplacian_quartic - laplacian_quartic = .laplacian. scalar_1D_t(scalar_1D_initializer, order=4, cells=20, x_min=0D0, x_max=40D0) + laplacian_quartic = .laplacian. scalar_1D_t(scalar_1D_initializer, order=4, cells=16, x_min=0D0, x_max=40D0) #endif associate(x => laplacian_quartic%grid()) associate(expected_laplacian => x**2, actual_laplacian => laplacian_quartic%values()) - test_diagnosis = .all. (actual_laplacian .approximates. expected_laplacian .within. loose_tolerance) & + test_diagnosis = passing_test() + test_diagnosis = test_diagnosis .also. (.all. (actual_laplacian .approximates. expected_laplacian .within. loose_tolerance)) & // " (4th-order .laplacian. 
[(x**4)/24]" end associate end associate @@ -118,12 +123,12 @@ pure function d2f_dx2(x) function check_2nd_order_laplacian_convergence() result(test_diagnosis) type(test_diagnosis_t) test_diagnosis - test_diagnosis = check_laplacian_convergence(order_desired=2, coarse_cells=500, fine_cells=1000) + test_diagnosis = check_laplacian_convergence(order_desired=2, coarse_cells=400, fine_cells=401) end function function check_4th_order_laplacian_convergence() result(test_diagnosis) type(test_diagnosis_t) test_diagnosis - test_diagnosis = check_laplacian_convergence(order_desired = 4, coarse_cells=100, fine_cells=200) + test_diagnosis = check_laplacian_convergence(order_desired = 4, coarse_cells=150, fine_cells=151) end function function check_laplacian_convergence(order_desired, coarse_cells, fine_cells) result(test_diagnosis) @@ -155,7 +160,8 @@ function check_laplacian_convergence(order_desired, coarse_cells, fine_cells) re ,actual_fine => laplacian_fine%values() & ,depth => laplacian_coarse%reduced_order_boundary_depth() & ) - test_diagnosis = & + test_diagnosis = passing_test() + test_diagnosis = test_diagnosis .also. & .all. (actual_coarse .approximates. expected_coarse .within. crude_tolerance) & // " (coarse-grid 2nd-order .laplacian. sin(x))" From bf8d168a34d84824955b7e185a35d11ff49aa3c7 Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Thu, 1 Jan 2026 23:04:26 -0800 Subject: [PATCH 6/7] feat(example): add optional args in EGDT program This commit adds optional arguments that can be used to print only specific terms in the extended Gauss Divergence Theorem example. The default behavior is still that all terms are computed, printed, and summed to produce a residual. Usage: fpm run \ --example extended-gauss-divergence \ --compiler flang-new \ --flag "-O3" \ -- [--help|-h] | [[--cells ] [--order ] [--xmin ] [--xmax ] [--div|d] [--grad|g] [--vf|f]] where pipes (|) separate square-bracketed optional arguments and angular brackets indicate user input values. 
fix(integral): .SSS. (f * .div. v)*dV This commit zero-extends divergence_1D_t values when computing f * .div. v to match the array sizes of the operands. feat(example): increase resolution, fmt output, work around gfortran bug This commit edits print-assembled-1D-operators.F90 to 1. Raise the grid resolution above the new minimum of 16 as required for computing the gradient quadrature weights when constructing a `gradient_1D_t` object and 2. Adds a `stop` statement to work around an apparent gfortran bug: a malloc error. This commit edits div-grad-laplacian-1D.F90 to 1. Raise the grid resolution above the new minimum of 16 as required for computing the quadrature weights (which are not used in this program) when defining a `gradient_1D_t` object and 2. Improve the program output format. doc(example): Gauss divergence theorem AI prompt --- example/div-grad-laplacian-1D.F90 | 41 ++++--- example/extended-gauss-divergence.F90 | 149 +++++++++++++++++++++++ example/print-assembled-1D-operators.F90 | 82 +++++++++++++ 3 files changed, 256 insertions(+), 16 deletions(-) create mode 100644 example/extended-gauss-divergence.F90 create mode 100644 example/print-assembled-1D-operators.F90 diff --git a/example/div-grad-laplacian-1D.F90 b/example/div-grad-laplacian-1D.F90 index eafc1b49..997dd242 100644 --- a/example/div-grad-laplacian-1D.F90 +++ b/example/div-grad-laplacian-1D.F90 @@ -22,25 +22,34 @@ double precision elemental function d2f_dx2(x) end module functions_m program div_grad_laplacian_1D + !! Compute the 2nd- and 4th-order mimetic approximations to the gradient and Laplacian of the + !! above function f(x) on a 1D uniform, staggered grid. use functions_m use julienne_m, only : file_t, string_t, operator(.separatedBy.)
use mole_m, only : scalar_1D_t, scalar_1D_initializer_i #ifdef __GFORTRAN__ - use mole_m, only : vector_1D_t, laplacian_1D_t + use mole_m, only : vector_1D_t, laplacian_1D_t, gradient_1D_t #endif implicit none procedure(scalar_1D_initializer_i), pointer :: scalar_1D_initializer => f print *,new_line('') - print *," 2nd-order approximations" - print *," ========================" + print *," Functions" + print *," ========================" + call execute_command_line("grep 'f =' example/div-grad-laplacian-1D.F90 | grep -v execute_command", wait=.true.) + call execute_command_line("grep 'df_dx =' example/div-grad-laplacian-1D.F90 | grep -v execute_command", wait=.true.) + call execute_command_line("grep 'd2f_dx2 =' example/div-grad-laplacian-1D.F90 | grep -v execute_command", wait=.true.) + + print *,new_line('') + print *," 2nd-order approximations" + print *," ========================" call output(order=2) print *,new_line('') - print *," 4th-order approximations" - print *," ========================" + print *," 4th-order approximations" + print *," ========================" call output(order=4) @@ -51,22 +60,22 @@ program div_grad_laplacian_1D subroutine output(order) integer, intent(in) :: order - associate( s => scalar_1D_t(scalar_1D_initializer, order=order, cells=10, x_min=0D0, x_max=20D0)) + associate( s => scalar_1D_t(scalar_1D_initializer, order=order, cells=20, x_min=0D0, x_max=20D0)) associate( grad_s => .grad. s & ,laplacian_s => .laplacian. s) associate( s_grid => s%grid() & ,grad_s_grid => grad_s%grid() & ,laplacian_s_grid => laplacian_s%grid()) associate( s_table => tabulate( & - string_t([character(len=18)::"x", "f(x) exp" , "f(x) act" ]) & + string_t([character(len=22)::"x", "f(x) expected" , "f(x) actual" ]) & ,s_grid, f(s_grid), s%values() & ) & ,grad_s_table => tabulate( & - string_t([character(len=18)::"x", ".grad. f exp" , ".grad. f act" ]) & + string_t([character(len=22)::"x", ".grad. f expected" , ".grad. 
f actual" ]) & ,grad_s_grid, df_dx(grad_s_grid), grad_s%values() & ) & ,laplacian_s_table => tabulate( & - string_t([character(len=18)::"x", ".laplacian. f exp", ".laplacian. f act"]) & + string_t([character(len=22)::"x", ".laplacian. f expected", ".laplacian. f actual"]) & ,laplacian_s_grid, d2f_dx2(laplacian_s_grid), laplacian_s%values()) & ) call s_table%write_lines() @@ -84,12 +93,12 @@ subroutine output(order) integer, intent(in) :: order type(scalar_1D_t) s - type(vector_1D_t) grad_s + type(gradient_1D_t) grad_s type(laplacian_1D_t) laplacian_s type(file_t) s_table, grad_s_table, laplacian_s_table double precision, allocatable,dimension(:) :: s_grid, grad_s_grid, laplacian_s_grid - s = scalar_1D_t(scalar_1D_initializer, order=order, cells=10, x_min=0D0, x_max=20D0) + s = scalar_1D_t(scalar_1D_initializer, order=order, cells=20, x_min=0D0, x_max=20D0) grad_s = .grad. s laplacian_s = .laplacian. s @@ -98,15 +107,15 @@ subroutine output(order) laplacian_s_grid = laplacian_s%grid() s_table = tabulate( & - string_t([character(len=18)::"x", "f(x) exp." , "f(x) act." ]) & + string_t([character(len=22)::"x", "f(x) expected" , "f(x) actual" ]) & ,s_grid, f(s_grid), s%values() & ) grad_s_table = tabulate( & - string_t([character(len=18)::"x", ".grad. f exp." , ".grad. f act." ]) & + string_t([character(len=22)::"x", ".grad. f expected" , ".grad. f actual" ]) & ,grad_s_grid, df_dx(grad_s_grid), grad_s%values() & ) laplacian_s_table = tabulate( & - string_t([character(len=18)::"x", ".laplacian. f exp.", ".laplacian. f act."]) & + string_t([character(len=22)::"x", ".laplacian. f expected", ".laplacian. f actual"]) & ,laplacian_s_grid, d2f_dx2(laplacian_s_grid), laplacian_s%values() & ) call s_table%write_lines() @@ -125,8 +134,8 @@ pure function tabulate(headings, abscissa, expected, actual) result(file) file = file_t([ & string_t("") & ,headings .separatedBy. 
" " & - ,string_t("----------------------------------------------------------") & - ,[( string_t(abscissa(line)) // " " // string_t(expected(line)) // " " // string_t(actual(line)), line = 1, size(abscissa))] & + ,string_t("------------------------------------------------------------------") & + ,[( string_t(abscissa(line)) // " " // string_t(expected(line)) // " " // string_t(actual(line)), line = 1, size(abscissa))] & ]) end function diff --git a/example/extended-gauss-divergence.F90 b/example/extended-gauss-divergence.F90 new file mode 100644 index 00000000..3a8df830 --- /dev/null +++ b/example/extended-gauss-divergence.F90 @@ -0,0 +1,149 @@ +module integrand_operands_m + implicit none +contains + + pure function scalar(x) result(f) + double precision, intent(in) :: x(:) + double precision, allocatable :: f(:) + f = (x**2)/2 ! <-- scalar function + end function + + pure function vector(x) result(v) + double precision, intent(in) :: x(:) + double precision, allocatable :: v(:) + v = x ! <-- vector function + end function + +end module + +program extended_gauss_divergence + !! Print each term in the following residual formed from the extended Gauss-divergence + !! theorem using one-dimensional (1D) 4th-(default) or 2nd-order mimetic discretizations: + !! `residual = .SSS. (v .dot. .grad. f) * dV +.SSS. (f * .div. v) * dV - .SS. (f .x. (v .dot. dA))` + !! where `.SSS.` and `.SS.` are the 1D equivalents of a volume integral over the whole + !! domain and a surface integral over a domain boundary of unit area, respectively. + use julienne_m, only : command_line_t + use integrand_operands_m, only : scalar, vector + use mole_m, only : scalar_1D_t, scalar_1D_initializer_i, vector_1D_t, vector_1D_initializer_i + implicit none + procedure(scalar_1D_initializer_i), pointer :: scalar_1D_initializer => scalar + procedure(vector_1D_initializer_i), pointer :: vector_1D_initializer => vector + + type numerical_arguments_t + !! 
Define default initializations that can be overridden with the command-line arguments + !! detailed by the usage information below + integer :: cells_=200, order_=4 + double precision :: x_min_=0D0, x_max_=1D0 + end type + + type text_flags_t + logical div_, grad_, vf_ + end type + + type(command_line_t) command_line + double precision SSS_v_dot_grad_f_dV, SSS_f_div_v_dV, SS_f_v_dot_dA + + if (command_line%argument_present([character(len=len("--help")) :: ("--help"), "-h"])) then + stop new_line('') // new_line('') & + // 'Usage:' // new_line('') & + // ' fpm run \' // new_line('') & + // ' --example extended-gauss-divergence \' // new_line('') & + // ' --compiler flang-new \' // new_line('') & + // ' --flag "-O3" \' // new_line('') & + // ' -- [--help|-h] | [[--cells ] [--order ] [--xmin ] [--xmax ] [--div|d] [--grad|g] [--vf|f]]' & + // new_line('') // new_line('') & + // 'where pipes (|) separate square-bracketed optional arguments and angular brackets indicate user input values.' // new_line('') + end if + + call execute_command_line("grep '<-- scalar' example/extended-gauss-divergence.F90 | grep -v execute_command", wait=.true.) + call execute_command_line("grep '<-- vector' example/extended-gauss-divergence.F90 | grep -v execute_command", wait=.true.) + +#ifdef __GFORTRAN__ + command_line_arguments: & + block + type(numerical_arguments_t) args + args = get_numerical_arguments() +#else + command_line_arguments: & + associate(args => get_numerical_arguments()) +#endif + text_flags: & + associate(flags => text_flags_t( & + div_ = command_line%argument_present( [ character(len=len("--div" )) :: "--div" , "-d" ] ) & + ,grad_ = command_line%argument_present( [ character(len=len("--grad")) :: "--grad", "-g" ] ) & + ,vf_ = command_line%argument_present( [ character(len=len("--vf" )) :: "--vf" , "-f" ] ) & + )) + print_all: & + associate(all_terms => merge(.true., .false., all([flags%div_, flags%grad_, flags%vf_]) .or. .not. 
any([flags%div_, flags%grad_, flags%vf_]))) + integrand_factors: & + associate( & + f => scalar_1D_t(scalar_1D_initializer, args%order_, args%cells_, args%x_min_, args%x_max_) & + ,v => vector_1D_t(vector_1D_initializer, args%order_, args%cells_, args%x_min_, args%x_max_) & + ) + differential_volume: & + associate(dV => f%dV()) + + if (flags%grad_ .or. all_terms) then + SSS_v_dot_grad_f_dV = .SSS. (v .dot. .grad. f) * dV + print '(a,g0)', ".SSS. (v .dot. .grad. f) * dV = ", SSS_v_dot_grad_f_dV + end if + + if (flags%div_ .or. all_terms) then + SSS_f_div_v_dV = .SSS. (f * .div. v) * dV + print '(a,g0)', ".SSS. ( f * .div. v) * dV = ", SSS_f_div_v_dV + end if + + end associate differential_volume + + differential_area: & + associate(dA => v%dA()) + if (flags%vf_ .or. all_terms) then + SS_f_v_dot_dA = .SS. (f .x. (v .dot. dA)) + print '(a,g0)', " -.SS. (f .x. (v .dot. dA)) = ", -SS_f_v_dot_dA + end if + + if (all_terms) then + print '(a)' , "----------------------------------------------------" + print '(26x,a,g0,a)',"sum = ", SSS_v_dot_grad_f_dV + SSS_f_div_v_dV - SS_f_v_dot_dA, " (residual)" + end if + + end associate differential_area + end associate integrand_factors + end associate print_all + end associate text_flags +#ifndef __GFORTRAN__ + end associate command_line_arguments +#else + end block command_line_arguments +#endif + +contains + + function get_numerical_arguments() result(numerical_arguments) + type(numerical_arguments_t) numerical_arguments + +#ifdef __GFORTRAN__ + character(len=:), allocatable :: cells_string, order_string, x_min_string, x_max_string + cells_string = command_line%flag_value("--cells") + order_string = command_line%flag_value("--order") + x_min_string = command_line%flag_value("--x_min") + x_max_string = command_line%flag_value("--x_max") +#else + associate( & + cells_string => command_line%flag_value("--cells") & + ,order_string => command_line%flag_value("--order") & + ,x_min_string => command_line%flag_value("--x_min") & + 
,x_max_string => command_line%flag_value("--x_max") & + ) +#endif + if (len(cells_string)/=0) read(cells_string,*) numerical_arguments%cells_ + if (len(order_string)/=0) read(order_string,*) numerical_arguments%order_ + if (len(x_min_string)/=0) read(x_min_string,*) numerical_arguments%x_min_ + if (len(x_max_string)/=0) read(x_max_string,*) numerical_arguments%x_max_ +#ifndef __GFORTRAN__ + end associate +#endif + + end function + +end program diff --git a/example/print-assembled-1D-operators.F90 b/example/print-assembled-1D-operators.F90 new file mode 100644 index 00000000..319ffcc1 --- /dev/null +++ b/example/print-assembled-1D-operators.F90 @@ -0,0 +1,82 @@ +program print_assembled_1D_operators + !! Print the fully assembled 2nd- and 4th-order mimetic gradient, divergence, and Laplacian + !! operator matrices as comma-separated rows for 1D grids with the minimum number of cells (16) + !! required for computing 4th-order gradient quadrature weights (Q). + use julienne_m, only : operator(.csv.), string_t, command_line_t + use mimetic_operators_1D_m, only : gradient_operator_1D_t, divergence_operator_1D_t + implicit none + + type(command_line_t) command_line + integer row + + command_line_settings: & + associate( & + gradient => command_line%argument_present(["--grad" ]) & + ,divergence => command_line%argument_present(["--div" ]) & + ,order => command_line%flag_value("--order") & + ) + + if (command_line%argument_present([character(len=len("--help")) :: ("--help"), "-h"])) then + stop new_line('') // new_line('') & + // 'Usage:' // new_line('') & + // ' fpm run \' // new_line('') & + // ' --example print-assembled-1D-operators \' // new_line('') & + // ' --compiler flang-new \' // new_line('') & + // ' --flag "-O3" \' // new_line('') & + // ' -- [--help|-h] | [--grad] [--div] [--order ]' // new_line('') // new_line('') & + // 'where square brackets indicate optional arguments and angular brackets indicate user input values.' 
// new_line('') + end if + + default_usage: & + associate(print_all => .not. any([gradient, divergence, len(order)/=0])) + + if (print_all .or. (gradient .and. len(order)==0) .or. (gradient .and. order=="2")) call print_gradient_operator( k=2, dx=1D0, m=16) + if (print_all .or. (divergence .and. len(order)==0) .or. (divergence .and. order=="2")) call print_divergence_operator(k=2, dx=1D0, m=16) + if (print_all .or. (gradient .and. len(order)==0) .or. (gradient .and. order=="4")) call print_gradient_operator( k=4, dx=1D0, m=16) + if (print_all .or. (divergence .and. len(order)==0) .or. (divergence .and. order=="4")) call print_divergence_operator(k=4, dx=1D0, m=16) + + end associate default_usage +#ifdef __GFORTRAN__ + stop +#endif + end associate command_line_settings + +contains + + subroutine print_gradient_operator(k, dx, m) + integer, intent(in) :: k, m + double precision, intent(in) :: dx + + print *, new_line(""), "Gradient operator: order = ", k, " | cells = ", m, " | dx = ", dx + + associate(grad_op => gradient_operator_1D_t(k, dx, cells=m)) + associate(G => grad_op%assemble()) + do row = 1, size(G,1) + associate(csv_row => .csv. string_t(G(row,:))) + print '(a)', csv_row%string() + end associate + end do + end associate + end associate + + end subroutine + + subroutine print_divergence_operator(k, dx, m) + integer, intent(in) :: k, m + double precision, intent(in) :: dx + + print *, new_line(""), "Divergence operator: order = ", k, " | cells = ", m, " | dx = ", dx + + associate(div_op => divergence_operator_1D_t(k, dx, cells=m)) + associate(D => div_op%assemble()) + do row = 1, size(D,1) + associate(csv_row => .csv. 
string_t(D(row,:))) + print '(a)', csv_row%string() + end associate + end do + end associate + end associate + + end subroutine + +end program From 01908a7993181ab7907c89f8dd45af13b7c50645 Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Thu, 1 Jan 2026 23:06:55 -0800 Subject: [PATCH 7/7] feat(example): add optional args in EGDT program This commit adds optional arguments that can be used to print only specific terms in the extended Gauss Divergence Theorem example. The default behavior is still that all terms are computed, printed, and summed to produce a residual. Usage: fpm run \ --example extended-gauss-divergence \ --compiler flang-new \ --flag "-O3" \ -- [--help|-h] | [[--cells ] [--order ] [--xmin ] [--xmax ] [--div|d] [--grad|g] [--vf|f]] where pipes (|) separate square-bracketed optional arguments and angular brackets indicate user input values. fix(integral): .SSS. (f * .div. v)*dV This commit zero-extends divergence_1D_t values when computing f * .div. v to match the array sizes of the operands. feat(example): increase resolution, fmt output, work around gfortran bug This commit edits print-assembled-1D-operators.F90 to 1. Raise the grid resolution above the new minimum of 16 as required for computing the gradient quadrature weights when constructing a `gradient_1D_t` object and 2. Add a `stop` statement to work around an apparent gfortran bug: a malloc error. This commit edits div-grad-laplacian-1D.F90 to 1. Raise the grid resolution above the new minimum of 16 as required for computing the quadrature weights (which are not used in this program) when defining a `gradient_1D_t` object and 2. Improve the program output format.
doc(example): Gauss divergence theorem AI prompt --- example/print-assembled-1D-operators.f90 | 78 ------------------------ 1 file changed, 78 deletions(-) delete mode 100644 example/print-assembled-1D-operators.f90 diff --git a/example/print-assembled-1D-operators.f90 b/example/print-assembled-1D-operators.f90 deleted file mode 100644 index cf93b91c..00000000 --- a/example/print-assembled-1D-operators.f90 +++ /dev/null @@ -1,78 +0,0 @@ -program print_assembled_1D_operators - !! Print fully assembled memetic 1D gradient, divergence, and Laplacian matrices, - !! including the zero elements. - use julienne_m, only : operator(.csv.), string_t, command_line_t - use mimetic_operators_1D_m, only : gradient_operator_1D_t, divergence_operator_1D_t - implicit none - - type(command_line_t) command_line - integer row - - command_line_settings: & - associate( & - gradient => command_line%argument_present(["--grad" ]) & - ,divergence => command_line%argument_present(["--div" ]) & - ,order => command_line%flag_value("--order") & - ) - - if (command_line%argument_present([character(len=len("--help")) :: ("--help"), "-h"])) then - stop new_line('') // new_line('') & - // 'Usage:' // new_line('') & - // ' fpm run \' // new_line('') & - // ' --example print-assembled-1D-operators \' // new_line('') & - // ' --compiler flang-new \' // new_line('') & - // ' --flag "-O3" \' // new_line('') & - // ' -- [--help|-h] | [--grad] [--div] [--order ]' // new_line('') // new_line('') & - // 'where square brackets indicate optional arguments and angular brackets indicate user input values.' // new_line('') - end if - - default_usage: & - associate(print_all => .not. any([gradient, divergence, len(order)/=0])) - - if (print_all .or. (gradient .and. len(order)==0) .or. (gradient .and. order=="2")) call print_gradient_operator( k=2, dx=1D0, m=5) - if (print_all .or. (divergence .and. len(order)==0) .or. (divergence .and. 
order=="2")) call print_divergence_operator(k=2, dx=1D0, m=5) - if (print_all .or. (gradient .and. len(order)==0) .or. (gradient .and. order=="4")) call print_gradient_operator( k=4, dx=1D0, m=9) - if (print_all .or. (divergence .and. len(order)==0) .or. (divergence .and. order=="4")) call print_divergence_operator(k=4, dx=1D0, m=9) - - end associate default_usage - end associate command_line_settings - -contains - - subroutine print_gradient_operator(k, dx, m) - integer, intent(in) :: k, m - double precision, intent(in) :: dx - - print *, new_line(""), "Gradient operator: order = ", k, " | cells = ", m, " | dx = ", dx - - associate(grad_op => gradient_operator_1D_t(k, dx, cells=m)) - associate(G => grad_op%assemble()) - do row = 1, size(G,1) - associate(csv_row => .csv. string_t(G(row,:))) - print '(a)', csv_row%string() - end associate - end do - end associate - end associate - - end subroutine - - subroutine print_divergence_operator(k, dx, m) - integer, intent(in) :: k, m - double precision, intent(in) :: dx - - print *, new_line(""), "Divergence operator: order = ", k, " | cells = ", m, " | dx = ", dx - - associate(div_op => divergence_operator_1D_t(k, dx, cells=m)) - associate(D => div_op%assemble()) - do row = 1, size(D,1) - associate(csv_row => .csv. string_t(D(row,:))) - print '(a)', csv_row%string() - end associate - end do - end associate - end associate - - end subroutine - -end program