diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index ed6a52bb..3b5831eb 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -72,7 +72,7 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - python -m pip install mpi4py pytest fortranformat anesthetic + python -m pip install mpi4py pytest anesthetic - name: Install pypolychord run: pip install -v '.[test]' diff --git a/.gitignore b/.gitignore index 165d67f9..27117c7c 100644 --- a/.gitignore +++ b/.gitignore @@ -33,3 +33,6 @@ pypolychord/.ld_preload.sh # Chains folder (test results only) chains/* .ipynb_checkpoints/ +CLAUDE.md +.mcp* +uv.lock \ No newline at end of file diff --git a/Makefile b/Makefile index adca6c15..df6a24f1 100644 --- a/Makefile +++ b/Makefile @@ -20,23 +20,33 @@ MPI=1 # Whether to compile in debugging mode (default: false) DEBUG=0 -export MPI DEBUG +# Whether to use LAPACK for eigendecomposition (default: true) +# Set LAPACK=0 to use pure-Fortran Jacobi fallback +LAPACK=1 + +export MPI DEBUG LAPACK # We can autodetect the compiler type on unix systems via the shell. 
# if you want to override this then just run make with # make COMPILER_TYPE= -# where is gnu or intel +# where is gnu, intel, intel_llvm, or cray ifeq "$(shell which ftn >/dev/null 2>&1; echo $$?)" "0" COMPILER_TYPE=cray -else ifeq "$(shell which ifort >/dev/null 2>&1; echo $$?)" "0" +else ifeq "$(shell which ifx >/dev/null 2>&1; echo $$?)" "0" +# Detected Intel LLVM-based compilers (ifx, icx, icpx) +COMPILER_TYPE=intel_llvm +else ifeq "$(shell which ifort >/dev/null 2>&1; echo $$?)" "0" +# Detected Intel classic compilers (ifort, icc, icpc) COMPILER_TYPE=intel else ifeq "$(shell which gfortran >/dev/null 2>&1; echo $$?)" "0" COMPILER_TYPE=gnu endif -ifeq ($(COMPILER_TYPE),intel) +ifeq ($(COMPILER_TYPE),intel_llvm) +include Makefile_intel_llvm +else ifeq ($(COMPILER_TYPE),intel) include Makefile_intel -else ifeq ($(COMPILER_TYPE),gnu) +else ifeq ($(COMPILER_TYPE),gnu) include Makefile_gnu else ifeq ($(COMPILER_TYPE),cray) include Makefile_cray @@ -105,6 +115,8 @@ print_CC: @echo $(CC) print_CXX: @echo $(CXX) +print_FC: + @echo $(FC) CLEANDIRS = $(POLYCHORD_DIR) $(PYPOLYCHORD_DIR) $(LIKELIHOOD_DIR) $(BIN_DIR) $(LIB_DIR) $(DRIVERS_DIR) .PHONY: clean veryclean print_CC print_CXX $(addsuffix clean,$(CLEANDIRS)) $(addsuffix veryclean,$(CLEANDIRS)) @@ -118,4 +130,3 @@ veryclean: clean $(addsuffix veryclean,$(CLEANDIRS)) $(RM) *~ build dist pypolychord.egg-info pypolychord/*.pyc pypolychord/__pycache__ __pycache__ pypolychord/lib/*.so $(addsuffix veryclean,$(CLEANDIRS)) : %veryclean: $(MAKE) -C $* veryclean - diff --git a/Makefile_cray b/Makefile_cray index 3456fe6c..f315809a 100644 --- a/Makefile_cray +++ b/Makefile_cray @@ -19,6 +19,14 @@ FFLAGS += -ffree-line-length-none -fallow-argument-mismatch CXXFLAGS += -ffree-line-length-none -fallow-argument-mismatch endif +# LAPACK support for eigendecomposition-based covariance regularization +# Set LAPACK=0 to disable (will use pure-Fortran Jacobi fallback) +# Cray systems typically have LAPACK via LibSci +ifeq ($(LAPACK),1) 
+FFLAGS += -DHAVE_LAPACK +# LibSci is usually linked automatically on Cray systems +endif + ifdef IPO # Archive tool for compiling with ipo. AR = xiar rv diff --git a/Makefile_gnu b/Makefile_gnu index f8200644..89f12ff7 100644 --- a/Makefile_gnu +++ b/Makefile_gnu @@ -43,6 +43,13 @@ else RPATH= endif +# LAPACK support for eigendecomposition-based covariance regularization +# Set LAPACK=0 to disable (will use pure-Fortran Jacobi fallback) +ifeq ($(LAPACK),1) +FFLAGS += -DHAVE_LAPACK +LDLIBS += -llapack -lblas +endif + ifeq ($(DEBUG),1) # Debugging mode # -------------- @@ -55,7 +62,7 @@ ifeq ($(DEBUG),1) # backtrace : produce backtrace of error # fpe-trap : search for floating point exceptions (dividing by zero etc) # fbounds-check : check array indices -FFLAGS += -g -O0 -Wall -Wextra -pedantic -fcheck=all -fimplicit-none -fbacktrace -ffpe-trap=zero,overflow +FFLAGS += -g -O0 -Wall -Wextra -pedantic -fcheck=all -fimplicit-none -fbacktrace -ffpe-trap=zero,overflow -DDEBUG # CXXFLAGS += -g -O0 -Wall -Wextra -Wshadow -Weffc++ CFLAGS += -g -O0 -Wall -Wextra -Wshadow -Weffc++ @@ -63,7 +70,7 @@ else # Optimised mode # -------------- # Ofast : maximum optimisation -FFLAGS += -Ofast +FFLAGS += -Ofast -fno-finite-math-only # no-finit-math-only is needed for detectinng NaNs CXXFLAGS += -Ofast CXFLAGS += -Ofast endif diff --git a/Makefile_intel b/Makefile_intel index c59228dd..04791a66 100644 --- a/Makefile_intel +++ b/Makefile_intel @@ -28,6 +28,14 @@ CXXFLAGS += -std=c++11 -fpic LDFLAGS += -nofor-main LDLIBS += -cxxlib +# LAPACK support for eigendecomposition-based covariance regularization +# Set LAPACK=0 to disable (will use pure-Fortran Jacobi fallback) +# Intel MKL provides LAPACK +ifeq ($(LAPACK),1) +FFLAGS += -DHAVE_LAPACK +LDLIBS += -mkl +endif + ifeq ($(DEBUG),1) # Debugging mode # -------------- @@ -41,7 +49,7 @@ ifeq ($(DEBUG),1) # gen-interfaces : generate an interface block for each routine # warn-interfaces: warn on these interface blocks # fpe0 : halts program 
if dividing by zero, etc -FFLAGS += -g -O0 -traceback -check all,noarg_temp_created -warn all -ftrapuv -debug all -gen-interfaces -warn-interfaces +FFLAGS += -g -O0 -traceback -check all,noarg_temp_created -warn all -ftrapuv -debug all -gen-interfaces -warn-interfaces -DDEBUG CXXFLAGS += -g -O0 -traceback -Wcheck -Wremarks -Wall -Weffc++ -ftrapuv -debug all LDFLAGS += -g else diff --git a/Makefile_intel_llvm b/Makefile_intel_llvm new file mode 100644 index 00000000..9583cf1a --- /dev/null +++ b/Makefile_intel_llvm @@ -0,0 +1,71 @@ +# Makefile_intel_llvm (for Intel LLVM-based compilers: ifx, icx, icpx) + +ifeq ($(MPI),1) +# Using specific MPI-enabled Intel LLVM compilers +FC = mpiifx +CC = mpiicx +CXX = mpiicpx +else +# Using standard Intel LLVM compilers +FC = ifx +CXX = icpx +CC = icx +endif +LDSHARED = $(LD) -shared + +# CPP uses the C compiler +CPP = $(CC) +# LD uses the C/C++ compiler as the primary linker driver for mixed-language projects +LD = $(CC) + +# Archive tool +AR = ar rv + + +# default flags +# -------------- +# fpp : perform preprocessing +# fpic : shared object libraries +# heap-arrays : allocate local arrays on heap +FFLAGS += -fpp -fpic -heap-arrays +CXXFLAGS += -std=c++11 -fpic +LDFLAGS += + +# Link specific Intel runtime libraries +LDLIBS += -lifcore -lifport -limf -lsvml -liomp5 + +# LAPACK support for eigendecomposition-based covariance regularization +# Set LAPACK=0 to disable (will use pure-Fortran Jacobi fallback) +# Intel MKL provides LAPACK (use -qmkl for LLVM-based compilers) +ifeq ($(LAPACK),1) +FFLAGS += -DHAVE_LAPACK -qmkl +LDLIBS += -qmkl +endif + +ifeq ($(DEBUG),1) +# Debugging mode +# -------------- +# g : enable gnu debugger compatibility +# O0 : no optimisation +# traceback : create a backtrace on system failure +# check all : all checks (whilst compiling) +# warn all : all warnings (whilst running) +# ftrapuv : Traps uninitialized variables by setting them to very large values +# debug all : additional debugging information +# 
gen-interfaces : generate an interface block for each routine +# warn-interfaces: warn on these interface blocks +# fpe0 : halts program if dividing by zero, etc +FFLAGS += -g -O0 -traceback -check all,noarg_temp_created -warn all -ftrapuv -debug all -gen-interfaces -warn-interfaces -DDEBUG +CXXFLAGS += -g -O0 -traceback -Wcheck -Wremarks -Wall -Weffc++ -ftrapuv -debug all +LDFLAGS += -g +else +# Optimised mode +# -------------- +# O2 : A high but safe level of optimisation. +# fp-model precise: Disables value-unsafe floating-point optimisations. This is crucial for numerical stability and cross-compiler consistency. +# xHost : maximise architecture usage (from $(HOST) variable) +# warn all : Enable all warnings instead of suppressing them with -w. +FFLAGS += -O2 -fp-model precise $(HOST) -warn all +CXXFLAGS += -O2 -fp-model precise $(HOST) -Wall + +endif \ No newline at end of file diff --git a/pypolychord/polychord.py b/pypolychord/polychord.py index f32efe67..1ce17e99 100644 --- a/pypolychord/polychord.py +++ b/pypolychord/polychord.py @@ -521,6 +521,7 @@ def run(loglikelihood, nDims, **kwargs): default_comm = None paramnames = kwargs.pop('paramnames', None) + cube_samples = kwargs.pop('cube_samples', None) default_kwargs = { 'nDerived': 0, @@ -583,8 +584,8 @@ def run(loglikelihood, nDims, **kwargs): (kwargs['file_root'] + ".paramnames")) - if 'cube_samples' in kwargs: - _make_resume_file(loglikelihood, kwargs['prior'], **kwargs) + if cube_samples is not None: + _make_resume_file(loglikelihood, cube_samples=cube_samples, **kwargs) read_resume = kwargs['read_resume'] kwargs['read_resume'] = True @@ -644,7 +645,7 @@ def wrap_prior(cube, theta): kwargs['seed'], kwargs['comm']) - if 'cube_samples' in kwargs: + if cube_samples is not None: kwargs['read_resume'] = read_resume try: @@ -658,14 +659,52 @@ def wrap_prior(cube, theta): +def _format_fortran_double(val, w=24, d=15, e=3): + """Format a float in Fortran E format (E24.15E3). 
+ + Produces output like: ' 0.123456789012345E+001' + """ + import math + if math.isnan(val): + return f"{'NaN':>{w}}" + if math.isinf(val): + s = '-Infinity' if val < 0 else 'Infinity' + return f"{s:>{w}}" + if val == 0.0: + sign = '-' if math.copysign(1.0, val) < 0 else ' ' + return f"{sign}0.{'0' * d}E+{'0' * e}".rjust(w) + sign = '-' if val < 0 else ' ' + aval = abs(val) + # Get Python scientific notation with d digits of precision + # Python's :.{d}E gives d digits after the decimal with 1 digit before + py_str = f"{aval:.{d}E}" + # Parse mantissa and exponent from Python format: "d.dddE±ee" + mantissa_str, exp_str = py_str.split('E') + exp_val = int(exp_str) + # Python: d.ddd × 10^exp → Fortran: 0.dddd × 10^(exp+1) + # Shift decimal: remove '.', prepend '0.' + digits = mantissa_str.replace('.', '') # e.g. "1234567890123456" + # digits has d+1 characters; we need d digits after '0.' + fortran_mantissa = '0.' + digits[:d] + fortran_exp = exp_val + 1 + exp_sign = '+' if fortran_exp >= 0 else '-' + exp_digits = f"{abs(fortran_exp):0{e}d}" + result = f"{sign}{fortran_mantissa}E{exp_sign}{exp_digits}" + return result.rjust(w) + + +def _format_fortran_int(val, w=12): + """Format an integer right-justified in width w.""" + return f"{int(val):>{w}d}" + + def _make_resume_file(loglikelihood, **kwargs): - import fortranformat as ff resume_filename = Path(kwargs['base_dir']) / (kwargs['file_root'] + ".resume") try: from mpi4py import MPI - comm = MPI.COMM_WORLD + comm = kwargs.get('comm', None) or MPI.COMM_WORLD rank = comm.Get_rank() size = comm.Get_size() except ImportError: @@ -675,20 +714,28 @@ def _make_resume_file(loglikelihood, **kwargs): lives = [] logL_birth = kwargs['logzero'] - for i in np.array_split(np.arange(len(kwargs['cube_samples'])), size)[rank]: - cube = kwargs['cube_samples'][i] - theta = kwargs['prior'](cube) - logL = loglikelihood(theta) - try: - logL, derived = logL - except TypeError: - derived = [] - nDims = len(theta) - nDerived = len(derived) - 
lives.append(np.concatenate([cube,theta,derived,[logL_birth, logL]])) - - if MPI: - sendbuf = np.array(lives).flatten() + n_samples = len(kwargs['cube_samples']) + + # In MPI mode with multiple ranks, only workers (rank > 0) evaluate + # likelihoods, matching PolyChord's controller/worker split where rank 0 + # is the administrator. In serial mode, rank 0 does everything. + if MPI and size > 1: + n_workers = size - 1 + if rank > 0: + worker_index = rank - 1 + for i in np.array_split(np.arange(n_samples), n_workers)[worker_index]: + cube = kwargs['cube_samples'][i] + theta = kwargs['prior'](cube) + logL = loglikelihood(theta) + try: + logL, derived = logL + except TypeError: + derived = [] + nDims = len(theta) + nDerived = len(derived) + lives.append(np.concatenate([cube,theta,derived,[logL_birth, logL]])) + + sendbuf = np.array(lives).flatten() if lives else np.array([], dtype=np.float64) sendcounts = np.array(comm.gather(len(sendbuf))) if rank == 0: recvbuf = np.empty(sum(sendcounts)) @@ -696,22 +743,36 @@ def _make_resume_file(loglikelihood, **kwargs): recvbuf = None comm.Gatherv(sendbuf=sendbuf, recvbuf=(recvbuf, sendcounts), root=0) + if rank == 0: + # Infer row width: nDims (cube) + nDims (theta) + nDerived + 2 (logL_birth, logL) + nDims = len(kwargs['cube_samples'][0]) + nDerived = kwargs.get('nDerived', 0) + row_width = 2 * nDims + nDerived + 2 + lives = np.reshape(recvbuf, (n_samples, row_width)) + else: + for i in range(n_samples): + cube = kwargs['cube_samples'][i] + theta = kwargs['prior'](cube) + logL = loglikelihood(theta) + try: + logL, derived = logL + except TypeError: + derived = [] + nDims = len(theta) + nDerived = len(derived) + lives.append(np.concatenate([cube,theta,derived,[logL_birth, logL]])) + lives = np.array(lives) + if rank == 0: - if MPI: - lives = np.reshape(recvbuf, (len(kwargs['cube_samples']), len(lives[0]))) - else: - lives = np.array(lives) with open(resume_filename,"w") as f: def write(var): var = np.atleast_1d(var) if 
isinstance(var[0], np.integer): - fmt = '(%iI12)' % var.size - elif isinstance(var[0], np.double): - fmt = '(%iE24.15E3)' % var.size + f.write(''.join(_format_fortran_int(v) for v in var) + '\n') + elif isinstance(var[0], (np.floating, float)): + f.write(''.join(_format_fortran_double(v) for v in var) + '\n') else: - fmt = '(A)' - writer = ff.FortranRecordWriter(fmt) - f.write(writer.write(var) + '\n') + f.write(str(var[0]) + '\n') write('=== Number of dimensions ===') write(nDims) diff --git a/pyproject.toml b/pyproject.toml index 4973c843..e7ea9941 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,38 @@ [build-system] requires = ["setuptools", "wheel", "numpy"] build-backend = "setuptools.build_meta" + +[project] +name = "pypolychord" +version = "1.22.1" +description = "Python interface to PolyChord" +readme = "README.rst" +license = "LicenseRef-PolyChord" +license-files = ["LICENCE"] +authors = [ + { name = "Will Handley", email = "wh260@cam.ac.uk" }, +] +requires-python = ">=3.9" +dependencies = [ + "numpy", + "scipy", +] + +[project.optional-dependencies] +plotting = ["getdist"] +mpi = ["mpi4py"] + +[dependency-groups] +dev = ["setuptools", "wheel", "mpi4py"] + +[project.urls] +Homepage = "https://github.com/PolyChord/PolyChordLite" + +[tool.uv] +no-build-isolation-package = ["pypolychord"] + +[tool.setuptools.packages.find] +include = ["pypolychord*"] + +[tool.setuptools.package-data] +pypolychord = ["lib/libchord.so"] diff --git a/setup.py b/setup.py index d075db15..26c2ec2b 100644 --- a/setup.py +++ b/setup.py @@ -83,8 +83,9 @@ def __init__(self, *args, **kwargs): class CustomBuildPy(_build_py, object): def run(self): - env = {} - env["PATH"] = os.environ["PATH"] + env = {k: v for k, v in os.environ.items() + if k in ("PATH", "HOME", "USER", "TMPDIR", "LANG", + "TERM", "SHELL", "LOGNAME")} if self.distribution.no_mpi is None: env["MPI"] = "1" # These need to be set so that build_ext uses the right compilers @@ -99,7 +100,10 @@ def run(self): if 
self.distribution.debug_flags is not None: self.distribution.ext_modules[0].extra_compile_args += ["-g", "-O0"] env["DEBUG"] = "1" - + + # Enable LAPACK by default for eigendecomposition-based covariance regularization + env["LAPACK"] = os.environ.get("LAPACK", "1") + BASE_PATH = os.path.dirname(os.path.abspath(__file__)) env["CURDIR"] = BASE_PATH env.update({k : os.environ[k] for k in ["CC", "CXX", "FC"] if k in os.environ}) diff --git a/src/polychord/calculate.f90 b/src/polychord/calculate.f90 index 61d42f2b..273c6a9c 100644 --- a/src/polychord/calculate.f90 +++ b/src/polychord/calculate.f90 @@ -5,6 +5,10 @@ module calculate_module subroutine calculate_point(loglikelihood,prior,point,settings,nlike) use settings_module, only: program_settings +#ifdef DEBUG + use, intrinsic :: ieee_arithmetic +#endif + implicit none interface function loglikelihood(theta,phi) @@ -31,18 +35,26 @@ function prior(cube) result(theta) real(dp),dimension(settings%nDims) :: mn, mx ! Min and max bounds real(dp),dimension(settings%nDerived) :: phi ! derived parameters real(dp) :: logL - + mn = merge(-1d0,0d0,settings%wraparound) mx = merge(2d0,1d0,settings%wraparound) cube = point(settings%h0:settings%h1) +#ifdef DEBUG + if (any(ieee_is_nan(cube))) then + write(*,'(A)') 'PolyChord DEBUG (calculate_point): NaN detected in hypercube vector before prior transformation.' + end if +#endif + if ( any(cubemx) ) then theta = 0 logL = settings%logzero else where(settings%wraparound) cube = modulo(cube,1d0) + theta = prior(cube) + logL = loglikelihood(theta,phi) end if @@ -72,7 +84,7 @@ function calculate_posterior_point(settings,point,logweight,evidence,volume) res posterior_point(settings%pos_X) = volume ! Likelihood posterior_point(settings%pos_l) = point(settings%l0) - ! Un-normalised weighting + ! Un-normalised weighting posterior_point(settings%pos_w) = logweight ! 
un-normalise cumulative weighting posterior_point(settings%pos_Z) = evidence @@ -121,5 +133,4 @@ end function calculate_similarity_matrix - end module calculate_module diff --git a/src/polychord/chordal_sampling.f90 b/src/polychord/chordal_sampling.f90 index c4ed3fc4..1057f109 100644 --- a/src/polychord/chordal_sampling.f90 +++ b/src/polychord/chordal_sampling.f90 @@ -7,6 +7,9 @@ module chordal_module function SliceSampling(loglikelihood,prior,settings,logL,seed_point,cholesky,nlike,num_repeats) result(baby_points) use settings_module, only: program_settings use random_module, only: random_orthonormal_basis,random_real +#ifdef DEBUG + use, intrinsic :: ieee_arithmetic +#endif implicit none interface @@ -63,6 +66,15 @@ function prior(cube) result(theta) ! Start the baby point at the seed point previous_point = seed_point +#ifdef DEBUG + if (any(ieee_is_nan(seed_point))) then + write(*, '(A)') 'PolyChord DEBUG (SliceSampling): NaN detected in the input seed_point.' + end if + if (any(ieee_is_nan(cholesky))) then + write(*, '(A)') 'PolyChord DEBUG (SliceSampling): NaN detected in the input cholesky matrix.' + end if +#endif + ! Initialise the likelihood counter at 0 nlike = 0 @@ -72,12 +84,30 @@ function prior(cube) result(theta) ! Transform to the unit hypercube nhats = matmul(cholesky,nhats) +#ifdef DEBUG + if (any(ieee_is_nan(nhats))) then + write(*,'(A)') 'PolyChord DEBUG (SliceSampling): NaN detected in nhats vector immediately after matmul(cholesky,nhats).' + if (any(ieee_is_nan(cholesky))) then + write(*,'(A)') ' The source is a NaN cholesky matrix.' + else + write(*,'(A)') ' The source is NOT the cholesky matrix. Problem is in generate_nhats or matmul.' + end if + end if +#endif + do i_babies=1,size(nhats,2) ! Get a new random direction nhat = nhats(:,i_babies) ! Normalise it w = sqrt(dot_product(nhat,nhat)) + + ! 
Warn if direction vector has zero magnitude (root cause of NaN propagation) + if (w < 1.d-20) then + write(*,'(A)') 'PolyChord WARNING: Slice sampling direction vector has zero magnitude (w=0).' + write(*,'(A)') ' Division by zero is imminent, resulting in NaN parameters.' + end if + nhat = nhat/w w = w * 3d0 !* exp( lgamma(0.5d0 * settings%nDims) - lgamma(1.5d0 + 0.5d0 * settings%nDims) ) * settings%nDims @@ -165,6 +195,9 @@ function slice_sample(loglikelihood,prior,logL,nhat,x0,w,S,n) result(baby_point) use utils_module, only: distance use random_module, only: random_real use calculate_module, only: calculate_point +#ifdef DEBUG + use, intrinsic :: ieee_arithmetic +#endif implicit none interface function loglikelihood(theta,phi) @@ -203,16 +236,24 @@ function prior(cube) result(theta) ! The lower bound real(dp), dimension(S%nTotal) :: L - real(dp) :: temp_random integer :: i_step, i real(dp) :: x0Rd, x0Ld +#ifdef DEBUG + if (any(ieee_is_nan(x0))) then + write(*, '(A)') 'PolyChord DEBUG (slice_sample): NaN detected in the input seed point x0.' + end if + if (any(ieee_is_nan(nhat))) then + write(*, '(A)') 'PolyChord DEBUG (slice_sample): NaN detected in the input direction vector nhat.' + end if +#endif + ! Select initial start and end points temp_random = random_real() - L(S%h0:S%h1) = x0(S%h0:S%h1) - temp_random * w * nhat - R(S%h0:S%h1) = x0(S%h0:S%h1) + (1-temp_random) * w * nhat + L(S%h0:S%h1) = x0(S%h0:S%h1) - temp_random * w * nhat + R(S%h0:S%h1) = x0(S%h0:S%h1) + (1-temp_random) * w * nhat ! Calculate initial likelihoods call calculate_point(loglikelihood,prior,R,S,n) @@ -238,15 +279,29 @@ function prior(cube) result(theta) ! Sample within this bound do i_step=0,100 - ! Find the distance between x0 and L + ! Find the distance between x0 and L x0Ld= distance(x0(S%h0:S%h1),L(S%h0:S%h1), [(.false., i=1,S%nDims)]) - ! Find the distance between x0 and R + ! 
Find the distance between x0 and R x0Rd= distance(x0(S%h0:S%h1),R(S%h0:S%h1), [(.false., i=1,S%nDims)]) +#ifdef DEBUG + if (ieee_is_nan(x0Ld) .or. ieee_is_nan(x0Rd)) then + write(*, '(A)') 'PolyChord DEBUG (slice_sample): NaN detected in boundary distances.' + write(*, '(A, F24.15)') ' x0Ld = ', x0Ld + write(*, '(A, F24.15)') ' x0Rd = ', x0Rd + end if +#endif + ! Draw a random point within L and R - baby_point(S%h0:S%h1) = x0(S%h0:S%h1)+ (random_real() * (x0Rd+x0Ld) - x0Ld) * nhat + baby_point(S%h0:S%h1) = x0(S%h0:S%h1)+ (random_real() * (x0Rd+x0Ld) - x0Ld) * nhat + +#ifdef DEBUG + if (any(ieee_is_nan(baby_point(S%h0:S%h1)))) then + write(*,'(A)') 'PolyChord DEBUG (slice_sample): NaN detected in new parameter vector immediately after creation.' + end if +#endif - ! calculate the likelihood + ! calculate the likelihood call calculate_point(loglikelihood,prior,baby_point,S,n) ! If we're not within the likelihood bound then we need to sample further @@ -275,4 +330,3 @@ end function slice_sample end module chordal_module - diff --git a/src/polychord/generate.F90 b/src/polychord/generate.F90 index fc8e3a25..d270e552 100644 --- a/src/polychord/generate.F90 +++ b/src/polychord/generate.F90 @@ -5,7 +5,6 @@ module generate_module use utils_module, only: dp - implicit none contains @@ -249,6 +248,7 @@ function prior(cube) result(theta) ! The workers simply generate and send points until they're told to stop by the administrator do while(live_point_needed(live_point,mpi_information)) + time0 = time() call calculate_point( loglikelihood, prior, live_point, settings,nlike) ! 
Compute physical coordinates, likelihoods and derived parameters ndiscarded=ndiscarded+1 diff --git a/src/polychord/mpi_utils.F90 b/src/polychord/mpi_utils.F90 index e5bd84bc..fe66c2ca 100644 --- a/src/polychord/mpi_utils.F90 +++ b/src/polychord/mpi_utils.F90 @@ -435,7 +435,7 @@ subroutine throw_babies(baby_points,nlike,epoch,mpi_information) integer, intent(in) :: epoch !> The epoch the babies were generated in type(mpi_bundle), intent(in) :: mpi_information - call MPI_SEND( &! + call MPI_SEND( &! baby_points, &! size(baby_points,1)*size(baby_points,2),&! MPI_DOUBLE_PRECISION, &! diff --git a/src/polychord/nested_sampling.F90 b/src/polychord/nested_sampling.F90 index 276fe2e2..b7d5b3ba 100644 --- a/src/polychord/nested_sampling.F90 +++ b/src/polychord/nested_sampling.F90 @@ -257,6 +257,7 @@ end subroutine dumper ! Generate a new set of points within the likelihood bound of the late point baby_points = SliceSampling(loglikelihood,prior,settings,logL,seed_point,cholesky,nlike,num_repeats) + baby_points(settings%b0,:) = logL ! Note the moment it is born at #ifdef MPI else if(settings%synchronous) then @@ -485,7 +486,6 @@ end subroutine dumper time1 = time() slice_time = slice_time + time1-time0 - ! 3) Send the baby points back call throw_babies(baby_points,nlike,worker_epoch,mpi_information) diff --git a/src/polychord/random_utils.F90 b/src/polychord/random_utils.F90 index 1192606e..53ed0a22 100644 --- a/src/polychord/random_utils.F90 +++ b/src/polychord/random_utils.F90 @@ -290,6 +290,12 @@ function random_direction(nDims) random_direction = random_gaussian(nDims) ! Calculate the modulus squared random_direction2 = dot_product(random_direction,random_direction) + + ! --- START OF ADDED TRACE (Non-invasive) --- + if (random_direction2 <= 1.d-30) then + write(*, '(A)') 'PolyChord TRACE (random_direction): Generated a near-zero-length random vector. This could cause issues if not handled.' + endif + ! --- END OF ADDED TRACE --- end do ! 
normalise the vector diff --git a/src/polychord/run_time_info.f90 b/src/polychord/run_time_info.f90 index 903446f0..05755257 100644 --- a/src/polychord/run_time_info.f90 +++ b/src/polychord/run_time_info.f90 @@ -600,21 +600,38 @@ end function delete_cluster subroutine calculate_covmats(settings,RTI) use settings_module, only: program_settings - use utils_module, only: calc_cholesky, calc_covmat + use utils_module, only: calc_cholesky, calc_covmat, dp use array_module, only: concat + use, intrinsic :: ieee_arithmetic implicit none type(program_settings), intent(in) :: settings !> Program settings type(run_time_info),intent(inout) :: RTI !> Run time information integer :: i_cluster ! cluster iterator + integer :: num_points + real(dp), allocatable, dimension(:,:) :: points_to_check + ! For each cluster: do i_cluster = 1,RTI%ncluster - RTI%covmat(:,:,i_cluster) = calc_covmat(concat(RTI%live(settings%h0:settings%h1,:RTI%nlive(i_cluster),i_cluster),& - RTI%phantom(settings%h0:settings%h1,:RTI%nphantom(i_cluster),i_cluster)),& - settings%wraparound) + ! Concatenate live and phantom points for this cluster + if (RTI%nlive(i_cluster) + RTI%nphantom(i_cluster) > 0) then + points_to_check = concat(RTI%live(settings%h0:settings%h1,:RTI%nlive(i_cluster),i_cluster),& + RTI%phantom(settings%h0:settings%h1,:RTI%nphantom(i_cluster),i_cluster)) + else + allocate(points_to_check(settings%nDims,0)) + end if + num_points = size(points_to_check,2) + + ! Calculate the covariance matrix + RTI%covmat(:,:,i_cluster) = calc_covmat(points_to_check, settings%wraparound) + + if (allocated(points_to_check)) deallocate(points_to_check) + ! Calculate the cholesky decomposition - RTI%cholesky(:,:,i_cluster) = calc_cholesky(RTI%covmat(:,:,i_cluster)) + ! Pass num_points to enable eigendecomposition-based regularization + ! 
for rank-deficient covariance matrices (when n < D+1) + RTI%cholesky(:,:,i_cluster) = calc_cholesky(RTI%covmat(:,:,i_cluster), num_points) end do diff --git a/src/polychord/utils.F90 b/src/polychord/utils.F90 index e6fc74ef..eef95d29 100644 --- a/src/polychord/utils.F90 +++ b/src/polychord/utils.F90 @@ -1,5 +1,6 @@ module utils_module + use, intrinsic :: ieee_arithmetic implicit none #ifdef MPI include 'mpif.h' @@ -86,6 +87,28 @@ module utils_module ! which means that we neglect all terms smaller than eps times the ! current sum + !> Minimum variance for regularization of covariance matrices + !! Used when clusters have insufficient points or near-zero variance + real(dp), parameter :: min_variance = 1.d-20 + + !> Relative threshold for detecting zero eigenvalues in regularization + real(dp), parameter :: eigenvalue_rel_threshold = 1.d-10 + !> Absolute floor threshold for eigenvalues (prevents issues when max eigenvalue is tiny) + real(dp), parameter :: eigenvalue_abs_threshold = 1.d-20 + +#ifdef HAVE_LAPACK + !> LAPACK interface for symmetric eigenvalue decomposition + interface + subroutine dsyev(jobz, uplo, n, a, lda, w, work, lwork, info) + import :: dp + character, intent(in) :: jobz, uplo + integer, intent(in) :: n, lda, lwork + integer, intent(out) :: info + real(dp), intent(inout) :: a(lda,*) + real(dp), intent(out) :: w(*), work(*) + end subroutine dsyev + end interface +#endif integer,parameter :: flag_blank = -2 integer,parameter :: flag_gestating = -1 @@ -629,11 +652,45 @@ function abovetol (change,current_sum) end function abovetol - function calc_cholesky(a) result(L) + !> Compute Cholesky decomposition of a covariance matrix + !! + !! If n_points is provided and n_points < nDims + 1, the covariance matrix + !! is rank-deficient. In this case, we first apply eigendecomposition-based + !! regularization to make it positive-definite before computing Cholesky. + !! + !! @param[in] a Input covariance matrix + !! 
@param[in] n_points Optional: number of points used to compute covariance + !! @return L Lower-triangular Cholesky factor + function calc_cholesky(a, n_points) result(L) implicit none real(dp), intent(in),dimension(:,:) :: a + integer, intent(in), optional :: n_points + real(dp), dimension(size(a,1),size(a,2)) :: a_reg real(dp), dimension(size(a,1),size(a,2)) :: L - integer :: i,j + integer :: i,j, nDims + logical :: needs_regularization + + nDims = size(a, 1) + + ! Determine if eigendecomposition-based regularization is needed + ! This is required when the covariance matrix is rank-deficient (n < D+1) + needs_regularization = .false. + if (present(n_points)) then + if (n_points < nDims + 1) then + needs_regularization = .true. + end if + end if + + if (needs_regularization) then + ! Use eigendecomposition-based regularization for rank-deficient matrices + ! This replaces zero eigenvalues with 1.0 (unit variance in null-space) + a_reg = regularize_covmat(a) + else + ! === FIX 3: Pre-regularize the matrix to avoid numerical issues === + ! Add min_variance to diagonal before attempting decomposition + ! This prevents failure for near-singular matrices from tight clusters + a_reg = a + identity_matrix(nDims) * min_variance + end if ! Set it all to zero to begin with L = 0 @@ -641,24 +698,331 @@ function calc_cholesky(a) result(L) ! Zero out the upper half do i=1,size(a,1) - L(i,i)= a(i,i) - sum(L(i,:i-1)**2) + L(i,i)= a_reg(i,i) - sum(L(i,:i-1)**2) if (L(i,i).le.0d0) then ! If the cholesky decomposition does not exist, then set it to - ! be a re-scaled identity matrix - L = identity_matrix(size(a,1)) * sqrt(trace(a)) + ! 
be a re-scaled identity matrix (original PolyChord behavior) + L = identity_matrix(nDims) * sqrt(trace(a_reg)) return else L(i,i)=sqrt(L(i,i)) end if - do j=i+1,size(a,1) - L(j,i) = (a(i,j) - sum(L(i,:i-1)*L(j,:i-1)))/L(i,i) + do j=i+1,size(a_reg,1) + L(j,i) = (a_reg(i,j) - sum(L(i,:i-1)*L(j,:i-1)))/L(i,i) end do end do end function calc_cholesky + !> Compute eigenvalues and eigenvectors of a symmetric matrix using Jacobi method + !! + !! This is a pure-Fortran fallback for systems without LAPACK. + !! Uses the cyclic Jacobi algorithm which iteratively applies Givens rotations + !! to diagonalize the matrix. + !! + !! @param[inout] a On entry: symmetric matrix. On exit: destroyed. + !! @param[out] w Eigenvalues in ascending order + !! @param[out] v Eigenvectors as columns (v(:,i) is eigenvector for w(i)) + !! @param[out] info 0 on success, >0 if failed to converge + subroutine jacobi_eigen(a, w, v, info) + implicit none + real(dp), intent(inout), dimension(:,:) :: a + real(dp), intent(out), dimension(:) :: w + real(dp), intent(out), dimension(:,:) :: v + integer, intent(out) :: info + + integer :: n, i, j, k, p, q, sweep + real(dp) :: threshold, off_diag, c, s, t, tau, h, g, temp + real(dp) :: tol, sum_off + integer, parameter :: max_sweeps = 50 + + n = size(a, 1) + info = 0 + tol = 1.0d-15 + + ! Initialize eigenvector matrix to identity + v = 0.0d0 + do i = 1, n + v(i,i) = 1.0d0 + end do + + ! Main Jacobi iteration + do sweep = 1, max_sweeps + ! Compute sum of off-diagonal elements + sum_off = 0.0d0 + do i = 1, n-1 + do j = i+1, n + sum_off = sum_off + abs(a(i,j)) + end do + end do + + ! Check for convergence + if (sum_off < tol * n * n) exit + + ! Threshold for this sweep + if (sweep < 4) then + threshold = 0.2d0 * sum_off / (n * n) + else + threshold = 0.0d0 + end if + + ! Sweep through all off-diagonal elements + do p = 1, n-1 + do q = p+1, n + off_diag = abs(a(p,q)) + + ! Skip if element is small enough + if (sweep > 4 .and. off_diag < tol * abs(a(p,p)) .and. 
& + off_diag < tol * abs(a(q,q))) then + a(p,q) = 0.0d0 + a(q,p) = 0.0d0 + cycle + end if + + if (off_diag > threshold) then + h = a(q,q) - a(p,p) + + if (abs(h) < tol * off_diag) then + t = 1.0d0 + if (a(p,q) < 0.0d0) t = -1.0d0 + else + tau = h / (2.0d0 * a(p,q)) + if (tau >= 0.0d0) then + t = 1.0d0 / (tau + sqrt(1.0d0 + tau*tau)) + else + t = -1.0d0 / (-tau + sqrt(1.0d0 + tau*tau)) + end if + end if + + c = 1.0d0 / sqrt(1.0d0 + t*t) + s = t * c + tau = s / (1.0d0 + c) + h = t * a(p,q) + + ! Update diagonal elements + a(p,p) = a(p,p) - h + a(q,q) = a(q,q) + h + a(p,q) = 0.0d0 + a(q,p) = 0.0d0 + + ! Update rest of row/column p and q + do k = 1, p-1 + g = a(k,p) + h = a(k,q) + a(k,p) = g - s * (h + g * tau) + a(k,q) = h + s * (g - h * tau) + end do + do k = p+1, q-1 + g = a(p,k) + h = a(k,q) + a(p,k) = g - s * (h + g * tau) + a(k,q) = h + s * (g - h * tau) + end do + do k = q+1, n + g = a(p,k) + h = a(q,k) + a(p,k) = g - s * (h + g * tau) + a(q,k) = h + s * (g - h * tau) + end do + + ! Update eigenvector matrix + do k = 1, n + g = v(k,p) + h = v(k,q) + v(k,p) = g - s * (h + g * tau) + v(k,q) = h + s * (g - h * tau) + end do + end if + end do + end do + end do + + ! Check convergence + if (sweep > max_sweeps) then + info = 1 + write(*,'(A)') 'PolyChord WARNING (jacobi_eigen): Failed to converge' + end if + + ! Extract eigenvalues from diagonal + do i = 1, n + w(i) = a(i,i) + end do + + ! Sort eigenvalues in ascending order (simple bubble sort - n is small) + do i = 1, n-1 + do j = i+1, n + if (w(j) < w(i)) then + ! Swap eigenvalues + temp = w(i) + w(i) = w(j) + w(j) = temp + ! Swap eigenvector columns + do k = 1, n + temp = v(k,i) + v(k,i) = v(k,j) + v(k,j) = temp + end do + end if + end do + end do + + end subroutine jacobi_eigen + + + !> Compute eigenvalues and eigenvectors of a symmetric matrix + !! + !! Uses LAPACK DSYEV if available, otherwise falls back to Jacobi method. + !! + !! @param[in] a_in Symmetric matrix (not modified) + !! 
@param[out] w Eigenvalues in ascending order + !! @param[out] v Eigenvectors as columns + !! @param[out] info 0 on success, >0 on failure + subroutine sym_eigen(a_in, w, v, info) + implicit none + real(dp), intent(in), dimension(:,:) :: a_in + real(dp), intent(out), dimension(:) :: w + real(dp), intent(out), dimension(:,:) :: v + integer, intent(out) :: info + + integer :: n, lwork + real(dp), allocatable :: work(:) +#ifdef HAVE_LAPACK + real(dp), allocatable :: a_copy(:,:) +#endif + + n = size(a_in, 1) + +#ifdef HAVE_LAPACK + ! Use LAPACK DSYEV + allocate(a_copy(n,n)) + a_copy = a_in + + ! Query optimal workspace size + allocate(work(1)) + lwork = -1 + call dsyev('V', 'U', n, a_copy, n, w, work, lwork, info) + lwork = int(work(1)) + deallocate(work) + allocate(work(lwork)) + + ! Compute eigenvalues and eigenvectors + call dsyev('V', 'U', n, a_copy, n, w, work, lwork, info) + + ! DSYEV stores eigenvectors in a_copy + v = a_copy + + deallocate(work, a_copy) +#else + ! Use pure-Fortran Jacobi fallback + ! jacobi_eigen destroys the input matrix, so we work on a copy + ! and store eigenvectors separately + allocate(work(n*n)) + work = reshape(a_in, [n*n]) + v = reshape(work, [n,n]) ! v is used as working copy for matrix + deallocate(work) + + ! jacobi_eigen expects separate output for eigenvectors + block + real(dp), dimension(n,n) :: a_work, v_work + a_work = a_in + call jacobi_eigen(a_work, w, v_work, info) + v = v_work + end block +#endif + + end subroutine sym_eigen + + + !> Regularize a covariance matrix to ensure positive-definiteness + !! + !! For rank-deficient covariance matrices (from clusters with n < D+1 points), + !! this function replaces zero eigenvalues with 1.0 (unit variance in null-space). + !! + !! Algorithm: + !! 1. Eigendecomposition: Σ = Q Λ Q^T + !! 2. Replace eigenvalues below threshold with 1.0 + !! 3. Reconstruct: Σ_reg = Q Λ_reg Q^T + !! + !! @param[in] covmat Input covariance matrix (may be singular) + !! 
@return covmat_reg Regularized positive-definite covariance matrix + function regularize_covmat(covmat) result(covmat_reg) + implicit none + real(dp), intent(in), dimension(:,:) :: covmat + real(dp), dimension(size(covmat,1), size(covmat,2)) :: covmat_reg + + integer :: n, i, j, info + real(dp), allocatable :: w(:), v(:,:), w_reg(:) + real(dp) :: max_eigenvalue, threshold + + n = size(covmat, 1) + allocate(w(n), v(n,n), w_reg(n)) + + ! Step 1: Eigendecomposition + call sym_eigen(covmat, w, v, info) + + if (info /= 0) then + write(*,'(A,I3)') 'PolyChord WARNING (regularize_covmat): Eigendecomposition failed with info=', info + write(*,'(A)') ' Returning identity matrix' + covmat_reg = identity_matrix(n) + deallocate(w, v, w_reg) + return + end if + + ! Check for NaN in eigenvalues + if (any(ieee_is_nan(w))) then + write(*,'(A)') 'PolyChord WARNING (regularize_covmat): NaN in eigenvalues, returning identity matrix' + covmat_reg = identity_matrix(n) + deallocate(w, v, w_reg) + return + end if + + ! Step 2: Handle negative eigenvalues (numerical error) and compute threshold + w_reg = max(0.0d0, w) + max_eigenvalue = maxval(w_reg) + + ! Hybrid threshold: relative + absolute floor + threshold = max(max_eigenvalue * eigenvalue_rel_threshold, eigenvalue_abs_threshold) + + ! Step 3: Replace small/zero eigenvalues with 1.0 (unit variance in null-space) + ! Also handle rank-0 case (all eigenvalues near zero) + if (max_eigenvalue < eigenvalue_abs_threshold) then + ! All eigenvalues effectively zero - return identity + write(*,'(A)') 'PolyChord INFO (regularize_covmat): All eigenvalues near zero, using identity matrix' + covmat_reg = identity_matrix(n) + deallocate(w, v, w_reg) + return + end if + + do i = 1, n + if (w_reg(i) < threshold) then + w_reg(i) = 1.0d0 + end if + end do + + ! Step 4: Reconstruct covariance matrix: Σ_reg = V * diag(w_reg) * V^T + ! Use efficient matmul instead of nested loops for BLAS optimization + block + real(dp), dimension(n,n) :: temp_mat + ! 
!> Compute the sample covariance matrix of a set of points, respecting
!! periodic (wraparound) dimensions via circular statistics
!!
!! @param[in] x          Points stored as columns: x(dimension, point)
!! @param[in] wraparound Per-dimension flag marking periodic coordinates
!! @return covmat        Covariance matrix of the points
function calc_covmat(x,wraparound) result(covmat)
    implicit none
    real(dp), intent(in), dimension(:,:) :: x
    ! NOTE(review): these two declarations are unchanged context outside the
    ! visible hunk and are reconstructed from usage - confirm against full source
    logical, intent(in), dimension(:) :: wraparound
    real(dp), dimension(size(x,1),size(x,1)) :: covmat

    real(dp), dimension(size(x,1),size(x,2)) :: dx
    real(dp), dimension(size(x,1)) :: mu, circle_mu
    integer :: nDims,n,i
    real(dp) :: sum_s, sum_c ! For wraparound checks

    nDims = size(x,1)
    n = size(x,2)

    ! Handle insufficient points (n < 2): the sample covariance is undefined
    if (n < 2) then
        ! Return unit identity matrix to allow local exploration
        ! This is mathematically sound: represents zero correlation and unit variance
        covmat = identity_matrix(nDims)
        return
    endif

    ! === FIX 2: Wraparound atan2(0,0) protection ===
    ! Compute the circle mean with protection against atan2(0,0)
    circle_mu = 0d0
    if (any(wraparound)) then
        do i = 1, nDims
            if (wraparound(i)) then
                ! Resultant vector of the points mapped onto the unit circle
                sum_s = sum(sin(x(i,:)*TwoPi))
                sum_c = sum(cos(x(i,:)*TwoPi))
                ! Check if magnitude is effectively zero (all points identical or antipodal)
                if (sqrt(sum_s**2 + sum_c**2) < 1.d-15) then
                    ! Mean is undefined, use first point's value
                    circle_mu(i) = x(i,1)
                else
                    ! Normal case: compute angle of the resultant vector
                    circle_mu(i) = atan2(sum_s, sum_c) / TwoPi
                endif
            endif
        end do
    endif

    ! Compute the mean, re-centering periodic dimensions on the circle mean
    ! so that nint() wraps differences into (-0.5, 0.5]
    dx = x - spread(circle_mu,dim=2,ncopies=n)
    where(spread(wraparound,dim=2,ncopies=n)) dx = dx - nint(dx)
    mu = modulo(sum(dx,dim=2)/n + circle_mu, 1d0)

    ! Compute the covariance matrix about the mean, again using wrapped
    ! differences for periodic dimensions
    dx = x - spread(mu,dim=2,ncopies=n)
    where(spread(wraparound,dim=2,ncopies=n)) dx = dx - nint(dx)

    covmat = matmul(dx,transpose(dx))/(n-1)

end function calc_covmat
'E+301' in result + assert result.strip().startswith('0.1000000000000') + + def test_nan(self): + for nan_val in [float('nan'), np.float64('nan')]: + result = _format_fortran_double(nan_val) + assert len(result) == 24 + assert result.strip() == 'NaN' + + def test_inf(self): + result = _format_fortran_double(float('inf')) + assert len(result) == 24 + assert result.strip() == 'Infinity' + + def test_neg_inf(self): + result = _format_fortran_double(float('-inf')) + assert len(result) == 24 + assert result.strip() == '-Infinity' + + def test_logzero(self): + """Test the typical logzero value used in PolyChord.""" + result = _format_fortran_double(-1e30) + assert len(result) == 24 + assert result.strip().startswith('-0.1000000000000') + assert 'E+031' in result + + def test_numpy_float64(self): + """Ensure numpy float64 values work correctly.""" + result = _format_fortran_double(np.float64(2.5)) + assert len(result) == 24 + assert result.strip() == '0.250000000000000E+001' + + def test_normalization(self): + """Mantissa should always start with 0. (Fortran convention).""" + values = [1.0, 0.5, 123.456, 1e-10, 9.99] + for v in values: + result = _format_fortran_double(v).strip() + # After optional sign, should start with '0.' + if result.startswith('-'): + assert result[1:3] == '0.', f"Bad normalization for {v}: {result}" + else: + assert result[0:2] == '0.', f"Bad normalization for {v}: {result}" + + +class TestFormatFortranInt: + """Test _format_fortran_int against Fortran I12 output.""" + + def test_field_width(self): + result = _format_fortran_int(42) + assert len(result) == 12 + + def test_right_justified(self): + result = _format_fortran_int(5) + assert result == ' 5' + + def test_negative(self): + result = _format_fortran_int(-10) + assert result == ' -10' + + def test_zero(self): + result = _format_fortran_int(0) + assert result == ' 0' + + def test_numpy_int(self): + result = _format_fortran_int(np.int64(100)) + assert result == ' 100'