diff --git a/mcdc/.dockerignore b/mcdc/.dockerignore new file mode 100644 index 000000000..c64cfd509 --- /dev/null +++ b/mcdc/.dockerignore @@ -0,0 +1,164 @@ +# Git +.git +.github + +# Build artifacts +build +.eggs +mcdc.egg-info +dist/ + +# Material / cross-section data +mcdc_xs +MCDC-Xsec + +# Python cache +__pycache__ +*.pyc +*.nbc +*.nbi + +# GPU cache +__ptxcache__ +__harmonize_cache__ + +# Editor files +.spyproject +*.swp +.vscode/ +**/.idea + +# Virtual environments +.venv +venv +env + +# Output files +*output.h5 +output*.h5 +*.h5 +*.hdf5 +*.png +*.mp4 +*.gif + +# Cluster job files +*.out +*.pbs +*.log + +# Test cache +.pytest_cache +pytestdebug.log +.coverage +htmlcov + +# Profiler +*.prof + +# Documentation builds +docs/build +docs/source/pythonapi/generated/ +docs + +# Container artifacts +*.sif +*_sandbox +*.tar +containers/ + +# Misc +tmp +tmp.py +tmp_* +*_tmp.py +*.core +.DS_Store +**/.DS_Store +# Git +.git +.github + +# Build artifacts +build +.eggs +mcdc.egg-info +dist/ + +# Material / cross-section data +mcdc_xs +MCDC-Xsec + +# Python cache +__pycache__ +*.pyc +*.nbc +*.nbi + +# GPU cache +__ptxcache__ +__harmonize_cache__ + +# Editor files +.spyproject +*.swp +.vscode/ +**/.idea + +# Virtual environments +.venv +venv +env + +# Output files +*output.h5 +output*.h5 +*.h5 +*.hdf5 +*.png +*.mp4 +*.gif + +# Cluster job files +*.out +*.pbs +*.log + +# Test cache +.pytest_cache +pytestdebug.log +.coverage +htmlcov + +# Profiler +*.prof + +# Documentation builds +docs/build +docs/source/pythonapi/generated/ +docs + +# Container artifacts +*.sif +*_sandbox +*.tar +containers/ + +# Misc +tmp +tmp.py +tmp_* +*_tmp.py +*.core +.DS_Store +**/.DS_Store +*.csv + +# Regression test data +dummy_nuclide.h5 +source_particles.h5 +*.csv + +# Regression test data +dummy_nuclide.h5 +source_particles.h5 \ No newline at end of file diff --git a/mcdc/.git-blame-ignore-revs b/mcdc/.git-blame-ignore-revs new file mode 100644 index 000000000..5ae64dab5 --- /dev/null +++ 
b/mcdc/.git-blame-ignore-revs @@ -0,0 +1,6 @@ +# Run code through autopep8 +482f6e642f4e7a4a2a6f82fe5f1dd739af6b61e8 + +# Run code through black +af5dbfc6d12716cd91155e8c85fa350b13c28d8e +0c3b4f8a61617f66956c71f206b961a5ced0d5ab diff --git a/mcdc/.github/ISSUE_TEMPLATE/bug_report.md b/mcdc/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..cf155c808 --- /dev/null +++ b/mcdc/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,39 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '[Bug] ' +labels: 'bug' +assignees: '' + +--- + + +# Describe the bug. + +A clear description of what the bug is, with code blocks wherever possible. + +# Steps to reproduce the bug. + +Steps to reproduce the behavior (ideally as a minimum example of where the failure is coming from with code blocks for error messages and inputs): +1. Outline what you did. + +## What inputs and outputs are involved? + +If applicable, add what you ran or received to help explain your problem. +If there are relevant configuration files, please provide those as well. + +## Are there additional replication details? + + - Operating System: + - Software Version: + - Python Version: + +# What is the expected behavior? + +A clear description of what you expected to happen. + +# Additional context. + +Add any other context about the problem here. + +# How can this issue be closed? diff --git a/mcdc/.github/ISSUE_TEMPLATE/feature_request.md b/mcdc/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..92ef27527 --- /dev/null +++ b/mcdc/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,27 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '[Feature] ' +labels: 'enhancement' +assignees: '' + +--- + + +# Is your feature request related to a problem? Please describe. + +Describe the problem here. + +# Describe the feature you'd like. + +A clear description of what you want to happen. 
+ +## Describe alternatives you've considered. + +A clear description of any alternative solutions or features you've considered. + +# Additional context. + +Add any other context or code blocks about the feature request here. + +# How can this issue be closed? diff --git a/mcdc/.github/ISSUE_TEMPLATE/general.md b/mcdc/.github/ISSUE_TEMPLATE/general.md new file mode 100644 index 000000000..fb9c0f3fe --- /dev/null +++ b/mcdc/.github/ISSUE_TEMPLATE/general.md @@ -0,0 +1,12 @@ +--- +name: Generic issue +about: Bare template for general needs +title: '' +labels: '' +assignees: '' + +--- + +# Description + +# How can this issue be closed? diff --git a/mcdc/.github/ISSUE_TEMPLATE/question.md b/mcdc/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 000000000..79b470833 --- /dev/null +++ b/mcdc/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,25 @@ +--- +name: Question or Discussion +about: Ask a question or start a discussion about this project +title: "[Question] " +labels: 'question' +assignees: '' + +--- +- + +# Question or discussion topic. + +Please describe your question or the topic you want to discuss. + +## Provide details. + +Provide any relevant details, context, or background information that might help others +understand your question or discussion topic. + +## External context or references. + +Add any other context, reference links, or screenshots about the question or discussion +topic here. + +# How can this issue be closed? 
diff --git a/mcdc/.github/pull_request_template.md b/mcdc/.github/pull_request_template.md new file mode 100644 index 000000000..6d61965d8 --- /dev/null +++ b/mcdc/.github/pull_request_template.md @@ -0,0 +1,41 @@ +## Summary of changes + + +## Types of changes + + +- [ ] Bug fix (non-breaking change which fixes an issue) +- [ ] New feature (non-breaking change which adds functionality) +- [ ] Breaking change (fix or feature that would cause existing functionality to change) +- [ ] Organization and beautification (changes which improve readability and/or accessibility) + +## Developer Checklist + + +- [ ] I have read the [contributing guide](https://mcdc.readthedocs.io/en/latest/contribution/index.html). +- [ ] My code follows the [code style](https://mcdc.readthedocs.io/en/latest/contribution/index.html#code-styling) of this project. +- [ ] I have updated the documentation accordingly. +- [ ] I have added tests to cover my changes. +- [ ] All new and existing tests pass + +## Associated Issues and PRs + + +- closes # +- related to # + +## Associated Developers + + +- Dev: @ diff --git a/mcdc/.github/workflows/black_lint.yml b/mcdc/.github/workflows/black_lint.yml new file mode 100644 index 000000000..366d1174c --- /dev/null +++ b/mcdc/.github/workflows/black_lint.yml @@ -0,0 +1,28 @@ +name: Black Style Check + +on: + push: + pull_request: + +jobs: + black-lint: + name: Lint with Black + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.14" + cache: pip + + - name: Install Black + run: | + pip install --upgrade pip + pip install black + + - name: Run Black (check mode) + run: black --check . 
diff --git a/mcdc/.github/workflows/docker.yml b/mcdc/.github/workflows/docker.yml new file mode 100644 index 000000000..73c1906e9 --- /dev/null +++ b/mcdc/.github/workflows/docker.yml @@ -0,0 +1,202 @@ +# ============================================================================= +# Docker Build, Test, and Publish +# ============================================================================= +# Builds the CPU Docker image, runs the full test suite inside it, +# and publishes to GHCR on pushes to main/dev (i.e., after PR merge). +# +# Publish trigger logic: +# - PRs → build + test only (no publish) +# - Push to dev/main → build + test + publish +# When a PR is merged, GitHub fires a 'push' event on the target branch, +# which naturally triggers the publish path. +# ============================================================================= + +name: Docker Build, Test, and Publish + +on: + push: + branches: [dev, main] + pull_request: + branches: [dev, main] + +env: + REGISTRY: ghcr.io + IMAGE_NAME: mcdc-project/mcdc + +jobs: + + # --------------------------------------------------------------------------- + # Stage 1: Build the image + # --------------------------------------------------------------------------- + build: + name: Build Docker image + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Determine image tag + run: | + if [ "${{ github.event_name }}" == "pull_request" ]; then + echo "IMAGE_TAG=${{ github.base_ref }}" >> $GITHUB_ENV + else + echo "IMAGE_TAG=${{ github.ref_name }}" >> $GITHUB_ENV + fi + + - name: Build Docker image + run: | + docker build -f containers/Dockerfile -t mcdc:${{ env.IMAGE_TAG }} . 
+ + - name: Save image as artifact + run: | + docker save mcdc:${{ env.IMAGE_TAG }} -o /tmp/mcdc-image.tar + + - name: Upload image artifact + uses: actions/upload-artifact@v4 + with: + name: mcdc-image + path: /tmp/mcdc-image.tar + retention-days: 1 + + # --------------------------------------------------------------------------- + # Stage 2: Test suite (runs in parallel after build) + # --------------------------------------------------------------------------- + unit-test: + name: Unit tests (Docker) + needs: build + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Determine image tag + run: | + if [ "${{ github.event_name }}" == "pull_request" ]; then + echo "IMAGE_TAG=${{ github.base_ref }}" >> $GITHUB_ENV + else + echo "IMAGE_TAG=${{ github.ref_name }}" >> $GITHUB_ENV + fi + + - name: Download image artifact + uses: actions/download-artifact@v4 + with: + name: mcdc-image + path: /tmp + + - name: Load Docker image + run: docker load -i /tmp/mcdc-image.tar + + - name: Verify MC/DC import + run: | + docker run --rm mcdc:${{ env.IMAGE_TAG }} python -c "import mcdc; print('MC/DC import OK')" + + - name: Run unit tests + run: | + docker run --rm \ + -v ${{ github.workspace }}:/opt/mcdc \ + -w /opt/mcdc/test/unit \ + mcdc:${{ env.IMAGE_TAG }} python run.py + + regression-test: + name: Regression – ${{ matrix.mode }} + needs: build + runs-on: ubuntu-latest + timeout-minutes: ${{ matrix.timeout }} + strategy: + fail-fast: false + matrix: + mode: + - Python-Serial + - Python-MPI + - Numba-Serial + - Numba-MPI + include: + - mode: Python-Serial + run_cmd: "python run.py" + timeout: 10 + - mode: Python-MPI + run_cmd: | + python run.py --mpiexec=4 + python run.py --mpiexec=16 --name=slab_reed_dd_3d + timeout: 20 + - mode: Numba-Serial + run_cmd: "python run.py --mode numba" + timeout: 120 + - mode: Numba-MPI + run_cmd: | + python run.py --mode numba --mpiexec=4 + python run.py --mode numba --mpiexec=16 
--name=slab_reed_dd_3d + timeout: 120 + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Determine image tag + run: | + if [ "${{ github.event_name }}" == "pull_request" ]; then + echo "IMAGE_TAG=${{ github.base_ref }}" >> $GITHUB_ENV + else + echo "IMAGE_TAG=${{ github.ref_name }}" >> $GITHUB_ENV + fi + + - name: Download image artifact + uses: actions/download-artifact@v4 + with: + name: mcdc-image + path: /tmp + + - name: Load Docker image + run: docker load -i /tmp/mcdc-image.tar + + - name: Run regression tests (${{ matrix.mode }}) + run: | + docker run --rm \ + -v ${{ github.workspace }}:/opt/mcdc \ + -w /opt/mcdc/test/regression \ + mcdc:${{ env.IMAGE_TAG }} bash -c '${{ matrix.run_cmd }}' + + # --------------------------------------------------------------------------- + # Stage 3: Publish (only on push to dev/main, after all tests pass) + # --------------------------------------------------------------------------- + publish: + name: Publish to GHCR + needs: [unit-test, regression-test] + if: github.event_name == 'push' + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + + steps: + - name: Download image artifact + uses: actions/download-artifact@v4 + with: + name: mcdc-image + path: /tmp + + - name: Load Docker image + run: docker load -i /tmp/mcdc-image.tar + + - name: Login to GitHub Container Registry + if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/dev') + uses: docker/login-action@v3 + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Push to registry + if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/dev') + run: | + BRANCH=${{ github.ref_name }} + DATE=$(date +%Y-%m-%d) + FULL_IMAGE=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + + docker tag mcdc:${BRANCH} ${FULL_IMAGE}:${BRANCH} + docker tag mcdc:${BRANCH} 
${FULL_IMAGE}:${BRANCH}-${DATE} + + docker push ${FULL_IMAGE}:${BRANCH} + docker push ${FULL_IMAGE}:${BRANCH}-${DATE} + + echo "Pushed: ${BRANCH} and ${BRANCH}-${DATE}" diff --git a/mcdc/.github/workflows/docs_test.yml b/mcdc/.github/workflows/docs_test.yml new file mode 100644 index 000000000..24e1ca536 --- /dev/null +++ b/mcdc/.github/workflows/docs_test.yml @@ -0,0 +1,42 @@ +name: Test Build the Docs + +on: + push: + pull_request: + workflow_dispatch: + +jobs: + build-docs: + runs-on: "ubuntu-latest" + timeout-minutes: 60 + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python 3.13 + uses: actions/setup-python@v5 + with: + python-version: "3.13" + cache: pip + + - name: Set up MPI + uses: mpi4py/setup-mpi@v1 + + - name: Install mpi4py + run: | + pip install --upgrade pip + pip install --no-binary=mpi4py mpi4py + + - name: Install docs dependencies + run: | + python -m pip install --upgrade pip + # If you have extras for docs, use this: + python -m pip install ".[docs]" + # Otherwise, something like: + # python -m pip install . sphinx sphinx-autodoc-typehints ... + python -m pip list + + - name: Build the docs + working-directory: docs + run: make html \ No newline at end of file diff --git a/mcdc/.github/workflows/draft-pdf.yml b/mcdc/.github/workflows/draft-pdf.yml new file mode 100644 index 000000000..ed90402da --- /dev/null +++ b/mcdc/.github/workflows/draft-pdf.yml @@ -0,0 +1,26 @@ +name: Build JOSS paper, only on dispatch + +on: + workflow_dispatch: + +jobs: + paper: + runs-on: ubuntu-latest + name: Paper Draft + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Build draft PDF + uses: openjournals/openjournals-draft-action@master + with: + journal: joss + # This should be the path to the paper within your repo. 
+ paper-path: docs/paper.md + - name: Upload + uses: actions/upload-artifact@v4 + with: + name: paper + # This is the output path where Pandoc will write the compiled + # PDF. Note, this should be the same directory as the input + # paper.md + path: docs/paper.pdf diff --git a/mcdc/.github/workflows/manual_publish.yml b/mcdc/.github/workflows/manual_publish.yml new file mode 100644 index 000000000..b24b01755 --- /dev/null +++ b/mcdc/.github/workflows/manual_publish.yml @@ -0,0 +1,47 @@ +# This workflow will upload a Python Package using Twine when a release is created +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries + +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +# assuming this is being done in a repo with trusted publishing permissions in pypi + +# There is an automated runner which should upload a new PyPI package but it often fails due to intermittent API errors. +# This is here so one doesn't have to republish everything + +name: manually publish pypi package (for troubleshooting) + +on: + workflow_dispatch: + +permissions: + contents: read + +jobs: + deploy: + runs-on: ubuntu-latest + permissions: + id-token: write + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.13 + uses: actions/setup-python@v3 + with: + python-version: '3.13' + - uses: mpi4py/setup-mpi@v1 + - name: Install dependencies + run: | + python --version + python -m pip install --upgrade pip + pip install . 
+ pip install build + - name: Build package + run: python -m build + - name: Publish package distributions to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + + #with: + # user: __token__ + # password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/mcdc/.github/workflows/python-publish.yml b/mcdc/.github/workflows/python-publish.yml new file mode 100644 index 000000000..1fea89a1e --- /dev/null +++ b/mcdc/.github/workflows/python-publish.yml @@ -0,0 +1,45 @@ +# This workflow will upload a Python Package using Twine when a release is created +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries + +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +# assuming this is being done in a repo with trusted publishing permissions in pypi + +name: Upload Python Package + +on: + release: + types: [published] + +permissions: + contents: read + +jobs: + deploy: + runs-on: ubuntu-latest + permissions: + id-token: write + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.13 + uses: actions/setup-python@v3 + with: + python-version: '3.13' + - uses: mpi4py/setup-mpi@v1 + - name: Install dependencies + run: | + python --version + python -m pip install --upgrade pip + pip install . 
+ pip install build + - name: Build package + run: python -m build + - name: Publish package distributions to PyPI + uses: pypa/gh-action-pypi-publish@release/v1 + + #with: + # user: __token__ + # password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/mcdc/.github/workflows/regression_test-gpu.yml b/mcdc/.github/workflows/regression_test-gpu.yml new file mode 100644 index 000000000..7fc4d9ce5 --- /dev/null +++ b/mcdc/.github/workflows/regression_test-gpu.yml @@ -0,0 +1,37 @@ +name: GPU regression test + +on: + workflow_dispatch: + +jobs: + build: + runs-on: self-hosted + steps: + - uses: actions/checkout@v3 + - name: Load modules and build python venv + run: | + pwd + cd .. + pwd + module load cuda/11.8 gcc/10.3 mpich/4.0h_gcc-10 python/3.13 + rm -rf ci_testing + python -m venv ci_testing + module unload python/3.13 + source ci_testing/bin/activate + pip install --upgrade pip + pwd + - name: Install MC/DC and Harmonize + run: | + source ../ci_testing/bin/activate + pwd + pip install -e . + git clone https://github.com/CEMeNT-PSAAP/harmonize.git + cd harmonize + pip install -e . + cd .. 
+ - name: Regression Test - GPU + run: | + pwd + source ../ci_testing/bin/activate + cd test/regression + python run.py --mode=numba --target=gpu diff --git a/mcdc/.github/workflows/regression_test.yml b/mcdc/.github/workflows/regression_test.yml new file mode 100644 index 000000000..8c1904854 --- /dev/null +++ b/mcdc/.github/workflows/regression_test.yml @@ -0,0 +1,86 @@ +name: Regression Tests + +on: + push: + pull_request: + +jobs: + regression-test: + name: ${{ matrix.mode }} (py${{ matrix.python-version }}) + runs-on: ubuntu-latest + timeout-minutes: ${{ matrix.timeout }} + + strategy: + fail-fast: false + matrix: + python-version: ['3.10', '3.13'] + mode: + - Python-Serial + - Python-MPI + - Numba-Serial + - Numba-MPI + include: + - mode: Python-Serial + timeout: 10 + - mode: Python-MPI + timeout: 20 + - mode: Numba-Serial + timeout: 120 + - mode: Numba-MPI + timeout: 120 + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: pip + + - name: Show environment info + run: | + pwd + ls + python --version + pip --version + + - name: Set up MPI + uses: mpi4py/setup-mpi@v1 + + - name: Install mpi4py + run: | + pip install --upgrade pip + pip install --no-binary=mpi4py mpi4py + + - name: Install MC/DC + run: | + pip install -e ".[dev]" + pip list + + - name: Run regression tests (${{ matrix.mode }}) + if: matrix.mode == 'Python-Serial' + working-directory: test/regression + run: | + python run.py + + - name: Run regression tests (${{ matrix.mode }}) + if: matrix.mode == 'Python-MPI' + working-directory: test/regression + run: | + python run.py --mpiexec=4 + python run.py --mpiexec=16 --name=slab_reed_dd_3d + + - name: Run regression tests (${{ matrix.mode }}) + if: matrix.mode == 'Numba-Serial' + working-directory: test/regression + run: | + python run.py --mode numba + + - name: Run regression tests 
(${{ matrix.mode }}) + if: matrix.mode == 'Numba-MPI' + working-directory: test/regression + run: | + python run.py --mode numba --mpiexec=4 + python run.py --mode numba --mpiexec=16 --name=slab_reed_dd_3d diff --git a/mcdc/.github/workflows/unit_test.yml b/mcdc/.github/workflows/unit_test.yml new file mode 100644 index 000000000..c46a73a4b --- /dev/null +++ b/mcdc/.github/workflows/unit_test.yml @@ -0,0 +1,50 @@ +name: Unit Tests + +on: + push: + pull_request: + +jobs: + unit-test: + name: (py${{ matrix.python-version }}) + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + python-version: ['3.10', '3.13'] + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + cache: pip + + - name: Show environment info + run: | + pwd + ls + python --version + pip --version + + - name: Set up MPI + uses: mpi4py/setup-mpi@v1 + + - name: Install mpi4py + run: | + pip install --upgrade pip + pip install --no-binary=mpi4py mpi4py + + - name: Install MC/DC + run: | + pip install -e ".[dev]" + pip list + + - name: Run unit tests + working-directory: test/unit + run: | + python run.py diff --git a/mcdc/.gitignore b/mcdc/.gitignore new file mode 100644 index 000000000..704b9fb04 --- /dev/null +++ b/mcdc/.gitignore @@ -0,0 +1,71 @@ +# setup.py +build +.eggs +mcdc.egg-info + +# Material Data +mcdc_xs +MCDC-Xsec + +# Python cache +__pycache__ +*.pyc +*.nbc +*.nbi + +# GPU cache +__ptxcache__ +__harmonize_cache__ + +# Editor +.spyproject +*.swp +.vscode/ + +# Output +*output.h5 +output*.h5 +*output.h5 +*.png +*.mp4 +*.gif + +# Ignore all png and giffiles except those in the docs/source/images directory +!docs/source/images/**/*.png +!docs/source/images/**/*.gif + +# Cluster job +*.out +*.pbs + +# Misc. 
+tmp +tmp.py +tmp_* +*_tmp.py +*.core +.DS_Store +**/.DS_Store +**/.idea + +# test cache +.pytest_cache +pytestdebug.log + +# profiler +*.prof + +# Documentation builds +docs/build +docs/source/pythonapi/generated/ + +*.csv + +# Regression tests +dummy_nuclide.h5 +source_particles.h5 + +# Container artifacts +*.sif +*_sandbox +*.tar \ No newline at end of file diff --git a/mcdc/.pre-commit-config.yaml b/mcdc/.pre-commit-config.yaml new file mode 100644 index 000000000..036d92446 --- /dev/null +++ b/mcdc/.pre-commit-config.yaml @@ -0,0 +1,11 @@ +repos: + # Using this mirror lets us use mypyc-compiled black, which is about 2x faster + - repo: https://github.com/psf/black-pre-commit-mirror + rev: 26.1.0 + hooks: + - id: black + # It is recommended to specify the latest version of Python + # supported by your project here, or alternatively use + # pre-commit's default_language_version, see + # https://pre-commit.com/#top_level-default_language_version + language_version: python3.11 diff --git a/mcdc/.readthedocs.yaml b/mcdc/.readthedocs.yaml new file mode 100644 index 000000000..63499aaea --- /dev/null +++ b/mcdc/.readthedocs.yaml @@ -0,0 +1,28 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the OS, Python version and other tools you might need +build: + os: ubuntu-20.04 + tools: + python: "3.10" + apt_packages: + - libopenmpi-dev + - openmpi-bin + +# Build documentation in the "docs/" directory with Sphinx +sphinx: + configuration: docs/source/conf.py + +# Optional but recommended, declare the Python requirements required +# to build your documentation +# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html +python: + install: + - requirements: docs/requirements-rtd.txt + - method: pip + path: . 
\ No newline at end of file diff --git a/mcdc/CITATION.cff b/mcdc/CITATION.cff new file mode 100644 index 000000000..12c6fc359 --- /dev/null +++ b/mcdc/CITATION.cff @@ -0,0 +1,110 @@ +# This CITATION.cff file was generated with cffinit. +# Visit https://bit.ly/cffinit to generate yours today! + +cff-version: 1.2.0 +title: 'MC/DC: Monte Carlo Dynamic Code' +message: >- + a pure python high performance Monte Carlo neutronics + package +type: software +authors: + - name: >- + Center for Exascale Monte Carlo Neutron Transport + (CEMeNT) + website: 'https://cement-psaap.github.io/' + - given-names: Ilham + family-names: 'Variansyah' + email: variansi@oregonstate.edu + affiliation: Oregon State University + orcid: 'https://orcid.org/0000-0003-3426-7160' + - given-names: Joanna Piper + family-names: Morgan + email: morgajoa@oregonstate.edu + affiliation: Oregon State University + orcid: 'https://orcid.org/0000-0003-1379-5431' + - given-names: Samuel + family-names: Pasmann + orcid: 'https://orcid.org/0000-0003-1391-1471' + - given-names: Kayla + family-names: Clements + email: clemekay@oregonstate.edu + affiliation: Oregon State University + orcid: 'https://orcid.org/0000-0003-3358-5618' + - given-names: Braxton + family-names: Cuneo + email: bcuneo@seattleu.edu + affiliation: Seattle University + orcid: 'https://orcid.org/0000-0002-6493-0990' + - given-names: Alexander + family-names: Mote + email: motea@oregonstate.edu + orcid: 'https://orcid.org/0000-0001-5099-0223' + affiliation: Oregon State University + - given-names: Caleb + family-names: Shaw + email: cashaw4@ncsu.edu + affiliation: North Carolina State University + - given-names: Jordan + family-names: Northrop + email: northj@oregonstate.edu + affiliation: Oregon State University + orcid: 'https://orcid.org/0000-0003-0420-9699' + - given-names: Rohan + family-names: Pankaj + orcid: 'https://orcid.org/0009-0005-0445-9323' + - given-names: 'Ryan G. 
' + family-names: McClarren + email: rmcclarr@nd.edu + affiliation: University of Notre Dame + orcid: 'https://orcid.org/0000-0002-8342-6132' + - given-names: Todd S. + family-names: Palmer + email: palmerts@oregonstate.edu + affiliation: Oregon State University + orcid: 'https://orcid.org/0000-0003-3310-5258' + - given-names: Lizhong + family-names: Chen + email: chenliz@oregonstate.edu + affiliation: Oregon State University + orcid: 'https://orcid.org/0000-0001-5890-7121' + - given-names: Dmitriy Y. + family-names: Anistratov + email: anistratov@ncsu.edu + affiliation: North Carolina State University + - given-names: C. T. + family-names: Kelley + email: ctk@ncsu.edu + affiliation: North Carolina State University + - given-names: 'Camille' + family-names: Palmer + email: palmecam@oregonstate.edu + orcid: 'https://orcid.org/0000-0002-7573-4215' + affiliation: Oregon State University + - given-names: Kyle E. + family-names: Niemeyer + email: niemeyek@oregonstate.edu + orcid: 'https://orcid.org/0000-0003-4425-7097' + affiliation: Oregon State University +identifiers: + - type: doi + value: 10.5281/zenodo.10576604 + description: Zenodo Archive + - type: doi + value: 10.21105/joss.06415 + description: Paper description of MC/DC +repository-code: 'https://github.com/CEMeNT-PSAAP/MCDC' +url: 'https://mcdc.readthedocs.io/en/latest/' +abstract: >- + MC/DC is a performant, scalable, and machine-portable + Python-based Monte Carlo neutron transport software + currently developed in the Center for Exascale Monte Carlo + Neutron Transport (CEMeNT). 
+keywords: + - monte carlo + - numba + - gpu + - neutron transport + - radiation transport +license: BSD-3-Clause +version: 0.9.1 +date-released: '2024-04-08' diff --git a/mcdc/CODE_OF_CONDUCT.md b/mcdc/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..58d55b4ca --- /dev/null +++ b/mcdc/CODE_OF_CONDUCT.md @@ -0,0 +1,133 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying 
and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official email address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +[INSERT CONTACT METHOD]. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. 
No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. 
+ +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations \ No newline at end of file diff --git a/mcdc/LICENSE b/mcdc/LICENSE new file mode 100644 index 000000000..2ad9c7aae --- /dev/null +++ b/mcdc/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2021, CEMeNT +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/mcdc/README.md b/mcdc/README.md new file mode 100644 index 000000000..d8c0413ec --- /dev/null +++ b/mcdc/README.md @@ -0,0 +1,32 @@ +# MC/DC: Monte Carlo Dynamic Code + +![mcdc_logo v1](https://user-images.githubusercontent.com/26186244/173467190-74d9b09a-ef7d-4f0e-8bdf-4a076de7c43c.svg) + +[![Build](https://github.com/CEMeNT-PSAAP/MCDC/actions/workflows/regression_test.yml/badge.svg)](https://github.com/CEMeNT-PSAAP/MCDC/actions/workflows/regression_test.yml) +[![DOI](https://joss.theoj.org/papers/10.21105/joss.06415/status.svg)](https://doi.org/10.21105/joss.06415) +[![ReadTheDocs](https://github.com/CEMeNT-PSAAP/MCDC/actions/workflows/docs_test.yml/badge.svg)](https://mcdc.readthedocs.io/en/dev/) +[![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) + +MC/DC is a performant, scalable, and machine-portable Python-based Monte Carlo +neutron transport software, initiated by the Center for Exascale Monte Carlo +Neutron Transport ([CEMeNT](https://cement-psaap.github.io/)), and currently +in active development at the Center for Advancing the Radiation Resilience of +Electronics ([CARRE](https://carre-psaapiv.org)). + +## Documentation + +All detailed instructions and guides are hosted on [Read the Docs](https://mcdc.readthedocs.io/en/dev/). These include: +- [Installation](https://mcdc.readthedocs.io/en/dev/install.html), +- [User Guide](https://mcdc.readthedocs.io/en/dev/user/index.html), +- [API Reference](https://mcdc.readthedocs.io/en/dev/pythonapi/index.html), and +- [Contribution Guide](https://mcdc.readthedocs.io/en/dev/contribution/index.html). + +## Citing + +If you use MC/DC in your work and want to provide attribution, please cite the following as appropriate: +- **[MC/DC Origins]** I. Variansyah, et al. (2023). Development of MC/DC: a performant, scalable, and portable Python-based Monte Carlo neutron transport code. Proc. M&C 2023, Niagara Falls, Canada.
https://doi.org/10.48550/arXiv.2305.07636. +- **[MC/DC JOSS article]** J. Morgan, et al. (2024). Monte Carlo / Dynamic Code (MC/DC): An accelerated Python package for fully transient neutron transport and rapid methods development. Journal of Open Source Software, 9(96), 6415. https://doi.org/10.21105/joss.06415. + +## Reporting Bugs and Issues + +To report bugs or request new features, feel free to [open an Issue](https://github.com/CEMeNT-PSAAP/MCDC/issues). diff --git a/mcdc/containers/Dockerfile b/mcdc/containers/Dockerfile new file mode 100644 index 000000000..5d8776fd5 --- /dev/null +++ b/mcdc/containers/Dockerfile @@ -0,0 +1,30 @@ +FROM python:3.11-slim + +LABEL org.opencontainers.image.source="https://github.com/CEMeNT-PSAAP/MCDC" +LABEL org.opencontainers.image.description="MC/DC: Monte Carlo Dynamic Code" +LABEL org.opencontainers.image.licenses="BSD-3-Clause" + +# Suppress pip root user warning (safe inside containers) +ENV PIP_ROOT_USER_ACTION=ignore + +# Disable apt sandbox for rootless container compatibility (HPC systems) +RUN echo 'APT::Sandbox::User "root";' > /etc/apt/apt.conf.d/no-sandbox + +# MPICH is used instead of OpenMPI for broader HPC compatibility: +# - No openssh-client dependency (avoids chmod issues on some filesystems) +# - No root restrictions (runs as root without --allow-run-as-root) +RUN apt-get update && apt-get install -y --no-install-recommends \ + gcc \ + g++ \ + libmpich-dev \ + mpich \ + git \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /opt/mcdc +COPY pyproject.toml . +RUN pip install --no-cache-dir mpi4py +COPY . . 
+RUN pip install --no-cache-dir -e ".[dev]" + +CMD ["bash"] \ No newline at end of file diff --git a/mcdc/containers/Dockerfile.cuda b/mcdc/containers/Dockerfile.cuda new file mode 100644 index 000000000..d8f4943c5 --- /dev/null +++ b/mcdc/containers/Dockerfile.cuda @@ -0,0 +1,52 @@ +FROM nvidia/cuda:11.8.0-devel-ubuntu22.04 + +LABEL org.opencontainers.image.source="https://github.com/CEMeNT-PSAAP/MCDC" +LABEL org.opencontainers.image.description="MC/DC: Monte Carlo Dynamic Code (CUDA/GPU)" +LABEL org.opencontainers.image.licenses="BSD-3-Clause" + +ENV PIP_ROOT_USER_ACTION=ignore +ENV DEBIAN_FRONTEND=noninteractive + +# Disable apt sandbox for rootless container compatibility +RUN echo 'APT::Sandbox::User "root";' > /etc/apt/apt.conf.d/no-sandbox + +# Install system dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + gcc \ + g++ \ + libmpich-dev \ + mpich \ + git \ + wget \ + && rm -rf /var/lib/apt/lists/* + +# Install Miniforge (conda-forge default, no Anaconda TOS issues) +RUN wget -q https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-x86_64.sh -O /tmp/miniforge.sh \ + && bash /tmp/miniforge.sh -b -p /opt/conda \ + && rm /tmp/miniforge.sh + +ENV PATH="/opt/conda/bin:$PATH" + +# Install Python environment (matching Harmonize prereqs: CUDA=11.8, Numba>=0.60.0) +RUN conda install -y \ + gxx \ + python=3.11 \ + numba \ + numpy \ + && conda clean -afy + +# Install mpi4py +RUN pip install --no-cache-dir mpi4py + +# Install Harmonize (NVIDIA GPU runtime for MC/DC) +RUN git clone https://github.com/CEMeNT-PSAAP/harmonize.git /opt/harmonize \ + && cd /opt/harmonize \ + && pip install --no-cache-dir -e . + +# Install MC/DC +WORKDIR /opt/mcdc +COPY pyproject.toml . +COPY . . 
+RUN pip install --no-cache-dir -e ".[dev]" + +CMD ["bash"] \ No newline at end of file diff --git a/mcdc/containers/Dockerfile.rocm b/mcdc/containers/Dockerfile.rocm new file mode 100644 index 000000000..952ae40c5 --- /dev/null +++ b/mcdc/containers/Dockerfile.rocm @@ -0,0 +1,53 @@ +FROM rocm/dev-ubuntu-22.04:6.0 + +LABEL org.opencontainers.image.source="https://github.com/CEMeNT-PSAAP/MCDC" +LABEL org.opencontainers.image.description="MC/DC: Monte Carlo Dynamic Code (ROCm/GPU)" +LABEL org.opencontainers.image.licenses="BSD-3-Clause" + +ENV PIP_ROOT_USER_ACTION=ignore +ENV DEBIAN_FRONTEND=noninteractive + +# Disable apt sandbox for rootless container compatibility +RUN echo 'APT::Sandbox::User "root";' > /etc/apt/apt.conf.d/no-sandbox + +# Install system dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + gcc \ + g++ \ + libmpich-dev \ + mpich \ + git \ + wget \ + && rm -rf /var/lib/apt/lists/* + +# Install Miniforge (conda-forge default, no Anaconda TOS issues) +RUN wget -q https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-Linux-x86_64.sh -O /tmp/miniforge.sh \ + && bash /tmp/miniforge.sh -b -p /opt/conda \ + && rm /tmp/miniforge.sh + +ENV PATH="/opt/conda/bin:$PATH" + +# Install Python environment (matching Harmonize prereqs: ROCm=6.0.0, Numba>=0.60.0) +RUN conda install -y \ + gxx \ + python=3.11 \ + numba \ + numpy \ + && conda clean -afy + +# Install mpi4py +RUN pip install --no-cache-dir mpi4py + +# Install Harmonize (AMD branch - GPU runtime for MC/DC) +RUN git clone https://github.com/CEMeNT-PSAAP/harmonize.git /opt/harmonize \ + && cd /opt/harmonize \ + && git switch amd_event_interop_revamp \ + && pip install --no-cache-dir -e . + +# Install MC/DC +WORKDIR /opt/mcdc +COPY pyproject.toml . +COPY . . 
+RUN pip install --no-cache-dir -e ".[dev]" + +CMD ["bash"] \ No newline at end of file diff --git a/mcdc/containers/docker-compose.yml b/mcdc/containers/docker-compose.yml new file mode 100644 index 000000000..43a7686a7 --- /dev/null +++ b/mcdc/containers/docker-compose.yml @@ -0,0 +1,41 @@ +services: + + # Development environment + # Usage: docker compose -f containers/docker-compose.yml run --rm dev bash + dev: + build: + context: .. + dockerfile: containers/Dockerfile + image: mcdc:dev + volumes: + # Mount local source code into container + # Any changes you make locally are immediately reflected inside + - ..:/opt/mcdc + working_dir: /opt/mcdc + stdin_open: true + tty: true + + # Run test suite + # Usage: docker compose -f containers/docker-compose.yml run --rm test + test: + build: + context: .. + dockerfile: containers/Dockerfile + image: mcdc:dev + volumes: + - ..:/opt/mcdc + working_dir: /opt/mcdc/test/unit + command: python run.py + + # Run with MPI + # Usage: docker compose -f containers/docker-compose.yml run --rm mpi mpirun -n 4 python input.py + mpi: + build: + context: .. + dockerfile: containers/Dockerfile + image: mcdc:dev + volumes: + - ..:/opt/mcdc + working_dir: /opt/mcdc + stdin_open: true + tty: true \ No newline at end of file diff --git a/mcdc/docs/Makefile b/mcdc/docs/Makefile new file mode 100644 index 000000000..8b6275ab8 --- /dev/null +++ b/mcdc/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= -W --keep-going +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. 
$(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/mcdc/docs/README.md b/mcdc/docs/README.md new file mode 100644 index 000000000..21284cd27 --- /dev/null +++ b/mcdc/docs/README.md @@ -0,0 +1,10 @@ +# MC/DC Documentation! + +To build these docs locally run the following: +```bash +conda install sphinx==7.2.6 +pip install furo sphinx_toolbox +make html +``` + +Then launch ``build/html/index.html`` with your browser of choice \ No newline at end of file diff --git a/mcdc/docs/make.bat b/mcdc/docs/make.bat new file mode 100644 index 000000000..747ffb7b3 --- /dev/null +++ b/mcdc/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/mcdc/docs/paper.bib b/mcdc/docs/paper.bib new file mode 100644 index 000000000..9956d4490 --- /dev/null +++ b/mcdc/docs/paper.bib @@ -0,0 +1,245 @@ +@inproceedings{lam_numba_2015, + address = {Austin Texas}, + title = {Numba: a {LLVM}-based {Python} {JIT} compiler}, + isbn = {978-1-4503-4005-2}, + shorttitle = {Numba}, + url = {https://dl.acm.org/doi/10.1145/2833157.2833162}, + doi = {10.1145/2833157.2833162}, + abstract = {Dynamic, interpreted languages, like Python, are attractive for domain-experts and scientists experimenting with new ideas. However, the performance of the interpreter is often a barrier when scaling to larger data sets. This paper presents a just-in-time compiler for Python that focuses in scientific and array-oriented computing. Starting with the simple syntax of Python, Numba compiles a subset of the language into efficient machine code that is comparable in performance to a traditional compiled language. 
In addition, we share our experience in building a JIT compiler using LLVM[1].}, + language = {en}, + urldate = {2023-11-03}, + booktitle = {Proceedings of the {Second} {Workshop} on the {LLVM} {Compiler} {Infrastructure} in {HPC}}, + publisher = {ACM}, + author = {Lam, Siu Kwan and Pitrou, Antoine and Seibert, Stanley}, + month = nov, + year = {2015}, + pages = {1--6}, + file = {numba.pdf:/Users/jonesy/Documents/PapersLibrary/numba.pdf:application/pdf}, +} + + +@article{mpi4py_2021, + title = {mpi4py: {Status} {Update} {After} 12 {Years} of {Development}}, + volume = {23}, + issn = {1521-9615, 1558-366X}, + shorttitle = {mpi4py}, + url = {https://ieeexplore.ieee.org/document/9439927/}, + doi = {10.1109/MCSE.2021.3083216}, + language = {en}, + number = {4}, + urldate = {2023-11-02}, + journal = {Computing in Science \& Engineering}, + author = {Dalcin, Lisandro and Fang, Yao-Lung L.}, + month = jul, + year = {2021}, + pages = {47--54}, + file = {mpi4py_Status_Update_After_12_Years_of_Development.pdf:/Users/jonesy/Documents/PapersLibrary/mpi4py_Status_Update_After_12_Years_of_Development.pdf:application/pdf}, +} + +@article{mcdc:qmc, + title="A Quasi-{M}onte {C}arlo Method with {K}rylov Linear Solvers for Multigroup Neutron Transport Simulations", + author="S. Pasmann and I. Variansyah and C. T. Kelley and R. 
McClarren", + journal="Nuclear Science and Engineering", + doi="10.1080/00295639.2022.2143704", + note="Published online Jan 12, 2023", + year=2023 +} + +@inproceedings{mcdc:clements_mc23, + Booktitle = {International Conference on Mathematics and Computational Methods Applied to Nuclear Science and Engineering}, + title = {Global Sensitivity Analysis in {M}onte {C}arlo Radiation Transport}, + Month = {8}, + year = {2023}, + doi = {10.48550/arXiv.2403.06106}, + author = {Kayla Clements and Gianluca Geraci and Aaron J Olson and Todd Palmer}, + address = {Niagara Falls, Ontario, Canada}, +} + + +@inproceedings{mcdc:qmcabs, + booktitle = {International Conference on Mathematics and Computational Methods Applied to Nuclear Science and Engineering}, + title="{iQMC}: Iterative Quasi-{Monte Carlo} with {K}rylov Linear Solvers for k-Eigenvalue Neutron Transport Simulations", + author="S. Pasmann and I. Variansyah and C. T. Kelley and R. McClarren", + doi = {10.48550/arXiv.2306.11600}, + year=2023, + address = {Niagara Falls, Ontario, Canada}, +} + +@inproceedings{mcdc:variansyah_physor22_pct, + Booktitle = {International Conference on Physics of Reactors}, + title = {Performance of Population Control Techniques in {M}onte {C}arlo Reactor Criticality Simulation}, + year = {2022}, + doi = {10.13182/physor22-37871}, + author = {Ilham Variansyah and Ryan G. McClarren}, + address = {Pittsburgh, Pennsylvania, USA}, +} + +@inproceedings{variansyah_mc23_ic, + Booktitle = {International Conference on Mathematics and Computational Methods Applied to Nuclear Science and Engineering}, + title = {An effective initial particle sampling technique for {M}onte {C}arlo reactor transient simulations}, + year = {2023}, + author = {Ilham Variansyah and Ryan G. 
McClarren}, + address = {Niagara Falls, Ontario, Canada}, + doi = {10.48550/arXiv.2305.07646} +} + +@inproceedings{variansyah_mc23_moving_object, + Booktitle = {International Conference on Mathematics and Computational Methods Applied to Nuclear Science and Engineering}, + title = {High-fidelity treatment for object movement in time-dependent {M}onte {C}arlo transport simulations}, + year = {2023}, + author = {Ilham Variansyah and Ryan G. McClarren}, + address = {Niagara Falls, Ontario, Canada}, + doi = {10.48550/arXiv.2305.07641} +} + +@article{mcdc:variansyah_nse22_pct, + author = {Ilham Variansyah and Ryan G McClarren}, + title = {Analysis of Population Control Techniques for Time-Dependent and Eigenvalue {M}onte {C}arlo Neutron Transport Calculations}, + journal = {Nuclear Science and Engineering}, + volume = {196:11}, + pages = {1280--1305}, + year = {2022}, + doi = {10.1080/00295639.2022.2091906} +} + +@article{mcdc:clements_variance_2024, + title = {A variance deconvolution estimator for efficient uncertainty quantification in {Monte} {Carlo} radiation transport applications}, + volume = {319}, + issn = {0022-4073}, + url = {https://www.sciencedirect.com/science/article/pii/S0022407324000657}, + doi = {10.1016/j.jqsrt.2024.108958}, + abstract = {Monte Carlo simulations are at the heart of many high-fidelity simulations and analyses for radiation transport systems. As is the case with any complex computational model, it is important to propagate sources of input uncertainty and characterize how they affect model output. Unfortunately, uncertainty quantification (UQ) is made difficult by the stochastic variability that Monte Carlo transport solvers introduce. The standard method to avoid corrupting the UQ statistics with the transport solver noise is to increase the number of particle histories, resulting in very high computational costs. 
In this contribution, we propose and analyze a sampling estimator based on the law of total variance to compute UQ variance even in the presence of residual noise from Monte Carlo transport calculations. We rigorously derive the statistical properties of the new variance estimator, compare its performance to that of the standard method, and demonstrate its use on neutral particle transport model problems involving both attenuation and scattering physics. We illustrate, both analytically and numerically, the estimator’s statistical performance as a function of available computational budget and the distribution of that budget between UQ samples and particle histories. We show analytically and corroborate numerically that the new estimator is unbiased, unlike the standard approach, and is more accurate and precise than the standard estimator for the same computational budget.}, + journal = {Journal of Quantitative Spectroscopy and Radiative Transfer}, + author = {Clements, Kayla B. and Geraci, Gianluca and Olson, Aaron J. and Palmer, Todd S.}, + year = {2024}, + keywords = {Monte Carlo radiation transport, Stochastic solvers, Uncertainty quantification}, + pages = {108958}, +} + +@inproceedings{variansyah_mc23_mcdc, + Booktitle = {International Conference on Mathematics and Computational Methods Applied to Nuclear Science and Engineering}, + title = {Development of {MC/DC}: a performant, scalable, and portable {P}ython-based {M}onte {C}arlo neutron transport code}, + year = {2023}, + author = {Ilham Variansyah and Joanna Piper Morgan and Jordan Northrop and Kyle E. Niemeyer and Ryan G. 
McClarren}, + address = {Niagara Falls, Ontario, Canada}, + doi = {10.48550/arXiv.2305.07636} +} + +@article{brax2023, + author = {Cuneo, Braxton and Bailey, Mike}, + title = {Divergence Reduction in {M}onte {C}arlo Neutron Transport with On-{GPU} Asynchronous Scheduling}, + year = {2023}, + publisher = {Association for Computing Machinery}, + address = {New York, NY, USA}, + issn = {1049-3301}, + url = {10.1145/3626957}, + doi = {10.1145/3626957}, + journal = {ACM Trans. Model. Comput. Simul.}, + month = {oct}, + keywords = {asynchronous, divergence, scheduling, GPGPU, GPU} +} + + +@book{lewis_computational_1984, + address = {New York, NY, USA}, + title = {Computational methods of neutron transport}, + publisher = {John Wiley and Sons, Inc.}, + author = {Lewis, Elmer Eugene and Miller, Warren F}, + url = {https://www.osti.gov/biblio/5538794}, + year = {1984}, +} + + +#shift +@article{shift, + title = {Continuous-energy {Monte} {Carlo} neutron transport on {GPUs} in the {Shift} code}, + volume = {128}, + issn = {03064549}, + url = {https://linkinghub.elsevier.com/retrieve/pii/S0306454919300167}, + doi = {10.1016/j.anucene.2019.01.012}, + abstract = {A continuous-energy Monte Carlo neutron transport solver executing on GPUs has been developed within the Shift code. Several algorithmic approaches are considered, including both history-based and event-based implementations. Unlike in previous work involving multigroup Monte Carlo transport, it is demonstrated that event-based algorithms significantly outperform a historybased approach for continuous-energy transport as a result of increased device occupancy and reduced thread divergence. Numerical results are presented for detailed full-core models of a small modular reactor (SMR), including a model containing depleted fuel materials. These results demonstrate the substantial gains in performance that are possible with the latest-generation of GPUs. 
On the depleted SMR core configuration, an NVIDIA P100 GPU with 56 streaming multiprocessors provides performance equivalent to 90 CPU cores, and the latest V100 GPU with 80 multiprocessors offers the performance of more than 150 CPU cores.}, + language = {en}, + urldate = {2023-11-03}, + journal = {Annals of Nuclear Energy}, + author = {Hamilton, Steven P. and Evans, Thomas M.}, + month = jun, + year = {2019}, + pages = {236--247}, + file = {1-s2.0-S0306454919300167-am.pdf:/Users/jonesy/Documents/PapersLibrary/1-s2.0-S0306454919300167-am.pdf:application/pdf}, +} + + +#mcatk +@article{mcatk, + title = {Monte {Carlo} {Application} {ToolKit} ({MCATK})}, + volume = {82}, + issn = {0306-4549}, + url = {https://www.sciencedirect.com/science/article/pii/S0306454914004472}, + doi = {10.1016/j.anucene.2014.08.047}, + abstract = {The Monte Carlo Application ToolKit (MCATK) is a component-based software library designed to build specialized applications and to provide new functionality for existing general purpose Monte Carlo radiation transport codes. We will describe MCATK and its capabilities along with presenting some verification and validations results.}, + journal = {Annals of Nuclear Energy}, + author = {Adams, Terry and Nolen, Steve and Sweezy, Jeremy and Zukaitis, Anthony and Campbell, Joann and Goorley, Tim and Greene, Simon and Aulwes, Rob}, + year = {2015}, + keywords = {Agile development, Component software, Monte Carlo particle transport, Parallel computing, Population control, Time-dependent}, + pages = {41--47}, + annote = {Joint International Conference on Supercomputing in Nuclear Applications and Monte Carlo 2013, SNA + MC 2013. Pluri- and Trans-disciplinarity, Towards New Modeling and Numerical Simulation Paradigms}, +} + + +# mcnp +@techreport{mcnp, + address = {Los Alamos, NM, USA}, + author = {Rising, Michael Evan and Armstrong, Jerawan Chudoung and Bolding, Simon R. and Brown, Forrest Brooks and Bull, Jeffrey S. 
and Burke, Timothy Patrick and Clark, Alexander Rich and Dixon, David A. and Forster, III, Robert Arthur and Giron, Jesse Frank and Grieve, Tristan Sumner and Hughes, III, Henry Grady and Josey, Colin James and Kulesza, Joel Aaron and Martz, Roger Lee and McCartney, Austin P. and McKinney, Gregg Walter and Mosher, Scott William and Pearson, Eric John and Solomon, Jr., Clell Jeffrey and Swaminarayan, Sriram and Sweezy, Jeremy Ed and Wilson, Stephen Christian and Zukaitis, Anthony J.}, + doi = {10.2172/1909545}, + institution = {Los Alamos National Laboratory}, + month = {January}, + number = {LA-UR-22-33103, Rev.~1}, + title = {{MCNP\textsuperscript{\textregistered} Code Version 6.3.0 Release Notes}}, + url = {https://www.osti.gov/biblio/1909545}, + year = {2023} +} + + + +@article{openmc, + title = {{OpenMC}: {A} state-of-the-art {Monte} {Carlo} code for research and development}, + volume = {82}, + issn = {0306-4549}, + url = {https://www.sciencedirect.com/science/article/pii/S030645491400379X}, + doi = {10.1016/j.anucene.2014.07.048}, + abstract = {This paper gives an overview of OpenMC, an open source Monte Carlo particle transport code recently developed at the Massachusetts Institute of Technology. OpenMC uses continuous-energy cross sections and a constructive solid geometry representation, enabling high-fidelity modeling of nuclear reactors and other systems. Modern, portable input/output file formats are used in OpenMC: XML for input, and HDF5 for output. High performance parallel algorithms in OpenMC have demonstrated near-linear scaling to over 100,000 processors on modern supercomputers. Other topics discussed in this paper include plotting, CMFD acceleration, variance reduction, eigenvalue calculations, and software development processes.}, + journal = {Annals of Nuclear Energy}, + author = {Romano, Paul K. and Horelik, Nicholas E. and Herman, Bryan R. and Nelson, Adam G. 
and Forget, Benoit and Smith, Kord}, + year = {2015}, + keywords = {HDF5, Monte Carlo, Neutron transport, OpenMC, Parallel, XML}, + pages = {90--97}, + annote = {Joint International Conference on Supercomputing in Nuclear Applications and Monte Carlo 2013, SNA + MC 2013. Pluri- and Trans-disciplinarity, Towards New Modeling and Numerical Simulation Paradigms}, +} + + + + +# geant4 +@article{geant4, + title = {Geant4—a simulation toolkit}, + journal = {Nuclear Instruments and Methods in Physics Research Section A: Accelerators, Spectrometers, Detectors and Associated Equipment}, + volume = {506}, + number = {3}, + pages = {250-303}, + year = {2003}, + issn = {0168-9002}, + doi = {10.1016/S0168-9002(03)01368-8}, + url = {https://www.sciencedirect.com/science/article/pii/S0168900203013688}, + author = {S. Agostinelli and J. Allison and K. Amako and J. Apostolakis and H. Araujo and P. Arce and M. Asai and D. Axen and S. Banerjee and G. Barrand and F. Behner and L. Bellagamba and J. Boudreau and L. Broglia and A. Brunengo and H. Burkhardt and S. Chauvie and J. Chuma and R. Chytracek and G. Cooperman and G. Cosmo and P. Degtyarenko and A. Dell'Acqua and G. Depaola and D. Dietrich and R. Enami and A. Feliciello and C. Ferguson and H. Fesefeldt and G. Folger and F. Foppiano and A. Forti and S. Garelli and S. Giani and R. Giannitrapani and D. Gibin and J.J. {Gómez Cadenas} and I. González and G. {Gracia Abril} and G. Greeniaus and W. Greiner and V. Grichine and A. Grossheim and S. Guatelli and P. Gumplinger and R. Hamatsu and K. Hashimoto and H. Hasui and A. Heikkinen and A. Howard and V. Ivanchenko and A. Johnson and F.W. Jones and J. Kallenbach and N. Kanaya and M. Kawabata and Y. Kawabata and M. Kawaguti and S. Kelner and P. Kent and A. Kimura and T. Kodama and R. Kokoulin and M. Kossov and H. Kurashige and E. Lamanna and T. Lampén and V. Lara and V. Lefebure and F. Lei and M. Liendl and W. Lockman and F. Longo and S. Magni and M. Maire and E. Medernach and K. 
Minamimoto and P. {Mora de Freitas} and Y. Morita and K. Murakami and M. Nagamatu and R. Nartallo and P. Nieminen and T. Nishimura and K. Ohtsubo and M. Okamura and S. O'Neale and Y. Oohata and K. Paech and J. Perl and A. Pfeiffer and M.G. Pia and F. Ranjard and A. Rybin and S. Sadilov and E. {Di Salvo} and G. Santin and T. Sasaki and N. Savvas and Y. Sawada and S. Scherer and S. Sei and V. Sirotenko and D. Smith and N. Starkov and H. Stoecker and J. Sulkimo and M. Takahata and S. Tanaka and E. Tcherniaev and E. {Safai Tehrani} and M. Tropeano and P. Truscott and H. Uno and L. Urban and P. Urban and M. Verderi and A. Walkden and W. Wander and H. Weber and J.P. Wellisch and T. Wenaus and D.C. Williams and D. Wright and T. Yamada and H. Yoshida and D. Zschiesche}, +} + +@inproceedings{mcdc:cuneo2024alternative, + title={An Alternative to Stride-Based RNG for {M}onte {C}arlo Transport}, + author={Braxton S. Cuneo and Ilham Variansyah}, + year={2024}, + doi = {10.48550/arXiv.2403.06362}, + journal = {Submitted to Transactions of the American Nuclear Society, Annual Meeting 2024}, + eprint={2403.06362}, + archivePrefix={arXiv}, + primaryClass={physics.comp-ph} +} diff --git a/mcdc/docs/paper.md b/mcdc/docs/paper.md new file mode 100644 index 000000000..c7c44e101 --- /dev/null +++ b/mcdc/docs/paper.md @@ -0,0 +1,138 @@ +--- +title: 'Monte Carlo / Dynamic Code (MC/DC): An accelerated Python package for fully transient neutron transport and rapid methods development' +tags: + - Python + - Monte Carlo + - nuclear engineering + - neutron transport + - reactor analysis + - numba + - HPC + - mpi4py + - GPU +authors: # x=reviewed + - name: Joanna Piper Morgan #x + orcid: 0000-0003-1379-5431 + affiliation: "1, 2" # (Multiple affiliations must be quoted) + corresponding: true + - name: Ilham Variansyah + orcid: 0000-0003-3426-7160 + affiliation: "1, 2" + corresponding: true + - name: Samuel L. Pasmann + orcid: 0000-0003-1391-1471 + affiliation: "1, 3" + - name: Kayla B.
Clements + orcid: 0000-0003-3358-5618 + affiliation: "1, 2" + - name: Braxton Cuneo + orcid: 0000-0002-6493-0990 + affiliation: "1, 5" + - name: Alexander Mote + orcid: 0000-0001-5099-0223 + affiliation: "1, 2" + - name: Charles Goodman + affiliation: "1, 4" + - name: Caleb Shaw + affiliation: "1, 4" + - name: Jordan Northrop + orcid: 0000-0003-0420-9699 + affiliation: "1, 2" + - name: Rohan Pankaj + orcid: 0009-0005-0445-9323 + affiliation: "1, 6" + - name: Ethan Lame + orcid: 0000-0001-7686-9755 + affiliation: "1, 2" + - name: Benjamin Whewell + orcid: 0000-0001-7826-5525 + affiliation: "1, 3" + - name: Ryan G. McClarren #advisors in order of authors except Niemeyer + orcid: 0000-0002-8342-6132 + affiliation: "1, 3" + - name: Todd S. Palmer + orcid: 0000-0003-3310-5258 + affiliation: "1, 2" + - name: Lizhong Chen + orcid: 0000-0001-5890-7121 + affiliation: "1, 2" + - name: Dmitriy Y. Anistratov + affiliation: "1, 4" + - name: C. T. Kelley + affiliation: "1, 4" + - name: Camille J. Palmer + orcid: 0000-0002-7573-4215 + affiliation: "1, 2" + - name: Kyle E. Niemeyer + orcid: 0000-0003-4425-7097 + affiliation: "1, 2" + + +affiliations: + - name: Center for Exascale Monte Carlo Neutron Transport + index: 1 + - name: Oregon State University, Corvallis, OR, USA + index: 2 + - name: University of Notre Dame, South Bend, IN, USA + index: 3 + - name: North Carolina State University, Raleigh, NC, USA + index: 4 + - name: Seattle University, Seattle, WA, USA + index: 5 + - name: Brown University, Providence, RI, USA + index: 6 +date: 28 Jan 2024 +bibliography: paper.bib + +# Optional fields if submitting to a AAS journal too, see this blog post: +# https://blog.joss.theoj.org/2018/12/a-new-collaboration-with-aas-publishing +# aas-doi: 10.3847/xxxxx <- update this with the DOI from AAS once you know it. +# aas-journal: Astrophysical Journal <- The name of the AAS journal. 
+--- + +# Summary + +Predicting how neutrons move through space and time, and change speed and direction of travel, are important considerations when modeling inertial confinement fusion systems, pulsed neutron sources, and nuclear criticality safety experiments, among other systems. +This can be modeled with a Monte Carlo simulation, where particles with statistical importance are created and transported to produce a particle history [@lewis_computational_1984]. +A particle's path and the specific set of events that occur within its history are governed by pseudo-random numbers, known probabilities (e.g., from material data), and known geometries. +Information about how particles move and/or interact with the system is tallied to construct a histogram solution of parameters of interest with an associated statistical error from the Monte Carlo process. +Simulating dynamic systems that vary in time requires novel numerical methods to compute a solution performantly. +We designed Monte Carlo / Dynamic Code (`MC/DC`) to explore such novel numerical methods on modern high-performance computing systems. +We avoid the need for a compiled or domain-specific language by using the Numba compiler for Python to accelerate and abstract our compute kernels to near compiled code speeds. +We have implemented novel algorithms using this scheme and, in some verification tests, have approached the performance of industry-standard codes at the scale of tens of thousands of processors. + +# Statement of need + +`MC/DC` is a performant software platform for rapidly developing and applying novel, dynamic, neutron-transport algorithms on modern high-performance computing systems. +It uses the Numba compiler for Python to compile compute kernels to a desired hardware target, including support for graphics processing units (GPUs) [@lam_numba_2015]. 
+`MC/DC` uses `mpi4py` for distributed-memory parallelism [@mpi4py_2021] and has run at the scale of tens of thousands of processors [@variansyah_mc23_mcdc]. +These acceleration and abstraction techniques allow `MC/DC` developers to remain in a pure Python development environment without needing to support compiled or domain-specific languages. +This has allowed `MC/DC` to grow from its initialization less than two years ago into a codebase that supports full performant neutron transport and investigation of novel transport algorithms, with development mostly from relative novices. + +Many traditionally developed neutron-transport codes are export-controlled (e.g. `MCNP` [@mcnp], `Shift` [@shift], and `MCATK` [@mcatk]) and some are known to be difficult to install, use, and develop in. +`MC/DC` is open-source, and thus, similar to other open-source Monte Carlo neutron-transport codes (e.g., `OpenMC` [@openmc]), it promotes knowledge sharing, collaboration, and inclusive, community-driven development. +What makes `MC/DC` unique is that its code base is exclusively written in Python, making it a good method exploration tool and an excellent entry point for students. +Furthermore, `MC/DC` is wrapped as a Python package that can be conveniently installed via the `pip` distribution, and its development is assisted by a suite of unit, regression, verification, and performance tests, which are mostly run using continuous integration via GitHub Actions. +This all together makes `MC/DC` ideal for use in an academic environment for both research and education. + +`MC/DC` has support for continuous and multi-group energy neutron transport physics with constructive solid geometry modeling. +It can solve k-eigenvalue problems (used to determine neutron population growth rates in reactors) as well as fully dynamic simulations. +It also supports some simple domain decomposition, with more complex algorithms currently being implemented. 
+In an initial code-to-code performance comparison, `MC/DC` was found to run about 2.5 times slower than the Shift Monte Carlo code for a simple problem and showed similar scaling on some systems [@variansyah_mc23_mcdc]. + +`MC/DC`-enabled explorations into dynamic neutron transport algorithms have been successful, including quasi-Monte Carlo techniques [@mcdc:qmc], hybrid iterative techniques for k-eigenvalue simulations [@mcdc:qmcabs], population control techniques [@mcdc:variansyah_nse22_pct; @mcdc:variansyah_physor22_pct], continuous geometry movement techniques that model transient elements [@variansyah_mc23_moving_object] (e.g., control rods or pulsed neutron experiments) more accurately than step functions typically used by other codes, initial condition sampling technique for typical reactor transients [@variansyah_mc23_ic], hash-based random number generation [@mcdc:cuneo2024alternative], uncertainty and global sensitivity analysis [@mcdc:clements_mc23; @mcdc:clements_variance_2024], residual Monte Carlo methods, and machine learning techniques for dynamic node scheduling, among others. + +# Future Work + +The main `MC/DC` branch currently only supports CPU architectures enabled by Numba (`x86-64`, `arm64`, and `ppc64`) but we are rapidly extending support to GPUs. +We currently have operability on Nvidia GPUs (supported via Numba), and work is ongoing to enable compilation for AMD GPUs. +On GPUs, `MC/DC` will use the `harmonize` asynchronous GPU scheduler to increase performance [@brax2023]. +`harmonize` works by batching jobs during execution such that similar operations get executed simultaneously, reducing the divergence between parallel threads running on the GPU. + +We will continue to explore novel methods for dynamic neutron transport and will keep pushing to make `MC/DC` not only a proven platform for rapidly exploring neutron-transport methods, but also a fully-fledged simulation code for academic and industrial use. 
+ +# Acknowledgements + +This work was supported by the Center for Exascale Monte-Carlo Neutron Transport (CEMeNT) a PSAAP-III project funded by the Department of Energy, grant number: DE-NA003967. + +# References diff --git a/mcdc/docs/requirements-rtd.txt b/mcdc/docs/requirements-rtd.txt new file mode 100644 index 000000000..59629f309 --- /dev/null +++ b/mcdc/docs/requirements-rtd.txt @@ -0,0 +1,8 @@ +sphinx==7.2.6 +furo +numpy +numba +matplotlib +scipy +h5py +sphinx_toolbox diff --git a/mcdc/docs/source/_static/.gitkeep b/mcdc/docs/source/_static/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/mcdc/docs/source/_static/custom.css b/mcdc/docs/source/_static/custom.css new file mode 100644 index 000000000..67b0574f9 --- /dev/null +++ b/mcdc/docs/source/_static/custom.css @@ -0,0 +1,22 @@ +/* Hide the "class" keyword prefix on autoclass signatures + so they render like function calls (e.g. mcdc.Universe(...) instead of + class mcdc.Universe(...)). */ +dt.sig.sig-object.py > em.property { + display: none; +} + +/* Container styling */ +.admonition.tip { + border-left: 5px solid #ffde57; /* Python Yellow border */ +} + +/* Adding the Green Circle to the title */ +.admonition.tip > .admonition-title::before { + background-color: #ffde57; +} + +/* Optional: Change title background to a light Python Yellow */ +.admonition.tip > .admonition-title { + background-color: #3776ab; + color: #ffffff; +} diff --git a/mcdc/docs/source/_templates/.gitkeep b/mcdc/docs/source/_templates/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/mcdc/docs/source/_templates/omcclass.rst b/mcdc/docs/source/_templates/omcclass.rst new file mode 100644 index 000000000..56a10d57c --- /dev/null +++ b/mcdc/docs/source/_templates/omcclass.rst @@ -0,0 +1,7 @@ +{{ fullname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. 
autoclass:: {{ objname }} + :no-members: + :no-special-members: diff --git a/mcdc/docs/source/_templates/omcfunction.rst b/mcdc/docs/source/_templates/omcfunction.rst new file mode 100644 index 000000000..4d7ea38a1 --- /dev/null +++ b/mcdc/docs/source/_templates/omcfunction.rst @@ -0,0 +1,6 @@ +{{ fullname }} +{{ underline }} + +.. currentmodule:: {{ module }} + +.. autofunction:: {{ objname }} diff --git a/mcdc/docs/source/conf.py b/mcdc/docs/source/conf.py new file mode 100644 index 000000000..5f6645b06 --- /dev/null +++ b/mcdc/docs/source/conf.py @@ -0,0 +1,65 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html +# -- Path setup -------------------------------------------------------------- +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +import os +import sys + +# Make sure the project root (containing the `mcdc/` package) is importable +HERE = os.path.abspath(os.path.dirname(__file__)) +PROJECT_ROOT = os.path.abspath(os.path.join(HERE, "..", "..")) +if PROJECT_ROOT not in sys.path: + sys.path.insert(0, PROJECT_ROOT) + +# -- Project information ----------------------------------------------------- +project = "MC/DC" +copyright = "2023-2026, Center for Exascale Monte Carlo Neutron Transport (CEMeNT), Center for Advancing the Radiation Resilience of Electronics (CARRE), and MC/DC contributors" + +# The full version, including alpha/beta/rc tags +release = " " + +# -- General configuration --------------------------------------------------- +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx_toolbox.github", + "sphinx_toolbox.sidebar_links", + "sphinx.ext.autosectionlabel", +] +autosummary_generate = True +autosectionlabel_prefix_document = True + +github_username = "CEMeNT-PSAAP" +github_repository = "MCDC" +github_url = "https://github.com/{github_username}/{github_repository}" + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = [] + +# -- Options for HTML output ------------------------------------------------- +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = "furo" +html_logo = "images/home/mcdc.svg" + +# html_permalinks = ['https://cement-psaap.github.io/', 'https://github.com/CEMeNT-PSAAP/MCDC'] + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static", "images/home"] +html_css_files = ["custom.css"] diff --git a/mcdc/docs/source/contribution/ci.rst b/mcdc/docs/source/contribution/ci.rst new file mode 100644 index 000000000..0fb31d7c0 --- /dev/null +++ b/mcdc/docs/source/contribution/ci.rst @@ -0,0 +1,41 @@ +.. _ci: + +.. highlight:: none + +Continuous Integration +====================== + +We use `github actions `_ to host and run most of our CI tests and version release information. +We run pure python unit tests and regression testing in pure Python, pure Python + MPI, numba, numba + MPI, and numba+GPU+harmonize. +When running regression tests we compare small particle count outputs to saved files in the testing directory. 
+If the RNG seed has not changed the results should be deterministic. + + +GPU COE Machine +--------------- + +CEMeNT currently has a `CI machine `_ on OSU's campus administered by the college of engineering HPC folks to do export controlled and GPU continuous integration. +It has a single Nvidia A2 (16GB VRAM) and an AMD EPYC 7313P 16-Core Processor with 64 GBs of RAM. +This is a dedicated machine with no additional users other than CEMeNT staff. + +To access the machine you have to be on OSU's VPN or on campus and ssh into + +.. code-block:: bash + + ssh <username>@cement.hpc.engr.oregonstate.edu + +If you do not have account access to that machine, email Rob Yelle (``robert.yelle@oregonstate.edu``) for support and ask to be added. +From there users can use SLURM or the module system to load preinstalled software. + +The standard dev env for MC/DC install on this machine can be set up with + +.. code-block:: bash + + module load cuda/11.8 gcc/10.3 mpich/4.0h_gcc-10 python/3.11 + python -m venv <venv-name> + module unload python/3.11 + source <venv-name>/bin/activate + +Then MC/DC and harmonize can be installed there in the normal manner for GPU capabilities. +The runner runs all the time in the background of Joanna's account. +Contact her with any issues or for instructions to set up your own runner! \ No newline at end of file diff --git a/mcdc/docs/source/contribution/container-dev.rst b/mcdc/docs/source/contribution/container-dev.rst new file mode 100644 index 000000000..7dbfd7334 --- /dev/null +++ b/mcdc/docs/source/contribution/container-dev.rst @@ -0,0 +1,147 @@ +MC/DC Container Build & Development +=================================== + +Building from Source +-------------------- + +Run builds from the **root directory** of the MC/DC repository. + +.. code-block:: bash + + cd /path/to/MCDC + ls containers/Dockerfile + ls pyproject.toml + +Docker +~~~~~~ + +.. code-block:: bash + + docker build -f containers/Dockerfile -t mcdc:dev . + +Docker Compose +~~~~~~~~~~~~~~ + ..
code-block:: bash + + docker compose -f containers/docker-compose.yml build + +Podman +~~~~~~ + +.. code-block:: bash + + podman build -f containers/Dockerfile -t mcdc:dev . + +Apptainer +~~~~~~~~~ + +Option A: + +.. code-block:: bash + + apptainer build --sandbox mcdc_sandbox docker://ghcr.io/cement-psaap/mcdc:dev + +Option B: + +.. code-block:: bash + + docker build -f containers/Dockerfile -t mcdc:dev . + docker save mcdc:dev -o mcdc.tar + scp mcdc.tar user@host:~/ + apptainer build --sandbox mcdc_sandbox docker-archive://mcdc.tar + +LLNL Storage Setup +------------------ + +If you see: + +:: + + lsetxattr: operation not supported + +Option A: + +.. code-block:: bash + + podman --root /var/tmp/$USER/containers/storage run --rm -it mcdc:dev + +Option B: + +.. code-block:: bash + + mkdir -p ~/.config/containers + # create storage.conf with overlay config + +Troubleshooting +--------------- + +``lsetxattr`` error +~~~~~~~~~~~~~~~~~~~ +Cause: Podman storage on network filesystem. + +``setgroups 65534 failed`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Cause: Rootless Podman user mapping. + +``permission denied`` +~~~~~~~~~~~~~~~~~~~~~ +Fix: + +.. code-block:: bash + + podman run --rm -it --user root mcdc:dev + +``Out of memory`` (Apptainer) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Use sandbox mode. + +``HYDU_create_process`` error +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Use: + +.. code-block:: bash + + mpirun -launcher fork -n 4 python input.py + +For Developers +-------------- + +Pushing to the Registry +~~~~~~~~~~~~~~~~~~~~~~~ + +.. rubric:: One-Time Setup + +1. Go to https://github.com/settings/tokens +2. Generate token with ``write:packages`` +3. Login: + +.. code-block:: bash + + echo "TOKEN" | docker login ghcr.io -u USER --password-stdin + +.. rubric:: Building and Pushing + +On Apple Silicon: + +.. code-block:: bash + + docker build --platform linux/amd64 -f containers/Dockerfile -t mcdc:dev-amd64 . 
+ docker tag mcdc:dev-amd64 ghcr.io/cement-psaap/mcdc:dev + docker push ghcr.io/cement-psaap/mcdc:dev + +.. rubric:: Making the Package Public + +1. Go to org packages page +2. Find ``mcdc`` +3. Change visibility to Public + +File Overview +~~~~~~~~~~~~~ + +:: + + containers/ + ├── Dockerfile + ├── docker-compose.yml + └── README.md diff --git a/mcdc/docs/source/contribution/documentation.rst b/mcdc/docs/source/contribution/documentation.rst new file mode 100644 index 000000000..2283122dd --- /dev/null +++ b/mcdc/docs/source/contribution/documentation.rst @@ -0,0 +1,141 @@ +.. _documentation: + +.. highlight:: none + +Documentation +============= + +Our website is built using a documentation generator called `Sphinx `_ that translates a set of plaintext files into a set of html files (it can also build other formats too, like PDFs). +We use the platform `readthedocs `_ to build and host our documentation as a website. Yay! + +Sphinx has a ton of useful features and capabilities. +On this page, we do our best to keep it to what you need to know to contribute to MC/DC's documentation. + + +reStructuredText and Sphinx +--------------------------- + +We write files for Sphinx using a plaintext markup language called reStructuredText (rst). +`Click here for a rst Primer `_. +Sphinx builds an html file for every rst file in the documentation root directory and its subdirectories our documentation root directory is ``MCDC/docs/source/``. +The root document, ``index.rst``, serves as the welcome page. +The root directory also contains several subdirectories, each of which has its own ``index.rst`` file and several other rst files. +It's useful to compare our rst files to their associated webpages to get a feel for how they translate. + + +Like any plaintext markup language, rst uses "explicit markup" for constructs that need special handling, such as including a code-block or cross-referencing other pages. + +A block of explicit markup text starts with ".. 
" and is terminated by the next paragraph at the same level of indentation. + +Sphinx creates webpage elements using explicit markup blocks called directives. + +.. tip:: + For example, this block was created using the `tip` directive! + +:: + + .. tip:: + For example, this block was created using the `tip` directive! + +An explicit markup block without a directive is taken as a comment that will not appear on the webpage: +:: + + .. For example, this is a comment. + +In addition to directives for blocks of explicit markup, Sphinx handles in-line explicit markup with roles. +For example, this equation :math:`a^2 + b^2 = c^2` was created using the `math` role. +:: + + For example, this equation :math:`a^2 + b^2 = c^2` was created using the `math` role. + +`Click here for a list of Sphinx directives `_ and `click here for a list of Sphinx roles `_. + + +The toctree +----------- + +Sphinx's main directive is the `toctree` directive, which generates a table of contents tree (toctree) with links to other webpages in the build. +The listed documents should be named relative to the current document and excluding the .rst extension. +For example, the MC/DC docs root directory contains ``index.rst``, ``install.rst``, and a subdirectory ``user/`` that also contains its own ``index.rst``. +The following on ``index.rst`` creates a table of contents on the main page with links to the install and user pages: +:: + + .. toctree:: + install + user/index + +Sphinx will build an html file for all rst files in the source directory and its subdirectories. +Sphinx will issue a warning if an html file isn't referenced in any toctree because that means that the generated webpage is not reachable through standard navigation. + + +Using autodoc and autosummary +----------------------------- + +Within MC/DC's source code, we document functions and classes using docstrings. 
+`We use two Sphinx extensions `_ -- ``autodoc`` and ``autosummary`` -- to generate rst files for Sphinx using the existing docstrings in our source code. +For ``autodoc`` and ``autosummary`` to work, the docstrings within MC/DC's source code must be written in correct rst. + +The ``autodoc`` extension includes a set of directives to document different chunks of code (e.g., modules, functions, classes). +For example, below is the entire rst file that generates the :doc:`../pythonapi/generated/mcdc.MaterialMG` page: + +.. code-block:: + + mcdc.MaterialMG + =============== + + .. currentmodule:: mcdc + + .. autoclass:: MaterialMG + +(That in-line reference was created using :code:`:doc:\`../pythonapi/generated/mcdc.MaterialMG\``, by the way). + +A rst file with an ``autodoc`` directive is required for each module or function that we would like to document. +Rather than create all of these rst files by hand, we use the ``autosummary`` extension to do it for us. + +For example, let's look at the first ``autosummary`` directive in ``source/pythonapi/index.rst``, the file that governs the :doc:`../pythonapi/index` page: + +.. code-block:: + + .. autosummary:: + + mcdc.Material + mcdc.MaterialMG + +This directive: + #. Generates two files in ``pythonapi/generated/``: ``mcdc.Material.rst`` and ``mcdc.MaterialMG.rst``. + #. Populates each file with the proper autoclass directive. + #. Creates a table on :doc:`../pythonapi/index` with entries mcdc.Material and mcdc.MaterialMG that link to the respective generated pages. + + +Building the documentation +-------------------------- + +We can check our work with a local build. +Make sure you're in ``MCDC/docs/``: + +#. Both Sphinx and furo (the package we use for website theming) should have been installed with MC/DC. + To check, type ``sphinx-build --version`` on the commandline. + If not installed, ``pip install sphinx furo``. +#. With Sphinx installed, run ``make html``. + This builds local html files in ``MCDC/docs/build/``. 
+#. To launch your local html from the commandline, ``open build/html/index.html``. + Check your work: has your content been added or changed as you expected? +#. Continue making changes to your local rst files, building locally, and launching the built html files until you're satisfied with how the website will look. + +.. warning:: + In the process of creating MC/DC's documentation, ``autodoc`` *imports every python module that MC/DC imports*. + + This doesn't cause any issues when you build the webpages locally, because you already have all of MC/DC's requisite packages installed. + + However, this *WILL* cause issues with our documentation website host, readthedocs. + Like you just did, readthedocs will checkout our repo and use Sphinx to build html files from our rst files, attempting to import all of MC/DC's packages along the way. + There are some python packages, like ``mpi4py``, that readthedocs is unable to import, causing the documentation build to fail. + + **If you've added any new package imports to MC/DC's source code, add them to the** ``MOCK_MODULES`` **list in** ``MCDC/docs/source/conf.py``. + + This will allow readthedocs to get past the imports without issue. + + +Once you're satisfied with your changes and have added any new modules to ``conf.py``, submit a PR! + + diff --git a/mcdc/docs/source/contribution/index.rst b/mcdc/docs/source/contribution/index.rst new file mode 100644 index 000000000..a64d3dceb --- /dev/null +++ b/mcdc/docs/source/contribution/index.rst @@ -0,0 +1,216 @@ +.. _contribution: + +================== +Contribution Guide +================== + +Thank you for looking to contribute to MC/DC! +We are really excited to see what you bring to this exciting open source project! +Whether you are here to make a single PR and never return, or want to become a maintainer we are pumped to work with you. +We have regular developers meetings for any and all who are interested to discuss contributions to this code base. 
+ +This describes the processes of contributing to MC/DC for both internal (CEMeNT) and external developers. +We make contributions to the ``dev`` branch of MC/DC. +To get started making alterations in a cloned repo + +#. fork ``CEMeNT-PSAAP/MCDC`` to your github account +#. ``git clone git@github.com:/MCDC.git`` +#. ``git switch dev`` +#. run install script which will install MC/DC as an editable package from this directory + +Push some particles around!!!! + +Development Guidelines +---------------------- + +.. toctree:: + :maxdepth: 1 + + documentation.rst + ci.rst + container-dev.rst + +Check out the :doc:`documentation` guide for info on how to contribute to these docs. +We understand that documenting code is often a lower priority than the code itself, but it goes a long way towards usability and maintainability. + +Please note our `code of conduct `_, which we take seriously. + +------------ +Code Styling +------------ + +Our code is auto-linted for the `Black code style `_. +Your contributions will not be merged unless you follow this code style. +It's pretty easy to do this locally, just run, + +.. code-block:: sh + + + pip install black + black . + + +in the top level MC/DC directory and all necessary changes will be automatically made for you. + +--------- +Debugging +--------- + +MCDC includes options to debug the Numba JIT code. +It does this by toggling Numba options using the numba.config submodule. +This will result in less performant code and longer compile times but will allow for better error messages from Numba and other packages. +`See Numba documentation of a list of all possible debug and compiler options. `_ +The most useful set of debug options for MC/DC can be enabled with + +.. 
code-block:: sh + + python input.py --mode=numba_debug + +Which will toggle the following debug and compiler options in Numba: + +* ``DISABLE_JIT=False`` turns on the JIT compiler +* ``NUMBA_OPT=0`` Forces the compilers to form un-optimized code (other options for this are ``1``, ``2``, and ``3`` with ``3`` being the most optimized). This option might need to be changed if errors only result from more optimization. +* ``DEBUG=False`` keeps Numba's full debug mode off; enabling it turns on all debugging options, but it is left disabled in ``mcdc numba_debug`` as it will print a LOT of info on your terminal screen +* ``NUMBA_FULL_TRACEBACKS=1`` allows errors from sub-packages to be printed (e.g., NumPy) +* ``NUMBA_BOUNDSCHECK=1`` numba will check vectors for bounds errors. If this is disabled, bounds errors will result in a ``seg_fault``. This, in concert with the previous option, allows for the exact location of a bounds error to be printed from NumPy subroutines +* ``NUMBA_DEBUG_NRT=1`` enables the `Numba run time (NRT) statistics counter `_ This helps with debugging memory leaks. +* ``NUMBA_DEBUG_TYPEINFER=1`` prints out debugging information about type inferences that numba might need to make if a function is ill-defined +* ``NUMBA_ENABLE_PROFILING=1`` enables profiler use +* ``NUMBA_DUMP_CFG=1`` prints out a control flow diagram + +If extra debug options or alteration to these options are required they can be toggled and passed under the ``mode==numba_debug`` option tree in ``mcdc/config.py``. + +------- +Caching +------- + +MC/DC is a just-in-time (JIT) compiled code. +This is sometimes disadvantageous, especially for users who might run many versions of the same simulation with slightly different parameters. +As the JIT compilation scheme will only compile functions that are actually used in a given simulation, it is not guaranteed that any one function will be compiled. + +Developers should be very cautious about using caching features. +Numba has a few documented errors around caching.
+The most critical of which is that functions in other files that are called by cached functions will not force a recompile, even if there are changes in those sub-functions. +In this case caching should be disabled. + +In MC/DC the simulation functions (in ``mcdc/transport/simulation.py``) can be configured to use caching. +Caching behavior is controlled via the ``--caching`` and ``--clear_cache`` command-line flags. + +To disable caching, omit the ``--caching`` flag (the default). +Alternatively a developer could delete the ``__pycache__`` directory or other cache directory which is system dependent (`see more about clearing the numba cache `_) + + +At some point MC/DC will enable `Numba's Ahead of Time compilation abilities `_. But the core development team is holding off until scheduled `upgrades to AOT functionality in Numba are implemented `_. +However if absolutely required by users numba does allow for some `cache sharing `_. + +------------------ +Adding a New Input +------------------ + +To add a new keyword argument such that a user can interface with it in an input deck +there are a few different places a dev will need to make alterations. +The input objects are defined as dataclasses in the ``mcdc/object_/`` directory: + +#. ``mcdc/object_/settings.py`` — simulation settings and k-eigenvalue parameters +#. ``mcdc/object_/material.py`` — material definitions (``Material``, ``MaterialMG``) +#. ``mcdc/object_/surface.py`` — surface geometry (``Surface`` class methods) +#. ``mcdc/object_/cell.py`` — cell definitions (``Cell``) +#. ``mcdc/object_/source.py`` — source specifications (``Source``) +#. ``mcdc/object_/tally.py`` — tally objects (``Tally``) +#. ``mcdc/object_/technique.py`` — variance reduction techniques +#. 
``mcdc/config.py`` — command-line argument definitions + +------- +Testing +------- + +Check out the :doc:`ci` for more info on how we run these tests automatically. + +MC/DC has a robust testing suite that your changes must be able to pass before a PR is accepted. +Unit tests for functions that have them are run in pure Python form. +Mostly this is for ensuring input operability. +A regression test suite (including models with analytical and experimental solutions) is provided to ensure accuracy and precision of MC/DC. + +Our test suite runs on every PR and push. +Our GitHub-based CI runs for, + +* linux-64 (x86) +* osx-64 (x86, Intel-based Macs) + +While we do not have continuous integration for other systems, we have validated MC/DC on them. + +To run the regression tests locally, navigate to ``MCDC/test/regression`` and run, + +.. code-block:: sh + + + python run.py + + +and all the tests will run. Various options ``OPTION_FLAG`` are accepted to control the tests run, + +* Run a specific test (with wildcard ``*`` support): ``--name=<test_name>`` +* Run in Numba mode: ``--mode=numba`` +* Run in multiple MPI ranks (currently supporting ``mpiexec`` and ``srun``): ``--mpiexec=<number of ranks>`` + +Note that flags can be combined. To add a new test: + +#. Create a folder. The name of the folder will be the test name. +#. Add the input file. Name it `input.py`. +#. Add the answer key file. Name it `answer.h5`. +#. Make sure that the number of particles run is large enough for a good test. +#. If the test runs longer than 5 seconds, consider decreasing the number of particles. + +When adding a new hardware backend a new instantiation of the test suite should be made. +This is done with GitHub Actions. +See ``.github/workflows`` for examples. + +If a new simulation type is added (e.g. quasi Monte Carlo with Davidson's method, residual Monte Carlo, intrusive UQ) more regression tests should be added with your PR. +If you are wondering what accommodations are needed, just ask in your PR.
+ + +-------------------- +Adding Documentation +-------------------- + + +It's not everything it needs to be but we are trying! +If your contribution changes the behavior of the input deck, installation process, or testing infrastructure your contribution must include alteration to this documentation. +That can be done by editing the RST files in ``/MCDC/docs/source/<file_name>.rst``. + +To add a new page to the documentation, + +#. Add a new file, for example ``<file_name>.rst`` +#. Add the necessary file header (for example this file is: ``.. _contributions:``) +#. Add ``<file_name>`` (without file extension) to the ``.. toctree::`` section of ``index.rst`` +#. Write your contributions using ``.rst`` format (see this `cheat sheet `_) + +To build the docs changes you have made locally before committing, + +#. Install dependencies (we recommend: ``conda install sphinx`` and ``pip install furo sphinx_toolbox``). + Note that these dependencies are not installed as part of base MC/DC. +#. From the ``MCDC/docs/`` directory, run ``make html`` to compile. +#. Launch ``build/html/index.html`` with your browser of choice. + + +------------- +Pull Requests +------------- + + +MC/DC works off of a fork workflow in which contributors fork our repo, make alterations, and submit a pull request. +You should only submit a pull request once your code passes all tests, is properly linted, you have edited documentation (if necessary), and added any new tests (if needed). +Open a PR to the ``dev`` branch on GitHub. +MC/DC's main branch is only updated for version releases at which time a PR from dev to main is opened, tagged, archived, and published automatically. + +Within your pull request documentation please list: + +#. Type of PR (e.g. enhancement, bugfix, etc); +#. Link to any theory to understand what you are doing; +#. Link to any open/closed issues if applicable; +#. New functionalities implemented +#. Deprecated functionalities +#. New dependencies needed (we don't add these lightly) +#.
Anything else we need to give you the thorough code review you deserve! + +If these things aren't listed we will ask for clarifying questions! diff --git a/mcdc/docs/source/examples/c5g7_k_eigenvalue.rst b/mcdc/docs/source/examples/c5g7_k_eigenvalue.rst new file mode 100644 index 000000000..67eda2256 --- /dev/null +++ b/mcdc/docs/source/examples/c5g7_k_eigenvalue.rst @@ -0,0 +1,64 @@ +.. _example_c5g7_k_eigenvalue: + +============================================= +C5G7 — k-eigenvalue example +============================================= + +Description +=========== + +Multigroup k-eigenvalue calculation for the C5G7 benchmark using the +packaged MGXS HDF5 library in ``examples/c5g7``. This example performs a +static criticality calculation and reports :math:`k_{\mathrm{eff}}` and +gyration-radius diagnostics. + +Step-by-Step Walkthrough +======================== + +The C5G7 benchmark uses a pre-packaged 7-group cross-section library +(``MGXS-C5G7-TD.h5``). The input file defines the full-core geometry +using MC/DC’s lattice and universe system. + +Key concepts demonstrated: + +- **Multi-group materials** loaded from an external HDF5 library via + ``mcdc.MaterialMG(library=...)``. +- **Pin-cell universes** built from cylindrical fuel pins in square + moderator cells. +- **Lattice assemblies** that tile pin-cell universes into fuel + assemblies of different enrichments. +- **Core lattice** that arranges assemblies and reflector regions. +- **k-eigenvalue mode** with ``set_eigenmode()`` for criticality. + +Refer to the embedded code below — comments in the source mark each +section (materials, pins, assemblies, core, source, tallies, settings). + +**What to try:** + +- Increase ``N_particle`` for better :math:`k_{\text{eff}}` statistics. +- Adjust the number of inactive/active cycles. +- Compare :math:`k_{\text{eff}}` with the published C5G7 reference value. + +Full Input +========== + +Click here to view the input file: `examples/c5g7/k-eigenvalue/input.py `_. 
+ +The complete input used for this example is embedded below: + +.. literalinclude:: ../../../examples/c5g7/k-eigenvalue/input.py + :language: python + :linenos: + +How to Run +========== + +From the repository root run:: + + python examples/c5g7/k-eigenvalue/input.py + +Expected Output +=============== + +Eigenvalue history printed to stdout and HDF5 tally data for flux and +gyration-radius diagnostics saved in the build artifacts folder. diff --git a/mcdc/docs/source/examples/c5g7_transient.rst b/mcdc/docs/source/examples/c5g7_transient.rst new file mode 100644 index 000000000..5d4a5a025 --- /dev/null +++ b/mcdc/docs/source/examples/c5g7_transient.rst @@ -0,0 +1,64 @@ +.. _example_c5g7_transient: + +============================================= +C5G7 — Transient example +============================================= + +Description +=========== + +Time-dependent C5G7-TD transient driven by control-rod movements and a +time-limited source. Uses the packaged MGXS library in +``examples/c5g7`` and demonstrates moving surfaces and time-resolved +tallies. + +Step-by-Step Walkthrough +======================== + +This example extends the C5G7 k-eigenvalue setup with time-dependent +features: + +- **Moving surfaces** simulate control-rod insertion/withdrawal. +- **Time-resolved tallies** capture the transient fission rate. +- **Time census** checkpoints the particle population at specified + intervals for population control. + +The geometry and material setup is identical to the k-eigenvalue case. +The transient-specific additions are: + +#. Surface velocities assigned via ``surface.move(...)``. +#. A ``time`` grid added to the mesh tally. +#. ``set_time_census(...)`` for time-step population control. + +Refer to the embedded code below for the full implementation. + +**What to try:** + +- Change the rod insertion speed to see prompt vs. delayed transient response. +- Add more time census points for finer population control. 
+- Compare power history with published C5G7-TD benchmarks. + +Full Input +========== + +Click here to view the input file: `examples/c5g7/transient/input.py `_. + +The complete input used for this example is embedded below: + +.. literalinclude:: ../../../examples/c5g7/transient/input.py + :language: python + :linenos: + +How to Run +========== + +From the repository root run:: + + python examples/c5g7/transient/input.py + +Expected Output +=============== + +HDF5 tallies with time-resolved fission rates and PNG visualisations for +fission and relative standard deviation per time step produced by the +companion plotting scripts. diff --git a/mcdc/docs/source/examples/fuel_array_packaged.rst b/mcdc/docs/source/examples/fuel_array_packaged.rst new file mode 100644 index 000000000..196b28f1a --- /dev/null +++ b/mcdc/docs/source/examples/fuel_array_packaged.rst @@ -0,0 +1,189 @@ +.. _example_fuel_array_packaged: + +============================================= +Packaged Fuel Array (Universe and Lattice) +============================================= + +Problem Description +=================== + +A three-dimensional fixed-source problem featuring two identical fuel +assemblies placed side-by-side inside a water-filled box. Each +assembly uses a composite "shooting-star" fuel geometry built from the +union of two orthogonal cylinders enclosed by a cladding sphere. + +This example demonstrates MC/DC's **universe**, **translation**, and +**rotation** capabilities for constructive solid geometry (CSG) +packaging. + +Geometry and Materials +====================== + +The global domain is a rectangular box: +:math:`x \in [-10,10]`, :math:`y \in [-5,5]`, :math:`z \in [-5,5]` cm, +with vacuum boundary conditions. + +Each assembly is defined as a universe containing three cells: + +1. **Fuel** — union of a z-aligned and an x-aligned cylinder + (radius 1 cm, half-length 5 cm). +2. **Cladding** — spherical shell (radius 3 cm) surrounding the fuel. +3. 
**Water** — region outside the cladding sphere. + +The left assembly is translated to :math:`(-5,0,0)` cm; +the right assembly is translated to :math:`(+5,0,0)` cm and rotated +:math:`10°` about the :math:`y`-axis. + +.. list-table:: Cross-section data (mono-energetic, cm\ :sup:`-1`) + :widths: 20 15 15 15 15 + + * - **Region** + - :math:`\Sigma_c` + - :math:`\Sigma_s` + - :math:`\Sigma_f` + - :math:`\nu` + * - Fuel + - 0.45 + - — + - 0.55 + - 2.5 + * - Cladding + - 0.05 + - 0.95 + - — + - — + * - Water + - 0.02 + - 0.08 + - — + - — + +Physical Assumptions +==================== + +* Mono-energetic (one-speed) neutron transport. +* Isotropic scattering. +* Steady-state fixed-source calculation. +* No delayed neutrons. + +Numerical Setup +=============== + +.. list-table:: + :widths: 35 65 + + * - **Spatial mesh (tally)** + - :math:`201 \times 101` in the :math:`(x,z)`-plane + * - **Tally score** + - Fission rate + * - **Source particles** + - :math:`10^{3}` (demonstration) + * - **Batches** + - 2 + +Quantities of Interest +====================== + +* Two-dimensional fission rate distribution in the :math:`(x,z)`-plane. +* Relative standard deviation map for convergence assessment. + +Reference Solution +================== + +No analytical reference. The geometry can be verified using MC/DC's +built-in ``mcdc.visualize()`` function to render the CSG model. + +Step-by-Step Walkthrough +======================== + +**1. Materials (lines 1–27)** + +.. literalinclude:: ../../../examples/fuel_array_packaged/input.py + :language: python + :lines: 1-27 + :linenos: + :lineno-match: + +Three mono-energetic materials: fissile fuel, a scattering cladding, and +water moderator. + +**2. Assembly Geometry — Shooting-Star CSG (lines 29–54)** + +.. literalinclude:: ../../../examples/fuel_array_packaged/input.py + :language: python + :lines: 29-54 + :linenos: + :lineno-match: + +The fuel region is the **union** of a z-cylinder and an x-cylinder (the +"shooting star"). 
The cladding fills the sphere minus the fuel. +Water fills outside the sphere. These three cells form a reusable +**universe**. + +**3. Packaging with Universe, Translation, and Rotation (lines 56–80)** + +.. literalinclude:: ../../../examples/fuel_array_packaged/input.py + :language: python + :lines: 56-80 + :linenos: + :lineno-match: + +The assembly universe is placed twice using ``mcdc.Cell(..., fill=assembly)``: + +- **Left** — translated to :math:`(-5, 0, 0)`. +- **Right** — translated to :math:`(+5, 0, 0)` and rotated 10° about :math:`y`. + +``set_root_universe()`` tells MC/DC these are the top-level cells. + +**4. Source, Tallies, Settings, and Run (lines 82–105)** + +.. literalinclude:: ../../../examples/fuel_array_packaged/input.py + :language: python + :lines: 82-105 + :linenos: + :lineno-match: + +A point-like source near the centre, a structured mesh tally for the +:math:`(x,z)`-plane fission rate, and 1 000 particles in 2 batches. +The ``active_bank_buffer`` accommodates fission-born particles. + +**5. Optional Visualization (lines 107–end)** + +.. literalinclude:: ../../../examples/fuel_array_packaged/input.py + :language: python + :lines: 107- + :linenos: + :lineno-match: + +Set ``visualize = True`` to render the CSG geometry with +``mcdc.visualize()`` instead of running the transport. + +**What to try:** + +- Change the rotation angle and observe the effect on the fission map. +- Add a third assembly copy with a different translation. +- Use ``mcdc.Lattice`` instead of manual universe placement. + +Full Input +========== + +Click here to view the input file: `examples/fuel_array_packaged/input.py `_. + +The complete input used for this example is embedded below: + +.. 
literalinclude:: ../../../examples/fuel_array_packaged/input.py + :language: python + :linenos: + +How to Run +========== + +From the repository root run:: + + python examples/fuel_array_packaged/input.py + +Expected Output +=============== + +An HDF5 mesh tally and optional visualization images produced by the +``mcdc.visualize()`` helper when run with visualization enabled. diff --git a/mcdc/docs/source/examples/index.rst b/mcdc/docs/source/examples/index.rst new file mode 100644 index 000000000..c5fc2feaa --- /dev/null +++ b/mcdc/docs/source/examples/index.rst @@ -0,0 +1,48 @@ +.. _examples: + +================ +Example Problems +================ + +MC/DC ships with a curated set of benchmark and demonstration problems that +exercise the code's key capabilities—from simple mono-energetic fixed-source +calculations to full-core, continuous-energy reactor transients. +Each example is self-contained: an ``input.py`` script sets up the problem and +a companion ``process-output.py`` (or ``process.py``) script post-processes +the results. + +The problems below mirror the concrete example folders present in the +`examples/` directory of this repository. Each page embeds the +corresponding ``input.py`` so that the documented setup exactly matches +the code shipped in-tree. A **Step-by-Step Walkthrough** section breaks +the input into annotated blocks, and a **What to try** box suggests +parameter changes for further exploration. + +Basic Examples +-------------- + +.. toctree:: + :maxdepth: 1 + + kobayashi_dog_leg + kobayashi_td + +Advanced Examples +----------------- + +.. toctree:: + :maxdepth: 1 + + moving_source + moving_pellet + fuel_array_packaged + sphere_in_cube + +Reactor Benchmarks +------------------ + +.. 
toctree:: + :maxdepth: 1 + + c5g7_k_eigenvalue + c5g7_transient diff --git a/mcdc/docs/source/examples/kobayashi_dog_leg.rst b/mcdc/docs/source/examples/kobayashi_dog_leg.rst new file mode 100644 index 000000000..b7869c5f0 --- /dev/null +++ b/mcdc/docs/source/examples/kobayashi_dog_leg.rst @@ -0,0 +1,189 @@ +.. _example_kobayashi_dog_leg: + +============================================= +Kobayashi Dog-Leg Void Benchmark +============================================= + +Problem Description +=================== + +A mono-energetic, three-dimensional shielding benchmark featuring a +dog-leg vacuum channel embedded in a purely scattering/absorbing shield. +The problem evaluates the ability of a Monte Carlo code to transport +neutrons through deep-penetration streaming paths. + +It is based on the NEA steady-state fixed-source benchmark problem suite +by Kobayashi *et al.* [Kobayashi2001]_. + +Geometry and Materials +====================== + +The computational domain spans +:math:`x \in [0,60]`, :math:`y \in [0,100]`, :math:`z \in [0,60]` cm. +Boundary conditions are reflective on the three symmetry planes +(:math:`x=0`, :math:`y=0`, :math:`z=0`) and vacuum elsewhere. + +Three regions are defined: + +1. **Source region** — :math:`x \in [0,10]`, :math:`y \in [0,10]`, + :math:`z \in [0,10]` cm. +2. **Dog-leg void channel** — an L-shaped duct connecting the source + corner to the far side of the domain. +3. **Shield** — the remaining volume. + +.. list-table:: Cross-section data (mono-energetic, cm\ :sup:`-1`) + :widths: 30 20 20 + + * - **Region** + - :math:`\Sigma_c` + - :math:`\Sigma_s` + * - Shield + - 0.05 + - 0.05 + * - Void channel + - :math:`5\times10^{-5}` + - :math:`5\times10^{-5}` + +Physical Assumptions +==================== + +* Mono-energetic (one-speed) neutron transport. +* Isotropic scattering. +* Steady-state (time-independent) fixed-source problem. +* Implicit capture variance-reduction technique. + +Numerical Setup +=============== + +.. 
list-table:: + :widths: 35 65 + + * - **Spatial mesh (tally)** + - :math:`60 \times 100 \times 60` uniform cells (1 cm spacing) + * - **Tally score** + - Scalar flux + * - **Source particles** + - :math:`10^{3}` (demonstration; increase for production runs) + * - **Batches** + - 2 + +Quantities of Interest +====================== + +* Three-dimensional scalar flux distribution :math:`\phi(x,y,z)`. +* Flux attenuation along the streaming channel and through the shield. + +Reference Solution +================== + +Reference solutions are tabulated in [Kobayashi2001]_ for several +axial slices. Post-processing in MC/DC generates :math:`(x,y)` flux +maps at selected :math:`z`-planes for direct comparison. + +References +========== + +.. [Kobayashi2001] K. Kobayashi, N. Sugimura, and Y. Nagaya, + "3D Radiation Transport Benchmark Problems and Results for Simple + Geometries with Void Region," + *Progress in Nuclear Energy*, **39**:2, 119–144 (2001). + `[link] `__ + +Step-by-Step Walkthrough +======================== + +This section walks through the input file block by block. + +**1. Import and Materials (lines 1–13)** + +.. literalinclude:: ../../../examples/kobayashi/input.py + :language: python + :lines: 1-13 + :linenos: + :lineno-match: + +Two mono-energetic multi-group materials are created: +``m`` for the shield (:math:`\Sigma_c = \Sigma_s = 0.05`) and +``m_void`` for the dog-leg channel (:math:`10^{-4}` total). + +**2. Surfaces (lines 15–30)** + +.. literalinclude:: ../../../examples/kobayashi/input.py + :language: python + :lines: 15-30 + :linenos: + :lineno-match: + +Fifteen planar surfaces define the 3-D bounding box and the internal +partitions. Reflective conditions on ``sx1``, ``sy1``, ``sz1`` exploit +the quarter-symmetry; vacuum on the outer faces allows leakage. + +**3. Cells — CSG Region Definitions (lines 32–44)** + +.. 
literalinclude:: ../../../examples/kobayashi/input.py + :language: python + :lines: 32-44 + :linenos: + :lineno-match: + +Three cells cover the domain: + +- The **source cell** (a small corner cube) filled with shield material. +- The **void channel** — four rectangular segments combined with the + ``|`` (union) operator to form the L-shaped duct. +- The **shield** — the full box minus the void channel, using the + ``~`` (complement) operator. + +**4. Source (lines 50–57)** + +.. literalinclude:: ../../../examples/kobayashi/input.py + :language: python + :lines: 50-57 + :linenos: + :lineno-match: + +An isotropic, uniformly distributed source fills the +:math:`10 \times 10 \times 10` cm corner cube. + +**5. Tallies, Settings, Techniques, and Run (lines 63–74)** + +.. literalinclude:: ../../../examples/kobayashi/input.py + :language: python + :lines: 63-74 + :linenos: + :lineno-match: + +- A uniform :math:`60 \times 100 \times 60` mesh tally records scalar flux. +- 1 000 source particles in 2 batches (increase for production). +- Implicit capture prevents particles from being absorbed prematurely. +- ``mcdc.run()`` launches the simulation. + +**What to try:** + +- Increase ``N_particle`` to :math:`10^5` or more for smoother flux maps. +- Change void-channel cross sections to see how attenuation changes. +- Add a time grid to the tally for a transient variant (see the TD example). + +Full Input +========== + +Click here to view the input file: `examples/kobayashi/input.py `_. + +The complete input used for this example is embedded below: + +.. literalinclude:: ../../../examples/kobayashi/input.py + :language: python + :linenos: + +How to Run +========== + +From the repository root run:: + + python examples/kobayashi/input.py + +Expected Output +=============== + +A mesh tally HDF5 file with 3-D flux data and example plotting using +the companion ``process-output.py`` in the same directory. 
diff --git a/mcdc/docs/source/examples/kobayashi_td.rst b/mcdc/docs/source/examples/kobayashi_td.rst new file mode 100644 index 000000000..f55f216af --- /dev/null +++ b/mcdc/docs/source/examples/kobayashi_td.rst @@ -0,0 +1,79 @@ +.. _example_kobayashi_td: + +============================================= +Time-Dependent Kobayashi Dog-Leg +============================================= + +Description +=========== + +Time-dependent variant of the Kobayashi dog-leg shielding benchmark. +See the steady-state Kobayashi example for geometry and material +specifications; this variant drives the problem with a pulsed source and +records time-resolved tallies. + +Step-by-Step Walkthrough +======================== + +This example reuses the exact geometry from the steady-state Kobayashi +dog-leg benchmark. The key differences are highlighted below. + +**1. Source with a Time Window (line 54–60)** + +.. literalinclude:: ../../../examples/kobayashi-TD/input.py + :language: python + :lines: 50-60 + :linenos: + :lineno-match: + +The source now has ``time=[0.0, 50.0]``, meaning particles are emitted +over a 50 s window rather than instantaneously. + +**2. Time-Resolved Tallies (lines 66–69)** + +.. literalinclude:: ../../../examples/kobayashi-TD/input.py + :language: python + :lines: 66-69 + :linenos: + :lineno-match: + +A ``time`` grid is added to both the mesh tally and a global density +tally. This creates a time-resolved :math:`\phi(x, y, t)` dataset. +Global ``Tally`` with ``scores=["density"]`` tracks total neutron +population over time. + +**What to try:** + +- Shorten the source time to create a short pulse and watch the + neutron cloud propagate. +- Add a finer time grid to capture early transient behaviour. +- Compare with the steady-state Kobayashi results. + +Full Input +========== + +Click here to view the input file: `examples/kobayashi-TD/input.py `_. + +The complete input used for this example is embedded below: + +.. 
literalinclude:: ../../../examples/kobayashi-TD/input.py + :language: python + :linenos: + +How to Run +========== + +From the repository root run:: + + python examples/kobayashi-TD/input.py + +Expected Output +=============== + +Time-resolved mesh tally HDF5 file and an animation produced by +``process-output.py`` that visualises neutron density versus time. + +References +========== + +See: Kobayashi *et al.* (2001), Progress in Nuclear Energy. diff --git a/mcdc/docs/source/examples/moving_pellet.rst b/mcdc/docs/source/examples/moving_pellet.rst new file mode 100644 index 000000000..af70b5a34 --- /dev/null +++ b/mcdc/docs/source/examples/moving_pellet.rst @@ -0,0 +1,197 @@ +.. _example_moving_pellet: + +============================================= +Moving Fuel Pellet +============================================= + +Problem Description +=================== + +A three-dimensional time-dependent problem in which a cylindrical fuel +pellet traverses a box of air-like material along a piecewise-linear +trajectory. Both the cylindrical surface and the bounding planes of the +pellet move, demonstrating MC/DC's **moving-surface** capability for +transient geometry. + +Geometry and Materials +====================== + +The computational domain is a rectangular box: +:math:`x \in [-5,5]`, :math:`y \in [-5,5]`, :math:`z \in [-10,10]` cm, +with vacuum boundary conditions on all faces. + +A z-aligned cylindrical fuel pellet (radius 1 cm) is initially located +at :math:`z \in [6,9]` cm. The pellet moves in three consecutive phases: + +.. list-table:: + :widths: 15 40 40 15 + + * - **Phase** + - **Cylinder velocity** + - **End-cap velocity** + - **Duration (s)** + * - 1 + - :math:`(-0.5,\; 0,\; 0)` + - :math:`(0,\; 0,\; -2)` + - 2 / 5 + * - 2 + - :math:`(1,\; 0,\; 0)` + - :math:`(0,\; 0,\; 4)` + - 5 / 2 + * - 3 + - :math:`(-2,\; 0,\; 0)` + - :math:`(0,\; 0,\; -10)` + - 1 / 1 + +Two materials are used: + +.. 
list-table:: Cross-section data (mono-energetic, cm\ :sup:`-1`) + :widths: 20 15 15 15 15 + + * - **Region** + - :math:`\Sigma_c` + - :math:`\Sigma_s` + - :math:`\Sigma_f` + - :math:`\nu` + * - Fuel pellet + - 0.50 + - — + - 0.25 + - 1.5 + * - Air + - 0.002 + - 0.008 + - — + - — + +Neutron speed: :math:`v = 2 \times 10^{5}` cm/s (both regions). + +Physical Assumptions +==================== + +* Mono-energetic (one-speed) neutron transport. +* Isotropic scattering. +* Time-dependent transport with moving geometry surfaces. +* Fission in the pellet region only. + +Numerical Setup +=============== + +.. list-table:: + :widths: 35 65 + + * - **Spatial mesh (tally)** + - :math:`201 \times 201` in the :math:`(x,z)`-plane + * - **Time mesh (tally)** + - 46 equally spaced bins over :math:`t \in [0,9]` s + * - **Tally score** + - Fission rate + * - **Source particles** + - :math:`10^{5}` + * - **Batches** + - 2 + +Quantities of Interest +====================== + +* Time-resolved 2-D fission rate distribution in the :math:`(x,z)`-plane. +* Animation of the fission rate tracking the moving pellet geometry. + +Reference Solution +================== + +No analytical reference. The solution is validated by verifying that +the fission rate follows the pellet trajectory and that particle +conservation is maintained. + +Step-by-Step Walkthrough +======================== + +**1. Materials (lines 1–22)** + +.. literalinclude:: ../../../examples/moving_pellet/input.py + :language: python + :lines: 1-22 + :linenos: + :lineno-match: + +A fissile fuel pellet (:math:`\Sigma_f = 0.25`, :math:`\nu = 1.5`) and +an air-like background. Both include ``speed`` for time-dependent +transport. + +**2. Surfaces and Moving Geometry (lines 24–30)** + +.. literalinclude:: ../../../examples/moving_pellet/input.py + :language: python + :lines: 24-30 + :linenos: + :lineno-match: + +A z-cylinder and two z-planes define the pellet. 
The key feature: +``surface.move(velocities, durations)`` makes these surfaces **time-dependent**. +The cylinder moves laterally while the endcaps move axially, simulating +a pellet traversing the domain. + +**3. Container and Cells (lines 32–50)** + +.. literalinclude:: ../../../examples/moving_pellet/input.py + :language: python + :lines: 32-50 + :linenos: + :lineno-match: + +The fuel pellet region is defined by the intersection of the cylinder +and the two planes. The air fills the complement inside the bounding box. + +**4. Source (lines 56–63)** + +.. literalinclude:: ../../../examples/moving_pellet/input.py + :language: python + :lines: 56-63 + :linenos: + :lineno-match: + +A small box source near the pellet’s initial position, active over the +full simulation time :math:`t \in [0, 9]` s. + +**5. Tallies, Settings, and Run (lines 69–83)** + +.. literalinclude:: ../../../examples/moving_pellet/input.py + :language: python + :lines: 69-83 + :linenos: + :lineno-match: + +A structured mesh tally in the :math:`(x,z)`-plane with 46 time bins +captures the fission rate as the pellet moves. + +**What to try:** + +- Change the pellet velocities to create different trajectories. +- Set ``visualize = True`` to watch the geometry evolve with + ``mcdc.visualize(..., time=...)``. +- Compare with ``moving_source`` to see source motion vs. geometry motion. + +Full Input +========== + +Click here to view the input file: `examples/moving_pellet/input.py `_. + +The complete input used for this example is embedded below: + +.. literalinclude:: ../../../examples/moving_pellet/input.py + :language: python + :linenos: + +How to Run +========== + +From the repository root run:: + + python examples/moving_pellet/input.py + +Expected Output +=============== + +An HDF5 tally file with time-resolved fission rates and an optional +animation created by the example's post-processing script. 
diff --git a/mcdc/docs/source/examples/moving_source.rst b/mcdc/docs/source/examples/moving_source.rst new file mode 100644 index 000000000..99263cab6 --- /dev/null +++ b/mcdc/docs/source/examples/moving_source.rst @@ -0,0 +1,167 @@ +.. _example_moving_source: + +============================================= +Moving Neutron Source +============================================= + +Problem Description +=================== + +A time-dependent problem in which an isotropic neutron source moves +through a three-dimensional box of air-like material along a prescribed +piecewise-linear trajectory. This example demonstrates MC/DC's +**moving-source** capability. + +Geometry and Materials +====================== + +The computational domain is a rectangular box: +:math:`x \in [-5,5]`, :math:`y \in [-5,5]`, :math:`z \in [-10,10]` cm, +with vacuum boundary conditions on all faces. + +A single homogeneous material (air analogue) fills the entire domain: + +.. list-table:: Cross-section data (mono-energetic, cm\ :sup:`-1`) + :widths: 30 20 20 + + * - **Region** + - :math:`\Sigma_c` + - :math:`\Sigma_s` + * - Air + - 0.002 + - 0.008 + +Neutron speed: :math:`v = 2 \times 10^{5}` cm/s. + +Physical Assumptions +==================== + +* Mono-energetic (one-speed) neutron transport. +* Isotropic scattering. +* Time-dependent transport with a moving point-like source. +* No fission. + +Numerical Setup +=============== + +The source moves in three consecutive phases: + +.. list-table:: + :widths: 15 30 15 + + * - **Phase** + - **Velocity (cm/s)** + - **Duration (s)** + * - 1 + - :math:`(1, 0, 0)` + - 7 + * - 2 + - :math:`(-0.5,\; 2,\; 0)` + - 2 + * - 3 + - :math:`(0,\; -3,\; 0)` + - 1 + +.. 
list-table:: + :widths: 35 65 + + * - **Spatial mesh (tally)** + - :math:`201 \times 201` in the :math:`(x,y)`-plane + * - **Time mesh (tally)** + - 46 equally spaced bins over :math:`t \in [0,10]` s + * - **Tally score** + - Scalar flux + * - **Source particles** + - :math:`10^{5}` + * - **Batches** + - 2 + +Quantities of Interest +====================== + +* Time-resolved 2-D flux distribution :math:`\phi(x,y,t)`. +* Animated GIF of the neutron cloud following the moving source. + +Reference Solution +================== + +No analytical reference. The solution is validated qualitatively by +confirming that the flux maximum tracks the prescribed source trajectory. + +Step-by-Step Walkthrough +======================== + +**1. Materials (lines 1–15)** + +.. literalinclude:: ../../../examples/moving_source/input.py + :language: python + :lines: 1-15 + :linenos: + :lineno-match: + +A single air-like material with ``speed`` defined for time-dependent +transport. + +**2. Geometry (lines 17–27)** + +.. literalinclude:: ../../../examples/moving_source/input.py + :language: python + :lines: 17-27 + :linenos: + :lineno-match: + +A simple box with vacuum boundaries. One cell fills the entire domain. + +**3. Moving Source (lines 33–49)** + +.. literalinclude:: ../../../examples/moving_source/input.py + :language: python + :lines: 33-49 + :linenos: + :lineno-match: + +The source is created with spatial and angular extent, then ``src.move()`` +assigns a piecewise-linear trajectory: three velocity segments with their +durations. The source physically translates through the domain over time. + +**4. Tallies, Settings, and Run (lines 55–65)** + +.. literalinclude:: ../../../examples/moving_source/input.py + :language: python + :lines: 55-65 + :linenos: + :lineno-match: + +A structured :math:`201 \times 201` mesh tally with 46 time bins captures +the evolving 2-D flux. The companion ``process-output.py`` script +generates an animated GIF. 
+ +**What to try:** + +- Change the velocity vectors to create a circular or zigzag path. +- Add more time-resolution bins for smoother animation. +- Compare with ``moving_pellet`` where the geometry moves instead of the source. + +Full Input +========== + +Click here to view the input file: `examples/moving_source/input.py `_. + +The complete input used for this example is embedded below: + +.. literalinclude:: ../../../examples/moving_source/input.py + :language: python + :linenos: + +How to Run +========== + +From the repository root run:: + + python examples/moving_source/input.py + +Expected Output +=============== + +An HDF5 mesh tally with time-resolved 2-D flux slices and a GIF animation +produced by the example's post-processing script. diff --git a/mcdc/docs/source/examples/sphere_in_cube.rst b/mcdc/docs/source/examples/sphere_in_cube.rst new file mode 100644 index 000000000..b261e8d10 --- /dev/null +++ b/mcdc/docs/source/examples/sphere_in_cube.rst @@ -0,0 +1,152 @@ +.. _example_sphere_in_cube: + +============================================= +Sphere-in-Cube Fission Detector +============================================= + +Problem Description +=================== + +A three-dimensional time-dependent problem with a homogeneous fissile +sphere embedded inside a scattering cube. A cell-based tally records +the fission rate inside the sphere, demonstrating MC/DC's **cell tally** +functionality. + +Geometry and Materials +====================== + +The computational domain is a cube: +:math:`x,y,z \in [0,4]` cm, with vacuum boundary conditions. + +A sphere of radius 1.5 cm is centred at :math:`(2,2,2)` cm. + +.. 
list-table:: Cross-section data (mono-energetic, cm\ :sup:`-1`) + :widths: 25 15 15 15 + + * - **Region** + - :math:`\Sigma_s` + - :math:`\Sigma_f` + - :math:`\nu` + * - Cube (outside sphere) + - 1.0 + - — + - — + * - Sphere (fissile) + - — + - 1.0 + - 1.2 + +Physical Assumptions +==================== + +* Mono-energetic (one-speed) neutron transport. +* Isotropic scattering (cube) and isotropic fission (sphere). +* Time-dependent transport with a uniform isotropic source, + :math:`t \in [0,50]` s. +* Implicit capture variance-reduction technique. + +Numerical Setup +=============== + +.. list-table:: + :widths: 35 65 + + * - **Tally type** + - Cell tally on the spherical region + * - **Tally score** + - Fission rate + * - **Source particles** + - :math:`10^{3}` (demonstration) + * - **Batches** + - 2 + +Quantities of Interest +====================== + +* Volume-integrated fission rate inside the sphere. +* Statistical uncertainty (standard deviation) of the cell tally. + +Reference Solution +================== + +The problem can be verified analytically for simple cross-section +combinations using first-flight collision probabilities. + +Step-by-Step Walkthrough +======================== + +**1. Import and Materials (lines 1–12)** + +.. literalinclude:: ../../../examples/sphere_in_cube/input.py + :language: python + :lines: 1-12 + :linenos: + :lineno-match: + +Two mono-energetic materials: a purely fissile material (``pure_f``, +:math:`\Sigma_f = 1.0`, :math:`\nu = 1.2`) for the sphere, and a purely +scattering material (``pure_s``, :math:`\Sigma_s = 1.0`) for the cube. + +**2. Surfaces and CSG Regions (lines 14–26)** + +.. literalinclude:: ../../../examples/sphere_in_cube/input.py + :language: python + :lines: 14-26 + :linenos: + :lineno-match: + +Six planes define the cube, and a ``Sphere`` surface defines the +detector region. The ``~`` (complement) operator carves out the +sphere from the cube. + +**3. Source (lines 32–39)** + +.. 
literalinclude:: ../../../examples/sphere_in_cube/input.py + :language: python + :lines: 32-39 + :linenos: + :lineno-match: + +A uniform isotropic source fills the cube over :math:`t \in [0,50]` s. + +**4. Cell Tally, Settings, and Run (lines 45–55)** + +.. literalinclude:: ../../../examples/sphere_in_cube/input.py + :language: python + :lines: 45-55 + :linenos: + :lineno-match: + +This example uses cell-filtered ``Tally`` — it tallies fission events inside a +specific cell (the sphere) rather than on a spatial mesh. +Implicit capture is enabled to keep particles alive longer. + +**What to try:** + +- Replace the cell filter with a mesh filter to visualise the 3-D flux. +- Change the sphere radius or :math:`\nu` to see how fission rate changes. +- Add a time grid to the cell tally for time-resolved data. + +Full Input +========== + +Click here to view the input file: `examples/sphere_in_cube/input.py `_. + +The complete input used for this example is embedded below: + +.. literalinclude:: ../../../examples/sphere_in_cube/input.py + :language: python + :linenos: + +How to Run +========== + +From the repository root run:: + + python examples/sphere_in_cube/input.py + +Expected Output +=============== + +Volume-integrated fission rate time series saved by the tally and a +small printed summary from the example's ``process-output.py`` script. 
diff --git a/mcdc/docs/source/images/home/DOE_logo.png b/mcdc/docs/source/images/home/DOE_logo.png new file mode 100644 index 000000000..010e34c54 Binary files /dev/null and b/mcdc/docs/source/images/home/DOE_logo.png differ diff --git a/mcdc/docs/source/images/home/NNSA_Logo.png b/mcdc/docs/source/images/home/NNSA_Logo.png new file mode 100644 index 000000000..9098d9c50 Binary files /dev/null and b/mcdc/docs/source/images/home/NNSA_Logo.png differ diff --git a/mcdc/docs/source/images/home/SU.png b/mcdc/docs/source/images/home/SU.png new file mode 100644 index 000000000..ffbd1ac22 Binary files /dev/null and b/mcdc/docs/source/images/home/SU.png differ diff --git a/mcdc/docs/source/images/home/berkeley-logo.png b/mcdc/docs/source/images/home/berkeley-logo.png new file mode 100644 index 000000000..fd24a4f6d Binary files /dev/null and b/mcdc/docs/source/images/home/berkeley-logo.png differ diff --git a/mcdc/docs/source/images/home/carre.png b/mcdc/docs/source/images/home/carre.png new file mode 100644 index 000000000..585588409 Binary files /dev/null and b/mcdc/docs/source/images/home/carre.png differ diff --git a/mcdc/docs/source/images/home/carre2.png b/mcdc/docs/source/images/home/carre2.png new file mode 100644 index 000000000..9de26a319 Binary files /dev/null and b/mcdc/docs/source/images/home/carre2.png differ diff --git a/mcdc/docs/source/images/home/cement-logo-1.png b/mcdc/docs/source/images/home/cement-logo-1.png new file mode 100644 index 000000000..4d74d24fa Binary files /dev/null and b/mcdc/docs/source/images/home/cement-logo-1.png differ diff --git a/mcdc/docs/source/images/home/kobayashi.gif b/mcdc/docs/source/images/home/kobayashi.gif new file mode 100644 index 000000000..55a6601db Binary files /dev/null and b/mcdc/docs/source/images/home/kobayashi.gif differ diff --git a/mcdc/docs/source/images/home/kobayishi-red.png b/mcdc/docs/source/images/home/kobayishi-red.png new file mode 100644 index 000000000..b53808400 Binary files /dev/null and 
b/mcdc/docs/source/images/home/kobayishi-red.png differ diff --git a/mcdc/docs/source/images/home/mcdc.svg b/mcdc/docs/source/images/home/mcdc.svg new file mode 100644 index 000000000..366ea9230 --- /dev/null +++ b/mcdc/docs/source/images/home/mcdc.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/mcdc/docs/source/images/home/ncsu-logo.png b/mcdc/docs/source/images/home/ncsu-logo.png new file mode 100644 index 000000000..6d9014ad4 Binary files /dev/null and b/mcdc/docs/source/images/home/ncsu-logo.png differ diff --git a/mcdc/docs/source/images/home/nd-logo.png b/mcdc/docs/source/images/home/nd-logo.png new file mode 100644 index 000000000..2cc62f262 Binary files /dev/null and b/mcdc/docs/source/images/home/nd-logo.png differ diff --git a/mcdc/docs/source/images/home/osu-logo.png b/mcdc/docs/source/images/home/osu-logo.png new file mode 100644 index 000000000..6813dd469 Binary files /dev/null and b/mcdc/docs/source/images/home/osu-logo.png differ diff --git a/mcdc/docs/source/images/home/psaapiii.png b/mcdc/docs/source/images/home/psaapiii.png new file mode 100644 index 000000000..3fcf650db Binary files /dev/null and b/mcdc/docs/source/images/home/psaapiii.png differ diff --git a/mcdc/docs/source/images/home/psaapiv.png b/mcdc/docs/source/images/home/psaapiv.png new file mode 100644 index 000000000..21518de42 Binary files /dev/null and b/mcdc/docs/source/images/home/psaapiv.png differ diff --git a/mcdc/docs/source/images/home/smr-mcdc.png b/mcdc/docs/source/images/home/smr-mcdc.png new file mode 100644 index 000000000..ea5d398e6 Binary files /dev/null and b/mcdc/docs/source/images/home/smr-mcdc.png differ diff --git a/mcdc/docs/source/images/home/ucsd-logo.png b/mcdc/docs/source/images/home/ucsd-logo.png new file mode 100644 index 000000000..0a6ad6d4a Binary files /dev/null and b/mcdc/docs/source/images/home/ucsd-logo.png differ diff --git a/mcdc/docs/source/images/home/vanderbilt-logo.png b/mcdc/docs/source/images/home/vanderbilt-logo.png new file mode 
100644 index 000000000..8cf0cc4ea Binary files /dev/null and b/mcdc/docs/source/images/home/vanderbilt-logo.png differ diff --git a/mcdc/docs/source/images/theory/gpu_comp/amd_flow.png b/mcdc/docs/source/images/theory/gpu_comp/amd_flow.png new file mode 100644 index 000000000..2e03ab9a0 Binary files /dev/null and b/mcdc/docs/source/images/theory/gpu_comp/amd_flow.png differ diff --git a/mcdc/docs/source/images/theory/gpu_comp/nvcc_flow.png b/mcdc/docs/source/images/theory/gpu_comp/nvcc_flow.png new file mode 100644 index 000000000..99a08bcbc Binary files /dev/null and b/mcdc/docs/source/images/theory/gpu_comp/nvcc_flow.png differ diff --git a/mcdc/docs/source/images/user/af_slab_1.png b/mcdc/docs/source/images/user/af_slab_1.png new file mode 100644 index 000000000..3d54fb7ad Binary files /dev/null and b/mcdc/docs/source/images/user/af_slab_1.png differ diff --git a/mcdc/docs/source/images/user/af_slab_2.png b/mcdc/docs/source/images/user/af_slab_2.png new file mode 100644 index 000000000..e27253f56 Binary files /dev/null and b/mcdc/docs/source/images/user/af_slab_2.png differ diff --git a/mcdc/docs/source/images/user/c5g7.png b/mcdc/docs/source/images/user/c5g7.png new file mode 100644 index 000000000..1169d1344 Binary files /dev/null and b/mcdc/docs/source/images/user/c5g7.png differ diff --git a/mcdc/docs/source/images/user/dragon.gif b/mcdc/docs/source/images/user/dragon.gif new file mode 100644 index 000000000..0d5bf12d8 Binary files /dev/null and b/mcdc/docs/source/images/user/dragon.gif differ diff --git a/mcdc/docs/source/images/user/j_slab_1.png b/mcdc/docs/source/images/user/j_slab_1.png new file mode 100644 index 000000000..46c730d5f Binary files /dev/null and b/mcdc/docs/source/images/user/j_slab_1.png differ diff --git a/mcdc/docs/source/images/user/j_slab_2.png b/mcdc/docs/source/images/user/j_slab_2.png new file mode 100644 index 000000000..065d71a1a Binary files /dev/null and b/mcdc/docs/source/images/user/j_slab_2.png differ diff --git 
a/mcdc/docs/source/images/user/kobayashi-white.png b/mcdc/docs/source/images/user/kobayashi-white.png new file mode 100644 index 000000000..29993cf68 Binary files /dev/null and b/mcdc/docs/source/images/user/kobayashi-white.png differ diff --git a/mcdc/docs/source/images/user/sf_slab_1.png b/mcdc/docs/source/images/user/sf_slab_1.png new file mode 100644 index 000000000..e420defba Binary files /dev/null and b/mcdc/docs/source/images/user/sf_slab_1.png differ diff --git a/mcdc/docs/source/images/user/sf_slab_2.png b/mcdc/docs/source/images/user/sf_slab_2.png new file mode 100644 index 000000000..b599bf2a6 Binary files /dev/null and b/mcdc/docs/source/images/user/sf_slab_2.png differ diff --git a/mcdc/docs/source/index.rst b/mcdc/docs/source/index.rst new file mode 100644 index 000000000..6b42d7617 --- /dev/null +++ b/mcdc/docs/source/index.rst @@ -0,0 +1,82 @@ +.. MC/DC documentation master file + +====================================== +MC/DC: Monte Carlo Dynamic Code +====================================== + +MC/DC is a performant, scalable, and portable Python-based Monte Carlo radiation +transport software package. It is purpose-built as a rapid methods development +platform capable of leveraging modern high-performance computing systems, supporting +both CPUs and GPUs. + +MC/DC supports continuous-energy and multi-group neutron transport calculations. It is +capable of running fixed-source and eigenvalue transport simulations on models built +from constructive solid geometry. For continuous-energy neutron transport, +MC/DC translates `ACE `_ nuclear data libraries into +its native `HDF5 `_ format. Photon, electron, +and charged-particle transport are currently under development, with the goal of making +MC/DC a multi-radiation/particle transport software package. + +While MC/DC's Python environment promotes rapid iterative testing of ideas, its +Numba-based compilation framework improves runtime performance and enables portability. 
+`Harmonize `_ serves as the GPU execution +framework, optimizing device utilization within stochastic simulations; and +`MPI4Py `_ is used to achieve parallel +scalability across nodes in large computer clusters. In addition to running on commonly +used desktops and workstations, MC/DC has been tested on large heterogeneous +high-performance systems, including +`Lassen `_ +(IBM POWER9 and NVIDIA Volta V100) and +`Tuolumne `_ (AMD MI300A APU). + +MC/DC development was initiated by the Center for Exascale Monte Carlo Neutron +Transport (`CEMeNT `_), a Focused Investigatory Center +of the Predictive Science Academic Alliance Program–III +(`PSAAP-III `_). MC/DC is currently under active development +by the Center for Advancing the Radiation Resilience of Electronics +(`CARRE `_), a Predictive Simulation Center of +`PSAAP-IV `_. MC/DC is open source +(`BSD 3-Clause `_) and +welcomes external contributions via `GitHub `_. + +.. admonition:: Recommended citation + :class: tip + + Morgan, Joanna Piper, et al. "Monte Carlo/Dynamic Code (MC/DC): An accelerated + Python package for fully transient neutron transport and rapid methods development." + Journal of Open Source Software 9.96 (2024): 6415. + https://joss.theoj.org/papers/10.21105/joss.06415 + +------------------------------ +Contents +------------------------------ + +.. toctree:: + :maxdepth: 1 + :caption: User Documentation + + install + user/index + pythonapi/index + examples/index + +.. toctree:: + :maxdepth: 1 + :caption: Developer Documentation + + contribution/index + theory/index + +.. toctree:: + :maxdepth: 1 + :caption: References + + publications + +.. sidebar-links:: + :caption: External Links + :pypi: mcdc + :github: + + CARRE + CEMeNT diff --git a/mcdc/docs/source/install.rst b/mcdc/docs/source/install.rst new file mode 100644 index 000000000..9750d3733 --- /dev/null +++ b/mcdc/docs/source/install.rst @@ -0,0 +1,267 @@ +.. 
_install: + +=================== +Installation Guide +=================== + +Whether installing MC/DC as a user or from source as a developer, +we recommend doing so using an environment manager like venv or conda. +This will avoid the need for any admin access and keep dependencies clean. + +In general, :ref:`creating-a-venv-environment` and :ref:`installing-with-pip` is easier and recommended. +Creating a conda environment and :ref:`installing-with-conda` is more robust and reliable, but is also more difficult. +A conda environment is necessary to install MC/DC on LLNL's Lassen machine. + + + +.. _creating-a-venv-environment: + +--------------------------- +Creating a venv environment +--------------------------- + +Python `virtual environments `_ are the easy and +recommended way to get MC/DC operating on personal machines as well as HPCs; +all you need is a working Python version with venv installed. +Particularly on HPCs, using a Python virtual environment is convenient because +system admins will have already configured venv and the pip within it to load packages and dependencies +from the proper sources. +HPCs often use a module system, so before doing anything else, +``module load python/``. + +A python virtual environment can (usually) be created using + +.. code-block:: sh + + python -m venv + +Once you have created a venv, you will need to activate it + +.. code-block:: sh + + source /bin/activate + +and will need to do so every time a new terminal instance is launched. +Once your environment is active, you can move on to :ref:`installing-with-pip`. + + +.. _installing-with-pip: + +------------------- +Installing with pip +------------------- +Assuming you have a working Python environment, you can install using pip. +Doing so within an active venv or conda environment avoids the need for any admin access +and keeps dependencies clean. 
+ +If you would like to run MC/DC as published in the main branch *and* +do not need to develop in MC/DC, you can install from PyPI: + +.. code-block:: sh + + pip install mcdc + +---------------------- +Installing from Source +---------------------- +If you would like to execute a version of MC/DC from a specific branch or +*do* plan to develop in MC/DC, you'll need to install from source: + +#. Clone the MC/DC repo: ``git clone https://github.com/CEMeNT-PSAAP/MCDC.git`` +#. Go to your new MC/DC directory: ``cd MCDC`` +#. Install the package from your MC/DC files: ``pip install -e .`` + +This should install all needed dependencies without a hitch. +The `-e` flag installs MC/DC as an editable package, meaning that any changes +you make to the MC/DC source files, including checking out a different +branch, will be immediately reflected without needing to do any re-installation. + +.. _installing-with-conda: + +-------------------------- +Installing MC/DC via conda +-------------------------- + +Conda is the most robust (works even on bespoke systems) option to install MC/DC. +`Conda `_ is an open source package and environment management system +that runs on Windows, macOS, and Linux. It allows for easy installing and switching between multiple +versions of software packages and their dependencies. +Conda is really useful on systems with non-standard hardware (e.g. not x86 CPUs) like Lassen, where +mpi4py is often the most troublesome dependency. + +First, ``conda`` should be installed with `Miniconda `_ +or `Anaconda `_. HPC instructions: + +`Dane `_ (LLNL, x86_64), + +.. code-block:: sh + + wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh + bash Miniconda3-latest-Linux-x86_64.sh + + +`Lassen `_ (LLNL, IBM Power9), + +.. 
code-block:: sh + + wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-ppc64le.sh + bash Miniconda3-latest-Linux-ppc64le.sh + + +Then create and activate a new conda environment called *mcdc-env* in +which to install MC/DC. MC/DC supports Python ``>3.10``; +we recommend Python 3.11: + +.. code-block:: sh + + conda create -n mcdc-env python=3.11 + conda activate mcdc-env + +Then, MC/DC can be installed from source by first cloning the MC/DC repository: + +.. code-block:: sh + + git clone https://github.com/CEMeNT-PSAAP/MCDC.git + cd MCDC + +then using the ``install.sh`` within it. The install script will +build MC/DC and all of its dependencies and execute any necessary patches. +This has been tested on Quartz, Dane, Tioga, Lassen, and Apple M2. +The ``install.sh`` script **will fail outside of a conda environment**. + +On HPC machines, the script will install mpi4py +`from source `_. +This means that all appropriate modules must be loaded prior to executing. + +On Quartz, the default modules are sufficient (``intel-classic`` and ``mvapich2``). +On Lassen, ``module load gcc/8 cuda/11.8``. Then, + +.. code-block:: sh + + bash install.sh --hpc + + +On local machines, mpi4py will be installed using conda, + +.. code-block:: sh + + bash install.sh + +To confirm that everything is properly installed, execute ``pytest`` from the MCDC directory. + +.. _installing-via-containers: + +-------------------------- +Installing via Containers +-------------------------- + +For container-based installation and execution, see :doc:`user/container`. + +.. toctree:: + :maxdepth: 1 + + user/container + +.. _install-data-library: + +----------------------------------------- +Generating a Data Library from ACE Files +----------------------------------------- + +MC/DC ships with a conversion tool in ``tools/data_library_generator/`` that reads +standard ACE-format nuclear data files and writes them into MC/DC's per-nuclide +HDF5 format.
This is the primary path for creating CE libraries. + +**Prerequisites:** + +.. code-block:: sh + + pip install ACEtk h5py numpy tqdm + +You also need a set of ACE files (e.g., from `NJOY `_ or +an ENDF/B distribution). + +**Environment variables:** + +.. list-table:: + :widths: 25 75 + :header-rows: 1 + + * - Variable + - Description + * - ``MCDC_ACELIB`` + - Path to the directory containing your ACE files. + * - ``MCDC_LIB`` + - Path to the output directory where MC/DC HDF5 files will be written. + +**Running the generator:** + +.. code-block:: sh + + export MCDC_ACELIB=/path/to/ace/files + export MCDC_LIB=/path/to/mcdc/library + + cd tools/data_library_generator + python generate.py + +By default the tool only converts nuclides that do not already have a corresponding +HDF5 file in ``$MCDC_LIB``. Use ``--rewrite`` to regenerate all files, or +``--verbose`` for detailed per-nuclide output: + +.. code-block:: sh + + python generate.py --rewrite --verbose + +The generator processes each ACE file as follows: + +#. Reads the ACE header to determine nuclide identity (Z, A, isomeric state) + and temperature. +#. Extracts the principal cross-section block (energy grid, elastic, capture, + fission, inelastic channels) and writes them as HDF5 datasets grouped by + reaction type (elastic scattering, capture, inelastic scattering, fission). +#. Extracts angular distributions (tabulated cosine PDFs) and energy + distributions (level scattering, evaporation, Maxwellian, Kalbach-Mann, + N-body phase space, tabulated outgoing energy) for each reaction channel. +#. For fissionable nuclides, extracts prompt/delayed :math:`\nu(E)` multiplicities, + delayed neutron precursor fractions, decay constants, and energy spectra. + +The resulting HDF5 file (e.g., ``U235-293.6K.h5``) is ready for use with ``mcdc.Material()``. 
+ + +--------------------------------- +GPU Operability (MC/DC+Harmonize) +--------------------------------- + +MC/DC supports most of its Numba-enabled features for GPU compilation and execution. +When targeting GPUs, MC/DC uses the `Harmonize `_ library as its GPU runtime, a.k.a. the thing that actually executes MC/DC functions. +How Harmonize works gets a little involved, but in short, +Harmonize acts as MC/DC's GPU runtime by using two major scheduling schemes: an event scheduler similar to those implemented in OpenMC and Shift, plus a novel scheduler. +For more information on Harmonize and how we compile MC/DC with it, see this `TOMACS article describing the async scheduler `_ or our publications in American Nuclear Society: Math and Comp Meeting in 2025. + +If you encounter problems with configuration, please file `GitHub issues promptly `_, +especially when on supported supercomputers (LLNL's `Tioga `_, `El Capitan `_, and `Lassen `_). + +.. rubric:: Nvidia GPUs + +To compile and execute MC/DC on Nvidia GPUs first ensure you have the `Harmonize prereqs `_ (CUDA=11.8, Numba>=0.60.0) and a working MC/DC version >=0.10.0. Then, + +#. Clone the harmonize repo: ``git clone https://github.com/CEMeNT-PSAAP/harmonize.git`` +#. Install into the proper Python env: ``pip install -e .`` + +Operability should now be enabled. + +.. _install-amd-gpus: + +.. rubric:: AMD GPUs + +The prerequisites for AMD operability are slightly more complex and +require a patch to Numba to allow for AMD target triple LLVM-IR. +It is recommended that this is done within a Python venv virtual environment. + +To compile and execute MC/DC on AMD GPUs first ensure you have the `Harmonize prereqs `_ (ROCm=6.0.0, Numba>=0.60.0) and a working MC/DC version >=0.11.0. Then, + +#. Patch Numba to enable HIP (`instructions here `_) +#. Clone harmonize and `switch to the AMD `_ branch with ``git switch amd_event_interop_revamp`` +#.
Install Harmonize with ``pip install -e .`` or using `Harmonize's install script `_ + +Operability should now be enabled. diff --git a/mcdc/docs/source/publications.rst b/mcdc/docs/source/publications.rst new file mode 100644 index 000000000..eaf986a88 --- /dev/null +++ b/mcdc/docs/source/publications.rst @@ -0,0 +1,83 @@ +.. _pubs: + +============= +Publications +============= + +Overview +-------- + +- Morgan, Joanna Piper, et al. "Monte Carlo/Dynamic Code (MC/DC): An accelerated Python package for fully transient neutron transport and rapid methods development." Journal of Open Source Software 9.96 (2024): 6415. https://joss.theoj.org/papers/10.21105/joss.06415 + +- Variansyah, Ilham, et al. "Development of MC/DC: a performant, scalable, and portable Python-based Monte Carlo neutron transport code." In International Conference on Mathematics and Computational Methods Applied to Nuclear Science and Engineering. Niagara Falls, Ontario, Canada (2023). Preprint: https://arxiv.org/abs/2305.07636 + +Benchmarking, Verification, and Validation +------------------------------------------ + +- Variansyah, I. Four-Phase C5G7 Transient Benchmark for Neutron Transport. Zenodo, 23 June 2025, https://doi.org/10.5281/zenodo.15719118 + +- Variansyah, I. Time-Dependent Kobayashi Dog-Leg Benchmark for Neutron Transport. Zenodo, 23 Mar. 2025, https://doi.org/10.5281/zenodo.15069882 + +- Northrop, J., et al. Inter-code Comparison of Time Independent Pulsed Sphere Benchmark Results. Zenodo, 2022, https://doi.org/10.5281/zenodo.7250603. + +Software Engineering +-------------------- + +- Morgan, Joanna Piper, et al. "Performant and Portable Monte Carlo Neutron Transport via Numba." Computing in Science & Engineering (2025). https://ieeexplore.ieee.org/abstract/document/10926859/ + +- Cuneo, Braxton, and Mike Bailey. "Divergence reduction in Monte Carlo neutron transport with on-GPU asynchronous scheduling." ACM Transactions on Modeling and Computer Simulation 34.1 (2024): 1-25. 
https://dl.acm.org/doi/abs/10.1145/3626957 + +- Morgan, J. P., et al. “Explorations of Python-Based Automatic Hardware Code Generation for Neutron Transport Applications”. Transactions of The American Nuclear Society, 1, vol. 126, Zenodo, 2022, https://doi.org/10.5281/zenodo.6646813. + +Variance/Runtime Reduction Technique +------------------------------------ + +- Variansyah, Ilham, Ryan G. McClarren, and Todd S. Palmer. "Implicit Collision Multiplicity Adjustment for Efficient Monte Carlo Transport Simulation of Reactivity Excursion." In International Conference on Mathematics and Computational Methods Applied to Nuclear Science and Engineering. Denver, Colorado, USA (2025). Preprint: https://arxiv.org/abs/2501.06391 + +- Northrop, Jordan, et al. "Interplay of Variance Reduction and Population Control in Monte Carlo Neutron Transport." Nuclear Science and Engineering (2025): 1-12. https://www.tandfonline.com/doi/abs/10.1080/00295639.2025.2567750 + +- Morgan, Joanna Piper, et al. "Hybrid Delta Tracking Schemes Using a Track-Length Estimator." arXiv preprint arXiv:2510.00152 (2025). https://doi.org/10.48550/arXiv.2510.00152 + +- Variansyah, Ilham, and Ryan G. McClarren. "Analysis of population control techniques for time-dependent and eigenvalue Monte Carlo neutron transport calculations." Nuclear Science and Engineering 196.11 (2022): 1280-1305. https://www.tandfonline.com/doi/abs/10.1080/00295639.2022.2091906 + +- Variansyah, Ilham, and Ryan G. McClarren. "Performance of Population Control Techniques in Monte Carlo Reactor Criticality Simulations." Proc. PHYSOR. 2022. Preprint https://www.researchgate.net/profile/Ilham-Variansyah/publication/360852360_Performance_of_Population_Control_Techniques_in_Monte_Carlo_Reactor_Criticality_Simulations/links/628eb74c8d19206823dae963/Performance-of-Population-Control-Techniques-in-Monte-Carlo-Reactor-Criticality-Simulation.pdf + +Hybrid Monte Carlo Transport +---------------------------- + +- Pasmann, Samuel, et al. 
"Mitigating Spatial Error in the Iterative Quasi–Monte Carlo (iQMC) Method for Neutron Transport Simulations with Linear Discontinuous Source Tilting and Effective Scattering and Fission Rate Tallies." Nuclear Science and Engineering 199.sup1 (2025): S381-S396. https://www.tandfonline.com/doi/abs/10.1080/00295639.2024.2332007 + +- Novellino, Vincent N., and Dmitriy Y. Anistratov. Analysis of Hybrid MC/Deterministic Methods for Transport Problems Based on Low-Order Equations Discretized by Finite Volume Scheme. Transaction of American Nuclear Society, v. 130, 2024. Preprint: https://doi.org/10.48550/arXiv.2403.05673 + +- Whewell, Ben, et al. "Multigroup neutron transport using a collision-based hybrid method." Nuclear science and engineering 197.7 (2023): 1386-1405. https://www.tandfonline.com/doi/abs/10.1080/00295639.2022.2154119 + +- Pasmann, Sam, et al. "A quasi–Monte Carlo method with Krylov linear solvers for multigroup neutron transport simulations." Nuclear Science and Engineering 197.6 (2023): 1159-1173. https://www.tandfonline.com/doi/abs/10.1080/00295639.2022.2143704 + +- Pasmann, Sam, et al. "A quasi–Monte Carlo method with Krylov linear solvers for multigroup neutron transport simulations." Nuclear Science and Engineering 197.6 (2023): 1159-1173. https://www.tandfonline.com/doi/abs/10.1080/00295639.2022.2143704 + +- Pasmann, Samuel, et al. “iQMC: Iterative Quasi-Monte Carlo with Krylov Linear Solvers for k-Eigenvalue Neutron Transport Simulations.” In International Conference on Mathematics and Computational Methods Applied to Nuclear Science and Engineering. Niagara Falls, Ontario, Canada (2023). Preprint: https://arxiv.org/abs/2306.11600 + +- Pasmann, Samuel, Ilham Variansyah, and R. G. McClarren. "Convergent transport source iteration calculations with Quasi-Monte Carlo." Transactions of the American Nuclear Society 124 (2021): 192-195. 
+ +Uncertainty Quantification and Sensitivity Analysis +--------------------------------------------------- + +- Variansyah, Ilham, Ryan G. McClarren, and Todd S. Palmer. "Derivative Source Method for Monte Carlo Transport Calculation of Sensitivities to Material Densities and Dimensions." In International Conference on Mathematics and Computational Methods Applied to Nuclear Science and Engineering. Denver, Colorado, USA (2025). Preprint: https://arxiv.org/abs/2501.06397 + +- Clements, Kayla B., et al. "A variance deconvolution estimator for efficient uncertainty quantification in Monte Carlo radiation transport applications." Journal of Quantitative Spectroscopy and Radiative Transfer 319 (2024): 108958. https://www.sciencedirect.com/science/article/pii/S0022407324000657 + +- Clements, Kayla, et al. "Global Sensitivity Analysis in Monte Carlo Radiation Transport." In International Conference on Mathematics and Computational Methods Applied to Nuclear Science and Engineering. Niagara Falls, Ontario, Canada (2023). Preprint: https://arxiv.org/abs/2403.06106 + +- Clements, Kayla C., G. Geraci, and Aaron J. Olson. "A variance deconvolution approach to sampling uncertainty quantification for Monte Carlo radiation transport solvers." Computer Science Research Institute Summer Proceedings 2021 (2021): 293-307. https://www.osti.gov/biblio/1855061 + +Miscellany +---------- + +- Lame, Ethan, et al. "Compressed Sensing Methods for Memory Reduction in Monte Carlo Simulations." arXiv preprint arXiv:2602.07771 (2026). https://doi.org/10.48550/arXiv.2602.07771 + +- Cuneo, Braxton S., and Ilham Variansyah. “An Alternative to Stride-Based RNG for Monte Carlo Transport.” In Transactions of The American Nuclear Society, volume 130 (1), pp. 423–426 (2024). Preprint: https://arxiv.org/abs/2403.06362 + +- Variansyah, Ilham, and Ryan G. McClarren. 
“High-fidelity treatment for object movement in time-dependent Monte Carlo transport simulations.” In International Conference on Mathematics and Computational Methods Applied to Nuclear Science and Engineering. Niagara Falls, Ontario, Canada (2023). Preprint: https://doi.org/10.48550/arXiv.2305.07641 + +- Variansyah, Ilham, and Ryan G. McClarren. “An effective initial particle sampling technique for Monte Carlo reactor transient simulations.” In International Conference on Mathematics and Computational Methods Applied to Nuclear Science and Engineering. Niagara Falls, Ontario, Canada (2023). Preprint: https://doi.org/10.48550/arXiv.2305.07646 + diff --git a/mcdc/docs/source/pythonapi/index.rst b/mcdc/docs/source/pythonapi/index.rst new file mode 100644 index 000000000..3b108838a --- /dev/null +++ b/mcdc/docs/source/pythonapi/index.rst @@ -0,0 +1,110 @@ +.. _pythonapi: + +================ +Input Definition +================ + +Full API documentation. + + +Defining materials +------------------ + +.. autosummary:: + :toctree: generated + :nosignatures: + :template: omcclass.rst + + mcdc.Material + mcdc.MaterialMG + + +Defining geometry +----------------- + +.. autosummary:: + :toctree: generated + :nosignatures: + :template: omcclass.rst + + mcdc.Cell + mcdc.Lattice + mcdc.Surface + mcdc.Universe + +Defining meshes +--------------- + +.. autosummary:: + :toctree: generated + :nosignatures: + :template: omcclass.rst + + mcdc.MeshUniform + mcdc.MeshStructured + +Defining sources +---------------- + +.. autosummary:: + :toctree: generated + :nosignatures: + :template: omcclass.rst + + mcdc.Source + +Defining tallies +---------------- + +.. autosummary:: + :toctree: generated + :nosignatures: + :template: omcclass.rst + + mcdc.Tally + +Defining simulation settings +----------------------------- + +Settings are configured by assigning attributes on the ``mcdc.settings`` singleton. +Key attributes include: + +- ``mcdc.settings.N_particle`` — Number of particles. 
+- ``mcdc.settings.N_batch`` — Number of batches. +- ``mcdc.settings.rng_seed`` — RNG seed. +- ``mcdc.settings.output_name`` — Output file name (default: ``"output"``). +- ``mcdc.settings.time_boundary`` — Time boundary. + +Methods: + +- ``mcdc.settings.set_eigenmode(N_inactive=..., N_active=..., k_init=...)`` — Enable k-eigenvalue mode. +- ``mcdc.settings.set_time_census(time, tally_frequency=...)`` — Set time census parameters. +- ``mcdc.settings.set_source_file(source_file_name)`` — Load source particles from file. + +Defining techniques +------------------- + +Techniques are enabled by calling methods on the ``mcdc.simulation`` singleton: + +- ``mcdc.simulation.implicit_capture(active=True)`` +- ``mcdc.simulation.weighted_emission(active=True, weight_target=1.0)`` +- ``mcdc.simulation.weight_roulette(weight_threshold=0.0, weight_target=1.0)`` +- ``mcdc.simulation.population_control(active=True)`` + +Running +------- + +.. autosummary:: + :toctree: generated + :nosignatures: + :template: omcfunction.rst + + mcdc.run + + + + + + + + diff --git a/mcdc/docs/source/theory/ana.rst b/mcdc/docs/source/theory/ana.rst new file mode 100644 index 000000000..7ed8fcc65 --- /dev/null +++ b/mcdc/docs/source/theory/ana.rst @@ -0,0 +1,53 @@ +.. _ana: + +============================ +Acceleration and Abstraction +============================ + +MC/DC employs a layered compilation and abstraction strategy that allows the same Python source code to target CPUs (pure Python or Numba JIT) and GPUs (via Harmonize) without modification to the transport algorithms. + +Execution Modes +--------------- + +MC/DC supports three execution modes, selected at runtime with the ``--mode`` flag: + +- **Python mode** (``--mode=python``): Transport kernels run as interpreted Python. Useful for debugging and rapid prototyping. +- **Numba mode** (``--mode=numba``): Transport kernels are just-in-time compiled to native machine code using `Numba `_. 
This provides significant speedup (often 100x or more) at the cost of an initial compilation overhead of 15–80 seconds. +- **Numba debug mode** (``--mode=numba_debug``): JIT compilation with extra debug instrumentation (bounds checking, full tracebacks, type inference logging). Slower, but produces actionable error messages. + +The ``--target`` flag selects the hardware target: ``cpu`` (default) or ``gpu``. + +Numba Object Generation +----------------------- + +MC/DC's simulation state (materials, surfaces, cells, tallies, settings, particle banks, etc.) is defined as annotated Python classes in ``mcdc/object_/``. +At startup, the **Numba object generator** (``mcdc/code_factory/numba_objects_generator.py``) converts these class hierarchies into NumPy structured array dtypes: + +#. Class annotations (type hints) are read and mapped to NumPy dtypes. +#. Polymorphic objects (e.g., different surface types) are represented using ``parent_ID``/``child_ID`` fields that index into typed sub-arrays. +#. All simulation data is flattened into a single contiguous NumPy buffer (``data``), enabling efficient access from JIT-compiled code. +#. Getter and setter access functions (in ``mcdc/mcdc_get/`` and ``mcdc/mcdc_set/``) are auto-generated so that JIT-compiled transport kernels can read and write simulation state without Python object overhead. + +This approach allows the transport code to be written in natural, object-oriented Python while still achieving the performance of flat array access in compiled mode. + +GPU Portability +--------------- + +When targeting GPUs, MC/DC uses the `Harmonize `_ library as its GPU runtime. +The GPU program builder (``mcdc/code_factory/gpu/program_builder.py``) constructs a Harmonize ``RuntimeSpec`` that includes: + +- **Global state**: the simulation structured array and the flat data buffer. +- **Device functions**: the MC/DC transport kernels, compiled from Python to device code via Numba. 
+- **Scheduling strategy**: either event-based (``--gpu_strategy=event``) or asynchronous (``--gpu_strategy=async``, Nvidia only). + +The compilation pipeline differs by vendor: + +- **Nvidia**: Python → PTX (via ``numba.cuda``) → relocatable device code (via ``nvcc``) → linked shared library. +- **AMD**: Python → LLVM-IR (via a `Numba-HIP patch `_) → relocatable device code (via ``hipcc`` / ``clang``) → linked shared library. + +For a detailed walkthrough of the compilation flow, see the :ref:`theory_gpu` section. + +For more details, see: + +- J. P. Morgan, I. Variansyah, B. Cuneo, T. S. Palmer, and K. E. Niemeyer. "Performance Portable Monte Carlo Neutron Transport in MCDC via Numba." Preprint DOI 10.48550/arXiv.2306.07847. +- B. Cuneo and M. Bailey. "Divergence Reduction in Monte Carlo Neutron Transport with On-GPU Asynchronous Scheduling." *ACM TOMACS* (2023). DOI 10.1145/3626957. diff --git a/mcdc/docs/source/theory/compressed_sensing.rst b/mcdc/docs/source/theory/compressed_sensing.rst new file mode 100644 index 000000000..b255f2f3d --- /dev/null +++ b/mcdc/docs/source/theory/compressed_sensing.rst @@ -0,0 +1,34 @@ +.. _compressed_sensing: + +================== +Compressed Sensing +================== + +Compressed sensing is a signal-processing technique that reconstructs a sparse signal from far fewer measurements than traditional Nyquist sampling would require. +In the context of Monte Carlo neutron transport, compressed sensing can be applied to recover spatial or temporal tally distributions from a limited number of particle histories. + +Motivation +---------- + +Monte Carlo simulations can be expensive, and tallies over fine spatial or temporal meshes may have large statistical uncertainties unless a prohibitive number of particles are tracked. 
+If the underlying solution is sparse or compressible in some basis (e.g., wavelet, Fourier, or modal), compressed sensing theory guarantees that accurate reconstruction is possible from significantly fewer observations. + +This is particularly relevant for: + +- Fine-mesh tally reconstruction from coarse-mesh Monte Carlo results, +- Time-eigenvalue estimation using dynamic mode decomposition (DMD) with limited snapshots, +- Reducing the overall particle count needed for acceptable tally quality in large problems. + +Related Work +------------ + +Smith, Variansyah, and McClarren explored **Compressed Dynamic Mode Decomposition** for time-eigenvalue calculations, combining sparse sampling ideas with DMD to extract dominant time eigenvalues from transient MC simulations with fewer snapshots than standard approaches require. + +For more details, see: + +- E. Smith, I. Variansyah, and R. G. McClarren. "Compressed Dynamic Mode Decomposition for Time-Eigenvalue Calculations." *M&C 2023*. Preprint DOI 10.48550/arXiv.2208.10942. + +.. note:: + + Compressed sensing is an active research area within CEMeNT. + Full integration into the MC/DC transport solver is under development. \ No newline at end of file diff --git a/mcdc/docs/source/theory/cont_energy.rst b/mcdc/docs/source/theory/cont_energy.rst new file mode 100644 index 000000000..7fe8aa35f --- /dev/null +++ b/mcdc/docs/source/theory/cont_energy.rst @@ -0,0 +1,180 @@ +.. _cont_energy: + +================= +Continuous Energy +================= + +MC/DC supports continuous energy (CE) neutron transport using pointwise nuclear data libraries. +In CE mode, cross sections are represented as energy-dependent tabulated data rather than multi-group averages, enabling higher-fidelity simulations. + +Data Libraries +-------------- + +CE data is loaded from HDF5 files for each nuclide at a specified temperature. +The environment variable ``MCDC_LIB`` must point to the library directory. 
+Each nuclide file (e.g., ``U235-293.6K.h5``) contains: + +- An energy grid (converted from MeV to eV on load), +- Pointwise total, elastic, capture, inelastic, and fission cross sections, +- Angular and energy distributions for secondary particles, +- Prompt and delayed fission neutron multiplicities and spectra, +- Delayed neutron precursor data (fractions, decay constants, and energy spectra). + +Supported temperature points are 0.1, 233.15, 273.15, 293.6, 600.0, 900.0, 1200.0, and 2500.0 K. +MC/DC selects the nearest available temperature for each nuclide. + +Cross Section Evaluation +------------------------ + +Microscopic cross sections are evaluated by binary search on the energy grid followed by linear interpolation between bounding points. +Macroscopic cross sections for a material are computed as the sum over constituent nuclides: + +.. math:: + + \Sigma(E) = \sum_i N_i \, \sigma_i(E) + +where :math:`N_i` is the atom density and :math:`\sigma_i(E)` is the microscopic cross section of nuclide :math:`i` at energy :math:`E`. + +Collision Physics +----------------- + +CE collision processing implements full center-of-mass (COM) kinematics: + +- **Elastic scattering** (MT-2): Thermal motion of the target nucleus is sampled from a Maxwellian distribution parameterized by :math:`\beta = \sqrt{A m / (2 k_B T)}`, where :math:`A` is the mass ratio. Rejection sampling is used for the relative speed. +- **Inelastic scattering**: Multiple MT channels with tabulated energy-angle distributions (Kalbach-Mann, evaporation, Maxwellian, N-body, level scattering). +- **Capture**: Particle is absorbed; implicit capture can be enabled as a variance reduction technique. +- **Fission**: Secondary particles are emitted using :math:`\nu(E)/k_\text{eff}` scaling, with prompt and delayed components sampled separately. + +Relativistic particle speed is computed as: + +.. 
math:: + + v = c \, \frac{\sqrt{E(E + 2m_n c^2)}}{E + m_n c^2} + + +Generating a Data Library from ACE Files +----------------------------------------- + +MC/DC ships with a conversion tool in ``tools/data_library_generator/`` that reads +standard ACE-format nuclear data files and writes them into MC/DC's per-nuclide +HDF5 format. This is the primary path for creating CE libraries. + +**Prerequisites:** + +.. code-block:: sh + + pip install ACEtk h5py numpy tqdm + +You also need a set of ACE files (e.g., from `NJOY `_ or +an ENDF/B distribution). + +**Environment variables:** + +.. list-table:: + :widths: 25 75 + :header-rows: 1 + + * - Variable + - Description + * - ``MCDC_ACELIB`` + - Path to the directory containing your ACE files. + * - ``MCDC_LIB`` + - Path to the output directory where MC/DC HDF5 files will be written. + +**Running the generator:** + +.. code-block:: sh + + export MCDC_ACELIB=/path/to/ace/files + export MCDC_LIB=/path/to/mcdc/library + + cd tools/data_library_generator + python generate.py + +By default the tool only converts nuclides that do not already have a corresponding +HDF5 file in ``$MCDC_LIB``. Use ``--rewrite`` to regenerate all files, or +``--verbose`` for detailed per-nuclide output: + +.. code-block:: sh + + python generate.py --rewrite --verbose + +The generator processes each ACE file as follows: + +#. Reads the ACE header to determine nuclide identity (Z, A, isomeric state) + and temperature. +#. Extracts the principal cross-section block (energy grid, elastic, capture, + fission, inelastic channels) and writes them as HDF5 datasets grouped by + reaction type (elastic scattering, capture, inelastic scattering, fission). +#. Extracts angular distributions (tabulated cosine PDFs) and energy + distributions (level scattering, evaporation, Maxwellian, Kalbach-Mann, + N-body phase space, tabulated outgoing energy) for each reaction channel. +#. 
For fissionable nuclides, extracts prompt/delayed :math:`\nu(E)` multiplicities, + delayed neutron precursor fractions, decay constants, and energy spectra. + +The resulting HDF5 file (e.g., ``U235-293.6K.h5``) is ready for use with +``mcdc.Material()``. + + +Using CE Materials in an Input Deck +------------------------------------ + +Once the library is generated, set the ``MCDC_LIB`` environment variable and +define materials with ``mcdc.Material()``: + +.. code-block:: python3 + + import mcdc + + # Define nuclides with atom densities (atoms/barn-cm) + fuel = mcdc.Material( + nuclides=["U235", "U238", "O16"], + density=[5.58e-4, 2.24e-2, 4.583e-2], + temperature=293.6, + ) + +MC/DC will automatically look up the matching HDF5 file in ``$MCDC_LIB`` +(e.g., ``U235-293.6K.h5``) and load the pointwise cross sections. + + +Note on External Data Sources +------------------------------ + +MC/DC's internal HDF5 format is independent of the original data source. +While the shipped tool converts from **ACE format**, users with data in other +formats (e.g., OpenMC HDF5 nuclear data) can write their own converter +following the same HDF5 schema used by ``generate.py``. + +The key HDF5 structure expected by MC/DC is: + +.. code-block:: text + + -K.h5 + ├── nuclide_name (string) + ├── temperature (float, K) + ├── atomic_weight_ratio (float) + ├── fissionable (bool) + └── neutron_reactions/ + ├── xs_energy_grid (1-D array, MeV) + ├── elastic_scattering/ + │ └── MT-002/ + │ ├── xs (1-D array, barns) + │ ├── cosine/ (angular distribution) + │ └── energy/ (energy distribution) + ├── capture/ + │ └── MT-102/ ... + ├── inelastic_scattering/ + │ └── MT-051/ ... + └── fission/ + └── MT-018/ + ├── xs + ├── cosine/ + ├── energy/ + ├── nu_total/ + ├── nu_prompt/ + ├── nu_delayed/ + └── delayed_neutron/ ... + +A converter from OpenMC's ``IncidentNeutron`` HDF5 format to this schema is +a planned future addition. Contributions are welcome — see +`Issue #333 `_. 
diff --git a/mcdc/docs/source/theory/cont_movement.rst b/mcdc/docs/source/theory/cont_movement.rst new file mode 100644 index 000000000..0b484115d --- /dev/null +++ b/mcdc/docs/source/theory/cont_movement.rst @@ -0,0 +1,51 @@ +.. _cont_movement: + +=================== +Continuous Movement +=================== + +MC/DC implements **continuous geometry movement** for time-dependent Monte Carlo transport. +Unlike the step-function approach used by most codes (where geometry is frozen within each time step and updated discretely), MC/DC moves surfaces and sources continuously in time, yielding higher-fidelity results for transient problems. + +Motivation +---------- + +In many reactor transient scenarios — such as control rod insertion or withdrawal, fuel pellet movement, or pulsed neutron experiments — the geometry changes during the simulation. +Step-function approximations introduce temporal discretization error that decreases only with finer time steps. +Continuous movement eliminates this error source entirely by solving for the exact intersection of a particle trajectory with a moving surface. + +Implementation +-------------- + +Surfaces and sources can be assigned piecewise-constant velocities using the ``move`` method: + +.. code-block:: python3 + + surface.move(velocities=[[vx, vy, vz]], durations=[dt]) + +Each call specifies one or more velocity segments and their durations. +A final static segment (zero velocity, infinite duration) is appended automatically to keep the surface stationary after the prescribed motion ends. +Internally, MC/DC precomputes a ``move_time_grid`` and cumulative ``move_translations`` for each moving object. + +Distance-to-Moving-Surface +-------------------------- + +When computing the distance to a moving surface, MC/DC transforms the particle into the **surface's reference frame** by subtracting the surface translation and adjusting the direction for the relative velocity: + +.. 
math:: + + \mathbf{r}' = \mathbf{r} - \left(\mathbf{T}_0 + \mathbf{V} \cdot t_{\text{local}}\right) + +.. math:: + + \hat{\mathbf{u}}' = \hat{\mathbf{u}} - \frac{\mathbf{V}}{v} + +where :math:`\mathbf{V}` is the surface velocity in the current time segment and :math:`v` is the particle speed. +In each piecewise-constant velocity segment, the problem reduces to a stationary distance calculation. +If no intersection is found within the current segment, the particle is advanced to the next time boundary and the computation repeats with the next velocity segment. + +Sources can be moved with the same interface (``source.move(...)``), and source particle positions are adjusted to account for the source's displacement at the particle's birth time. + +For more details, see: + +- I. Variansyah and R. G. McClarren. "High-fidelity treatment for object movement in time-dependent Monte Carlo transport simulations." *M&C 2023*. Preprint DOI 10.48550/arXiv.2305.07641. diff --git a/mcdc/docs/source/theory/domain_decomp.rst b/mcdc/docs/source/theory/domain_decomp.rst new file mode 100644 index 000000000..b0b0edb7d --- /dev/null +++ b/mcdc/docs/source/theory/domain_decomp.rst @@ -0,0 +1,31 @@ +.. _dd: + +==================== +Domain Decomposition +==================== + +Domain decomposition (DD) enables MC/DC to distribute large transport problems across multiple MPI ranks by partitioning the spatial domain. +Each rank is responsible for tracking particles within its assigned subdomain, and particles that cross subdomain boundaries are communicated to the appropriate neighbor. + +Algorithm +--------- + +The domain decomposition workflow in MC/DC proceeds as follows: + +#. **Domain check-in** (``dd_check_in``): At the start of each transport step, each particle is verified to belong to the local subdomain. Particles that have drifted out of bounds are flagged for transfer. +#. **Particle transport**: Standard Monte Carlo transport is performed on all local particles. +#. 
**Particle send** (``dd_particle_send``): Particles that have crossed a subdomain boundary during transport are packed and sent to the neighboring rank via MPI communication. +#. **Source resolution** (``source_dd_resolution``): After the source iteration or cycle completes, any remaining load imbalances are resolved by redistributing source particles across ranks. + +Each rank maintains its own particle banks (active, census, source, future) and only tracks particles physically located in its subdomain. + +Usage +----- + +Domain decomposition is activated as a technique flag within the simulation. +The decomposition is spatial, meaning the global geometry mesh is divided along one or more axes, and each MPI rank handles a contiguous block of the mesh. + +.. note:: + + Domain decomposition support is under active development. + More sophisticated partitioning strategies (e.g., load-balanced or graph-based decomposition) are planned for future releases. diff --git a/mcdc/docs/source/theory/geometry.rst b/mcdc/docs/source/theory/geometry.rst new file mode 100644 index 000000000..062523ef8 --- /dev/null +++ b/mcdc/docs/source/theory/geometry.rst @@ -0,0 +1,202 @@ +.. _geometry: + +====================== +Geometry and CSG Model +====================== + +MC/DC uses **Constructive Solid Geometry** (CSG) to define the spatial +domain. Complex geometries are built by combining simple surfaces with +Boolean region operators. + +Surfaces +-------- + +A surface is a mathematical equation that divides space into a positive +half-space (:math:`f(\mathbf{r}) > 0`) and a negative half-space +(:math:`f(\mathbf{r}) < 0`). MC/DC provides the following surface types: + +.. 
list-table:: + :widths: 25 45 30 + :header-rows: 1 + + * - Surface + - Equation + - Constructor + * - Plane X + - :math:`x - x_0 = 0` + - ``Surface.PlaneX(x=x0)`` + * - Plane Y + - :math:`y - y_0 = 0` + - ``Surface.PlaneY(y=y0)`` + * - Plane Z + - :math:`z - z_0 = 0` + - ``Surface.PlaneZ(z=z0)`` + * - Cylinder X + - :math:`(y-y_0)^2 + (z-z_0)^2 - R^2 = 0` + - ``Surface.CylinderX(center, radius)`` + * - Cylinder Y + - :math:`(x-x_0)^2 + (z-z_0)^2 - R^2 = 0` + - ``Surface.CylinderY(center, radius)`` + * - Cylinder Z + - :math:`(x-x_0)^2 + (y-y_0)^2 - R^2 = 0` + - ``Surface.CylinderZ(center, radius)`` + * - Sphere + - :math:`|\mathbf{r} - \mathbf{r}_0|^2 - R^2 = 0` + - ``Surface.Sphere(center, radius)`` + +**Boundary conditions** are set on outermost surfaces: + +.. code-block:: python3 + + s = mcdc.Surface.PlaneX(x=10.0, boundary_condition="vacuum") # particles leak + s = mcdc.Surface.PlaneX(x=0.0, boundary_condition="reflective") # mirror reflection + + +Region Operators +----------------- + +A **region** (also called a *half-space*) is obtained by applying the +unary ``+`` or ``-`` operator to a surface: + +- ``+s`` — the positive half-space (:math:`f > 0`). +- ``-s`` — the negative half-space (:math:`f < 0`). + +Regions are combined with Boolean operators: + +.. list-table:: + :widths: 15 25 60 + :header-rows: 1 + + * - Operator + - Meaning + - Example + * - ``&`` + - Intersection (AND) + - ``+s1 & -s2`` — between planes ``s1`` and ``s2`` + * - ``|`` + - Union (OR) + - ``region_A | region_B`` — either region + * - ``~`` + - Complement (NOT) + - ``~region_A`` — everything outside ``region_A`` + +**Operator precedence**: ``~`` > ``&`` > ``|``. Use parentheses for +clarity. + + +Cells +------ + +A **cell** is a region filled with a material: + +.. code-block:: python3 + + mcdc.Cell(region=+s1 & -s2 & +s3 & -s4, fill=material) + +Every point in the problem domain must belong to exactly one cell. 
+MC/DC does not verify non-overlapping coverage automatically — the user +is responsible for ensuring consistent cell definitions. + +**Named cells** can be used for cell-filtered tallies: + +.. code-block:: python3 + + sphere_cell = mcdc.Cell(name="Fuel sphere", region=-sphere, fill=fuel) + mcdc.Cell(cell=sphere_cell, scores=["fission"]) + + +Universes and Packaging +------------------------ + +A **universe** groups a set of cells into a reusable geometry unit. +Universes can be **translated** and **rotated** when placed inside a +container cell, enabling duplication of complex assemblies without +redefining their internal geometry. + +.. code-block:: python3 + + assembly = mcdc.Universe(cells=[fuel_cell, clad_cell, water_cell]) + + # Place two copies with different positions and rotations + mcdc.Cell(region=left_region, fill=assembly, translation=[-5, 0, 0]) + mcdc.Cell(region=right_region, fill=assembly, translation=[+5, 0, 0], + rotation=[0, 10, 0]) + +When a particle enters a universe cell, MC/DC transforms coordinates +into the universe's local frame, tracks through local surfaces, then +transforms back. + +For a complete example, see :ref:`example_fuel_array_packaged`. + + +Lattices +--------- + +A **lattice** is a regular array of universes arranged on a Cartesian +grid. Each grid position is mapped to a universe by an integer index. + +.. code-block:: python3 + + lattice = mcdc.Lattice( + x=[-5.0, 0.0, 5.0], # 2 cells in x + y=[-5.0, 0.0, 5.0], # 2 cells in y + universes=[[u1, u2], + [u3, u4]], + ) + +Lattices are powerful for reactor-core models where fuel assemblies +repeat in a regular pattern. See the :ref:`example_c5g7_k_eigenvalue` +for a full-core lattice example. + +Root Universe +^^^^^^^^^^^^^ + +When using universes or lattices, the top-level cell collection is +registered as the **root universe**: + +.. 
code-block:: python3 + + mcdc.simulation.set_root_universe(cells=[cell_left, cell_right]) + + +Geometry Visualization +----------------------- + +MC/DC includes a built-in ray-casting visualizer for inspecting CSG +geometries before running a full transport simulation: + +.. code-block:: python3 + + mcdc.visualize( + "xz", # projection plane + y=0.0, # slice position + x=[-10.0, 10.0], # plot range + z=[-5.0, 5.0], + pixels=(400, 400), + colors={fuel: "red", water: "blue"}, + ) + +This produces a pixel map showing which material fills each pixel. +Animated geometry (moving surfaces) can be visualized with the ``time`` +argument: + +.. code-block:: python3 + + mcdc.visualize(..., time=np.linspace(0, 9, 19), save_as="geo_animation") + +For details on moving surfaces and sources, see :ref:`cont_movement`. + + +Moving Surfaces +---------------- + +Any surface can be given a piecewise-constant velocity using the +``move()`` method, enabling time-dependent geometry: + +.. code-block:: python3 + + surface.move(velocities=[[vx, vy, vz]], durations=[dt]) + +MC/DC solves for the exact intersection of a particle trajectory with +the moving surface — no time-step discretization error is introduced. +For the mathematical formulation, see :ref:`cont_movement`. diff --git a/mcdc/docs/source/theory/gpu.rst b/mcdc/docs/source/theory/gpu.rst new file mode 100644 index 000000000..d3c8db863 --- /dev/null +++ b/mcdc/docs/source/theory/gpu.rst @@ -0,0 +1,96 @@ +.. _theory_gpu: + +================= +GPU Functionality +================= + +GPU Compilation +--------------- + +When targeting GPUs, MC/DC functions are just-in-time (JIT) compiled with Harmonize. +To JIT compile and execute on AMD or Nvidia GPUs, MC/DC users need only to append their terminal launches with a ``--target=gpu`` option. 
When considered in totality, the MC/DC+Numba+Harmonize JIT compilation structure is akin to a "portability framework", in that it allows dynamic targeting and developer abstraction of hardware architectures, like OpenMP target-offloading used by OpenMC.
image:: ../images/theory/gpu_comp/nvcc_flow.png + :width: 800 + :alt: Simple proxy example describing how to compile device functions in Numba-Python with external C++ code for targeting Nvidia GPUs. In this simplified proxy, the Python function corresponds to MC/DC, and the C++ code corresponds to Harmonize. + + +Simple proxy example describing how to compile device functions in Numba-Python with external C++ code for targeting Nvidia GPUs. In this simplified proxy, the Python function corresponds to MC/DC, and the C++ code corresponds to Harmonize + +We begin by + +#. Compiling Python device code to Nvidia PTX by ``numba.cuda.compile_ptx_for_current_device`` (which requires typed function signatures), then place that output into ``add_one.ptx`` file; next +#. Compiling PTX to relocatable device code using ``nvcc -rdc=true -dc -arch= --cudart shared --compiler-options -fPIC add.ptx -o add.o`` where ``-dc`` asks the compiler for device code, ``-rdc`` asks to make that device code relocatable, ``--cudart shared`` asks for shared CUDA runtime libraries and ``-fPIC`` generates position-independent code; +#. Compiling that relocatable byte code into a library of executable device functions is done with ``nvcc -dlink add.o -arch= --cudart shared -o device.o --compiler-options -fPIC`` where ``-dlink`` asks the compiler for relocatable device code; and finally +#. Compiling the C-CUDA file containing the global function and linking with the library of device functions originating from Python with ``nvcc -shared add.o device.o -arch= --cudart shared``. + + +While the complexity of the functions both from MC/DC (Python) and Harmonize (C++) increases dramatically when moving toward implementation in MC/DC, this compilation strategy remains mostly the same. +The exact compilation commands Harmonize calls when compiling MC/DC functions can be viewed by setting ``VERBOSE=True`` in ``harmonize/python/config.py``. 
+This compilation strategy also allows for the extension of functions defined in the CUDA API but not in Numba-CUDA as they can come from the C-CUDA source in ``dep.cpp``. + +----------- +AMD Targets +----------- + +Just in time compilation and execution to AMD devices are enabled as of `MC/DC v0.11.0 `_. +Significant adaptations from the process of Nvidia compilation are required to target AMD GPUs. +PTX is a proprietary Nvidia standard, so when targeting AMD GPUs, we rely on intermediate compiler representation (IR) from LLVM for an AMD GPU hardware-target (also called an LLVM target triple). +AMD's compiler toolchain is based in the LLVM-Clang ecosystem, so we will be calling LLVM-Clang-based tools (e.g., ``hipcc`` is a wrapper function for ``clang``). +Note that while the LLVM-Clang commands are generic, AMD variations of compilers, linkers, etc. must be invoked. +For example, to invoke the correct Clang compiler point to the ROCm installed variation (often on LinuxOS at ``opt/rocm/llvm/bin/clang``). + +To generate AMD target LLVM-IR from Python script, a `patch to Numba is provided by AMD `_. +This patch can also execute produced functions from the Python interpreter, much like Numba-CUDA. +As this patch is a port of AMD's Heterogeneous-computing Interface for Portability (HIP) API, it attempts to be a one-to-one implementation of operations implemented in Numba-CUDA. +The Numba-HIP development team has gone as far as to provide a ``numba.hip.pose_as_cuda()`` function, which, after being called in Python script, will alias all supported Numba-CUDA functions to Numba-HIP ones and compile/run automatically. + + +When moving to compile and execute full MC/DC+Harmonize, we must again enable the compilation of device functions from Numba-HIP and device, global, and host functions from C++. 
+To show that process, we again explore a simple proxy application shown in figure fig:codeclang where a Numba-HIP function adds one to an integer value and a C++ function declares an extern function by the same name and runs that function for all values of an array. + +Every GPU program is technically a bound set of two complementary applications: one that runs on the host side (CPU) and the other on the device side (GPU), with global functions linking them together. +To link external device code together for AMD hardware-targets, we have to unbundle these two programs, link the extra device functions (coming from Python) to the device side, then re-bundle the device and host functions back together. +This process is done in LLVM-IR. + +.. image:: ../images/theory/gpu_comp/amd_flow.png + :width: 800 + :alt: Simple proxy example describing how to compile device functions in Numba-HIP with external C++ code to AMD GPU targets. In this simplified proxy, the Python function corresponds to MC/DC, and the C++ code corresponds to Harmonize. + +Simple proxy example describing how to compile device functions in Numba-HIP with external C++ code to AMD GPU targets. In this simplified proxy, the Python function corresponds to MC/DC, and the C++ code corresponds to Harmonize + +Figure fig:codeclang shows the compilation structure. +We begin compilation by + +#. Compiling C++ source in ``dep.cpp`` to LLVM-IR with host and device code bundled together with ``hipcc -c -fgpu-rdc -S -emit-llvm -o dep.ll -x hip dep.cpp -g`` where ``-fgpu-rdc`` asks the compiler for relocatable device code ``-emit-llvm`` requests the LLVM-IR, ``-c`` only runs preprocess, compile, and assemble steps, and ``-x hip`` specifies that ``dep.cpp`` is HIP code; +#. Unbundling the LLVM-IR: + + a. 
first the device half ``clang-offload-bundler --type=ll --unbundle --input=dep.ll --output=dep_gpu.ll --targets=hip-amdgcn-amd-amdhsa--gfx90a`` where ``amdgcn-amd-amdhsa`` is the LLVM target-tipple and ``gfx90a`` is compiler designation for an MI250X + b. then the host half ``clang-offload-bundler --type=ll --unbundle --input=dep.ll --output=dep_cpu.ll --targets=host-x86_64-unknown-linux-gnu``; then + +#. Compiling device functions from Python source with ``numba.hip.generate_llvmir()`` and place into ``add_one.ll``; +#. Linking the now unbundled device code in ``dep_gpu.ll`` and the device code from Python in ``add_one.ll`` together with ``llvm-link dep_gpu.ll add_one.ll -S -o dep_gpu_linked.ll``; +#. Rebundling the now combined Python/C++ device LLVM-IR back to the host LLVM-IR with ``clang-offload-bundler --type=ll --input=dep_gpu_linked.ll --input=dep_cpu.ll --output=dep_bundled.ll --targets=hip-amdgcn-amd-amdhsa--gfx90a, host-x86_64-unknown-linux-gnu``; and finally +#. Compiling to an executable with ``hipcc -v -fgpu-rdc --hip-link dep_bundled.ll-o program`` where ``--hip-link`` links clang-offload-bundles for HIP + +As in the Nvidia compilation, non-implemented functions can be brought into the final program via the C++ source. +This was required for MC/DC on AMD GPUs as vector operable atomics are not currently implemented in the Numba HIP port and thus must come from the C++ side. +We hope that these more generic adaptations (relying on LLVM-Clang infrastructure instead of CUDA) will allow for greater extensibility as we move to target future accelerator platforms---namely, Intel GPUs. +For compilation to Nvidia hardware-targets, we will still keep the PTX-based compilation structure. \ No newline at end of file diff --git a/mcdc/docs/source/theory/index.rst b/mcdc/docs/source/theory/index.rst new file mode 100644 index 000000000..dd40c302f --- /dev/null +++ b/mcdc/docs/source/theory/index.rst @@ -0,0 +1,49 @@ +.. 
_theory: + +============ +Theory Guide +============ + +We provided a brief theory guide into the methods, algorithms, and compilation +schemes in MC/DC. + +New to Monte Carlo transport? Start with :ref:`mc_basics` for the +fundamentals and :ref:`geometry` for how MC/DC represents problem domains. +Then explore the advanced topics below. + +For an additional external resource, see the +`OpenMC theory guide `_. + +Fundamentals +------------ + +.. toctree:: + :maxdepth: 1 + + mc_basics + geometry + k_eigenvalue + +Advanced Methods +---------------- + +.. toctree:: + :maxdepth: 1 + + variance_reduction + ana + iqmc + ww + uq + compressed_sensing + +Implementation +-------------- + +.. toctree:: + :maxdepth: 1 + + gpu + cont_energy + domain_decomp + cont_movement diff --git a/mcdc/docs/source/theory/iqmc.rst b/mcdc/docs/source/theory/iqmc.rst new file mode 100644 index 000000000..124406a82 --- /dev/null +++ b/mcdc/docs/source/theory/iqmc.rst @@ -0,0 +1,39 @@ +.. _iqmc: + +==== +iQMC +==== + +The iterative Quasi-Monte Carlo (iQMC) method replaces the pseudo-random number sequences used in conventional Monte Carlo transport with **low-discrepancy sequences** (e.g., Halton or Sobol sequences). +This reduces the variance convergence rate from the standard :math:`O(1/\sqrt{N})` to as fast as :math:`O((\log N)^d / N)` for :math:`d`-dimensional problems, yielding significant efficiency gains for smooth solutions. + +Algorithm +--------- + +iQMC reformulates the transport problem as a fixed-point iteration and combines it with Krylov linear solvers such as GMRES. +At each iteration: + +#. Low-discrepancy sample points are generated for the source particle phase-space coordinates. +#. Particles are transported using the standard MC kernel, but with quasi-random initial conditions rather than pseudo-random ones. +#. Scattering and fission source tallies are accumulated to update the right-hand side of the transport equation. +#. 
A Krylov solver (e.g., GMRES) accelerates the convergence of the source iteration. + +This process repeats until the scattering/fission source converges. + +Spatial Error Mitigation +------------------------ + +A known challenge with iQMC is spatial discretization error introduced by the tally mesh. +MC/DC addresses this with **linear discontinuous (LD) source tilting**, which uses a piecewise-linear representation of the scattering and fission sources within each mesh cell rather than a flat (piecewise-constant) approximation. +Additionally, **effective scattering and fission rate tallies** improve the consistency between the transport solve and the source update, reducing spatial bias. + +Applications +------------ + +iQMC is applicable to both fixed-source and k-eigenvalue problems. +Output data from iQMC simulations is stored in the ``iqmc/tally/`` group of the HDF5 output file. + +For more details, see: + +- S. Pasmann, I. Variansyah, C. T. Kelley, and R. G. McClarren. "Mitigating Spatial Error in iQMC with Linear Discontinuous Source Tilting and Effective Scattering and Fission Rate Tallies." *NSE* (2024). Preprint DOI 10.48550/arXiv.2401.04029. +- S. Pasmann, I. Variansyah, C. T. Kelley, and R. G. McClarren. "A Quasi-Monte Carlo Method with Krylov Linear Solvers for Multigroup Neutron Transport Simulations." *NSE* (2023). DOI 10.1080/00295639.2022.2143704. \ No newline at end of file diff --git a/mcdc/docs/source/theory/k_eigenvalue.rst b/mcdc/docs/source/theory/k_eigenvalue.rst new file mode 100644 index 000000000..521261d26 --- /dev/null +++ b/mcdc/docs/source/theory/k_eigenvalue.rst @@ -0,0 +1,128 @@ +.. _k_eigenvalue: + +=========================== +k-Eigenvalue Calculations +=========================== + +A k-eigenvalue (criticality) calculation determines the effective +neutron multiplication factor :math:`k_{\text{eff}}` and the +fundamental-mode fission source distribution of a system. 
+MC/DC implements this via the standard **power iteration** algorithm. + +The k-Eigenvalue Equation +-------------------------- + +In steady state the Boltzmann equation with fission becomes an +eigenvalue problem: + +.. math:: + + \hat{\Omega}\cdot\nabla\psi + + \Sigma_t\,\psi + = + \int \Sigma_s\,\psi'\,dE'\,d\Omega' + + \frac{1}{k}\,\frac{\chi}{4\pi}\int \nu\Sigma_f\,\phi'\,dE' + +where :math:`k = k_{\text{eff}}` is the eigenvalue. A system is + +- **critical** if :math:`k = 1`, +- **supercritical** if :math:`k > 1`, +- **subcritical** if :math:`k < 1`. + + +Power Iteration +---------------- + +MC/DC solves the eigenvalue problem using power (source) iteration: + +#. An initial guess for :math:`k^{(0)}` is provided (default: 1.0). +#. Source particles are sampled from the current fission source + distribution. +#. Particles are transported; fission sites are banked. +#. The new eigenvalue estimate is updated: + + .. math:: + + k^{(i+1)} = k^{(i)} \;\frac{W_{\text{fission}}^{(i+1)}}{W_{\text{source}}^{(i)}} + + where :math:`W` denotes the total statistical weight. +#. The fission bank becomes the source for the next cycle. +#. Repeat until convergence. + +Users configure eigenmode via: + +.. code-block:: python3 + + mcdc.settings.set_eigenmode(N_inactive=50, N_active=200, k_init=1.0) + +- ``N_inactive`` — Cycles discarded for fission source convergence. +- ``N_active`` — Cycles used for tally accumulation. +- ``k_init`` — Initial :math:`k` guess. + + +Inactive vs. Active Cycles +---------------------------- + +The first ``N_inactive`` cycles allow the fission source distribution +to converge from the (often arbitrary) initial guess to the +fundamental eigenmode. Tallies are **not** accumulated during inactive +cycles to avoid bias. + +During the ``N_active`` cycles, tally scores are accumulated and +batch statistics (mean, standard deviation) are computed for both +:math:`k_{\text{eff}}` and spatial quantities. 
+ + +Shannon Entropy +---------------- + +MC/DC can optionally track the **Shannon entropy** of the fission +source distribution as a convergence diagnostic. The spatial domain +is divided into :math:`M` mesh cells, and the entropy at cycle +:math:`i` is: + +.. math:: + + H^{(i)} = -\sum_{m=1}^{M} p_m \log_2 p_m + +where :math:`p_m` is the fraction of fission source weight in cell +:math:`m`. A plateau in :math:`H` over successive cycles indicates +that the source has converged. + + +Gyration Radius +---------------- + +The **gyration radius** measures the spatial spread of the fission +source around its centre of mass: + +.. math:: + + R_g = \sqrt{\frac{\sum_j w_j\,|\mathbf{r}_j - \mathbf{r}_{\text{cm}}|^2} + {\sum_j w_j}} + +MC/DC reports gyration radius diagnostics in the output when running +eigenvalue problems (see the :ref:`example_c5g7_k_eigenvalue` for an +example). + + +Tips for k-Eigenvalue Simulations +---------------------------------- + +- **Sufficient inactive cycles**: Too few inactive cycles biases + :math:`k_{\text{eff}}` and spatial tallies. Check Shannon entropy + convergence. +- **Particle count**: Start with :math:`10^3`–:math:`10^4` particles per + cycle for development, then increase to :math:`10^5`–:math:`10^6` for + production. +- **Initial source distribution**: A uniform distribution over the + fissile region is a reasonable default. + +References +---------- + +- T. M. Sutton and A. Morel. "Iteration Acceleration Techniques for + Monte Carlo Eigenvalue Calculations." *Transactions of ANS* (1996). +- F. B. Brown. "On the Use of Shannon Entropy of the Fission + Distribution for Assessing Convergence of Monte Carlo Criticality + Calculations." PHYSOR (2006). diff --git a/mcdc/docs/source/theory/mc_basics.rst b/mcdc/docs/source/theory/mc_basics.rst new file mode 100644 index 000000000..1b76a680a --- /dev/null +++ b/mcdc/docs/source/theory/mc_basics.rst @@ -0,0 +1,131 @@ +.. 
_mc_basics: + +============================ +Monte Carlo Transport Basics +============================ + +This page provides a concise primer on the Monte Carlo method for neutron +transport as implemented in MC/DC. Readers already familiar with MC +transport may skip ahead to the advanced theory pages. + +The Boltzmann Transport Equation +--------------------------------- + +MC/DC solves the linear Boltzmann transport equation for the angular +neutron flux :math:`\psi(\mathbf{r}, \hat{\Omega}, E, t)`: + +.. math:: + + \frac{1}{v}\frac{\partial\psi}{\partial t} + + \hat{\Omega}\cdot\nabla\psi + + \Sigma_t\,\psi + = + \int_{4\pi}\!\int_0^\infty \Sigma_s(E'\to E,\hat{\Omega}'\to\hat{\Omega})\,\psi'\,dE'\,d\Omega' + + \frac{\chi(E)}{4\pi}\int_0^\infty \nu\Sigma_f(E')\,\phi(E')\,dE' + + Q + +where + +- :math:`\Sigma_t`, :math:`\Sigma_s`, :math:`\Sigma_f` are the macroscopic total, + scattering, and fission cross sections (cm\ :sup:`-1`), +- :math:`\nu` is the average number of neutrons emitted per fission, +- :math:`\chi(E)` is the fission spectrum, +- :math:`\phi(E) = \int_{4\pi}\psi\,d\Omega` is the scalar flux, +- :math:`Q` is an external source. + +The scalar flux :math:`\phi` is the primary quantity of interest for +most tallies. + + +Random Walk +----------- + +Monte Carlo solves the transport equation by simulating individual +neutron **random walks** (histories). Each history proceeds as: + +#. **Birth** — A neutron is sampled from the source distribution + :math:`Q(\mathbf{r}, \hat{\Omega}, E, t)`. +#. **Free flight** — The distance to the next collision is sampled + from the exponential distribution: + + .. math:: + + d = -\frac{\ln\xi}{\Sigma_t(E)} + + where :math:`\xi` is a uniform random number on :math:`(0,1)`. + If a geometry boundary is reached first, the particle crosses (or + reflects/leaks) and the free flight continues. +#. 
**Collision** — The interaction type is selected by the ratio of + partial to total cross sections: scattering + (:math:`\Sigma_s / \Sigma_t`), capture (:math:`\Sigma_c / \Sigma_t`), + or fission (:math:`\Sigma_f / \Sigma_t`). + + - **Scattering**: New direction and energy are sampled from the + differential scattering kernel. + - **Capture**: The neutron is absorbed (history terminates in analog mode). + - **Fission**: :math:`\lfloor\nu + \xi\rfloor` secondary neutrons are + produced and banked for later processing. + +#. **Termination** — The history ends when the particle is absorbed, + leaks out of the domain, or falls below a weight threshold. + + +Tallying +-------- + +During the random walk, MC/DC accumulates **tally scores** — estimates +of physical quantities such as scalar flux, fission rate, or neutron +density. + +MC/DC supports two main estimator types: + +**Track-length estimator** + Accumulates the path length :math:`\ell` of each flight through a + tally region, weighted by the particle weight :math:`w`: + + .. math:: + + \hat{\phi}_V = \frac{1}{V} \sum_{\text{tracks}} w\,\ell + +**Collision estimator** + Scores at each collision site: + + .. math:: + + \hat{\phi}_V = \frac{1}{V\,\Sigma_t} \sum_{\text{collisions}} w + +Both estimators are unbiased for the volume-averaged scalar flux. +The track-length estimator generally has lower variance because it +scores on every flight segment, not just at collision points. + +Statistical Uncertainty +----------------------- + +MC/DC uses **batch statistics** to estimate the uncertainty of tally +results. The simulation is divided into :math:`N_b` independent +batches, each with :math:`N_p / N_b` particles. The sample mean and +sample standard deviation of the batch means provide the central +estimate and its statistical error: + +.. 
math:: + + \bar{x} = \frac{1}{N_b}\sum_{b=1}^{N_b} x_b, \qquad + \sigma_{\bar{x}} = \sqrt{\frac{1}{N_b(N_b-1)}\sum_{b=1}^{N_b}(x_b - \bar{x})^2} + +The relative standard deviation +:math:`\sigma_{\bar{x}} / \bar{x}` decreases as +:math:`O(1/\sqrt{N_p})`. For faster convergence, see: + +- :ref:`variance_reduction` — implicit capture, weight roulette, + population control. +- :ref:`ww` — weight windows. +- :ref:`iqmc` — quasi-Monte Carlo methods for :math:`O((\log N)^d / N)` + convergence. + +References +---------- + +- E. E. Lewis and W. F. Miller, Jr. *Computational Methods of + Neutron Transport.* John Wiley & Sons (1984). +- L. L. Carter and E. D. Cashwell. *Particle-Transport Simulation + with the Monte Carlo Method.* ERDA (1975). diff --git a/mcdc/docs/source/theory/uq.rst b/mcdc/docs/source/theory/uq.rst new file mode 100644 index 000000000..96e07f39d --- /dev/null +++ b/mcdc/docs/source/theory/uq.rst @@ -0,0 +1,35 @@ +.. _uq: + +========================== +Uncertainty Quantification +========================== + +MC/DC supports sampling-based (non-intrusive) uncertainty quantification (UQ). +The fundamental challenge in combining UQ with Monte Carlo transport is that the solver itself introduces stochastic noise, which can bias or obscure the UQ statistics of interest. + +Variance Deconvolution +---------------------- + +The core technique implemented in MC/DC is the **variance deconvolution estimator**. +When uncertain input parameters (e.g., cross sections, densities) are sampled across multiple realizations, the total observed variance in the output contains contributions from both the parametric uncertainty and the Monte Carlo statistical noise. + +By the law of total variance, + +.. math:: + + \text{Var}[Y] = \text{Var}[\mathbb{E}[Y \mid \theta]] + \mathbb{E}[\text{Var}[Y \mid \theta]] + +where :math:`Y` is the quantity of interest and :math:`\theta` represents the uncertain parameters. 
+The first term is the **UQ variance** we want to estimate, while the second term is the **MC noise** that must be subtracted. +The variance deconvolution estimator uses the batched variance estimates from each realization to separate these two contributions, enabling accurate UQ variance computation even when the MC noise is significant. + +Implementation +-------------- + +MC/DC uses a dedicated RNG stream (``SEED_SPLIT_UQ``) for UQ samples, keeping them statistically independent from the transport random walks. +UQ variance statistics are stored in the output HDF5 file alongside the standard tally results, allowing post-processing of both the mean response and its parametric uncertainty. + +For more details, see: + +- K. B. Clements, G. Geraci, A. J. Olson, and T. S. Palmer. "A variance deconvolution estimator for efficient uncertainty quantification in Monte Carlo radiation transport applications." *JQSRT* (2024). DOI 10.1016/j.jqsrt.2024.108958. +- K. B. Clements, G. Geraci, A. J. Olson, and T. S. Palmer. "Global Sensitivity Analysis in Monte Carlo Radiation Transport." *M&C 2023*. diff --git a/mcdc/docs/source/theory/variance_reduction.rst b/mcdc/docs/source/theory/variance_reduction.rst new file mode 100644 index 000000000..29f8dc66b --- /dev/null +++ b/mcdc/docs/source/theory/variance_reduction.rst @@ -0,0 +1,156 @@ +.. _variance_reduction: + +================================= +Variance Reduction Techniques +================================= + +Analog Monte Carlo converges as :math:`O(1/\sqrt{N})`, which can be +prohibitively slow for deep-penetration or rare-event problems. +MC/DC provides several **variance reduction** (VR) techniques that +reduce the statistical uncertainty per particle history without +introducing bias. + +All techniques below are activated through the ``mcdc.simulation`` +interface. + +Implicit Capture +----------------- + +Also called **survival biasing** or **absorption suppression**. 
+Instead of terminating a particle at every capture event, the particle's +weight is reduced by the non-absorption probability after each +collision: + +.. math:: + + w' = w \; \frac{\Sigma_s + \Sigma_f}{\Sigma_t} + +The particle continues with the reduced weight, ensuring that every +history contributes to tallies for longer. This is especially +effective in highly absorbing media. + +**Usage:** + +.. code-block:: python3 + + mcdc.simulation.implicit_capture() + +.. note:: + + Implicit capture is enabled by default in most MC/DC examples. + Without a companion technique (weight roulette or weight windows) + to eliminate very-low-weight particles, a memory overhead can build + up over time. + +Weight Roulette +---------------- + +When a particle's weight drops below a threshold +:math:`w_{\text{thresh}}`, **Russian roulette** is applied: + +- With probability :math:`p = w / w_{\text{target}}`, the particle + survives and its weight is set to :math:`w_{\text{target}}`. +- With probability :math:`1 - p`, the particle is killed. + +This prevents an ever-growing population of low-weight particles while +preserving the expected weight (unbiased). + +**Usage:** + +.. code-block:: python3 + + mcdc.simulation.weight_roulette(weight_threshold=0.25, weight_target=1.0) + +``weight_threshold`` and ``weight_target`` should be chosen so that +:math:`w_{\text{thresh}} < w_{\text{target}}`; a common ratio is +:math:`w_{\text{thresh}} / w_{\text{target}} \approx 0.25`. + + +Weighted Emission +------------------ + +In fission problems, the number of secondary neutrons is +:math:`\lfloor\nu + \xi\rfloor` in analog mode. **Weighted emission** +instead emits a fixed number of secondaries (``weight_target`` worth +of weight), adjusting their weights so that the total expected weight +is preserved: + +.. math:: + + w_{\text{child}} = \frac{w \cdot \nu}{n_{\text{emit}}} + +This reduces the variance of the fission source weight distribution. + +**Usage:** + +.. 
code-block:: python3 + + mcdc.simulation.weighted_emission(active=True, weight_target=1.0) + + +Population Control +------------------- + +In time-dependent (transient) problems, the neutron population can +grow or decay exponentially, making it difficult to maintain a +well-sampled phase space. **Population control** adjusts the particle +bank at each time census by splitting high-weight particles and +rouletting low-weight ones, targeting a uniform weight close to +:math:`w_{\text{target}}`. + +**Usage:** + +.. code-block:: python3 + + mcdc.simulation.population_control() + +Population control is typically combined with a time census +(``set_time_census``) that checkpoints the particle population at +specified time boundaries. + + +Weight Windows +--------------- + +Weight windows define position-dependent (and optionally energy- and +time-dependent) target weights and bounds. They combine splitting and +roulette to focus computational effort in regions of high importance. + +MC/DC supports both user-defined and automatically generated weight +windows. See :ref:`ww` for a full description of the available +strategies (``WW_USER`` and ``WW_PREVIOUS``) and modification schemes +(``WW_MIN`` and ``WW_WOLLABER``). + + +Combining Techniques +--------------------- + +VR techniques are designed to be composable. A typical production +setup might use: + +.. code-block:: python3 + + mcdc.simulation.implicit_capture() + mcdc.simulation.weight_roulette(weight_threshold=0.25, weight_target=1.0) + +For time-dependent fission problems: + +.. code-block:: python3 + + mcdc.simulation.implicit_capture() + mcdc.simulation.weighted_emission(active=True, weight_target=1.0) + mcdc.simulation.population_control() + +The order of activation does not matter — MC/DC applies them in the +correct transport-physics order internally. + +For quasi-Monte Carlo acceleration of the source iteration, see +:ref:`iqmc`. + +References +---------- + +- T. E. Booth. 
"A Sample Problem for Variance Reduction in MCNP." + LA-10363-MS, LANL (1985). +- A. B. Wollaber. "Advanced Monte Carlo Methods for Radiation + Transport." PhD diss., Univ. of Michigan (2016). diff --git a/mcdc/docs/source/theory/ww.rst b/mcdc/docs/source/theory/ww.rst new file mode 100644 index 000000000..33701163e --- /dev/null +++ b/mcdc/docs/source/theory/ww.rst @@ -0,0 +1,38 @@ +.. _ww: + +=============== +Weight Windows +=============== + +Weight windows are a variance reduction technique used to control the statistical weight of particles as they traverse different regions of the problem domain. +They improve computational efficiency by preferentially sampling particles in regions of high importance while suppressing computation in less important regions. + +Overview +-------- + +A weight window defines, for each region of phase space (position, energy, time), a target weight :math:`w_t` and a lower bound :math:`w_{\ell}`. +When a particle enters a region: + +- If its weight :math:`w` falls below :math:`w_{\ell}`, Russian roulette is applied: the particle is either killed or its weight is increased to :math:`w_t`. +- If :math:`w` exceeds :math:`w_t / w_{\ell} \cdot w_t` (the upper bound), the particle is split into multiple particles each carrying weight :math:`w_t`. +- Otherwise, the particle is left unchanged. + +This keeps particle weights within a controlled band, reducing the variance of tally estimators. + +Methods +------- + +MC/DC defines two weight window strategies: + +- **User-defined** (``WW_USER``): the user explicitly provides the weight window parameters for each region. +- **Previous-iteration** (``WW_PREVIOUS``): weight window bounds are derived from the tally results of a previous simulation or iteration, enabling adaptive weight windows without manual tuning. + +Two modification schemes are supported: + +- **Minimum** (``WW_MIN``): a straightforward lower-bound–based splitting and roulette scheme. 
+- **Wollaber** (``WW_WOLLABER``): an automatic parameter adjustment method following the approach of Wollaber, which uses the importance map to dynamically set weight window bounds. + +.. note:: + + Weight window support is under active development. + The constants and interfaces are defined, but full transport integration may not be available in all execution modes. diff --git a/mcdc/docs/source/user/batch_scripts.rst b/mcdc/docs/source/user/batch_scripts.rst new file mode 100644 index 000000000..53b18538b --- /dev/null +++ b/mcdc/docs/source/user/batch_scripts.rst @@ -0,0 +1,146 @@ +.. _batch_scripts: + +================= +Batch Job Scripts +================= + +This page provides ready-to-use batch script templates for running MC/DC on +HPC systems with the three most common job schedulers: Slurm, Flux, and LSF. + +Each template follows the same workflow: + +#. Load required modules. +#. Activate the Python environment. +#. Launch MC/DC with the appropriate MPI wrapper. + +Adapt the resource requests (nodes, tasks, GPUs, wall-time, queue) to your +allocation and problem size. + + +Slurm +----- + +`Slurm `_ is widely used on LLNL's Quartz and Dane, +as well as many university and national lab clusters. + +**CPU-only (Numba mode):** + +.. code-block:: bash + + #!/bin/bash + #SBATCH --job-name=mcdc_run + #SBATCH --nodes=2 + #SBATCH --ntasks-per-node=36 + #SBATCH --time=01:00:00 + #SBATCH --partition=pbatch + + module load python/3.11 + source /path/to/your/venv/bin/activate + + srun python input.py --mode=numba --caching + +**GPU (Nvidia, e.g., Lassen-like systems with Slurm):** + +.. 
code-block:: bash + + #!/bin/bash + #SBATCH --job-name=mcdc_gpu + #SBATCH --nodes=1 + #SBATCH --ntasks-per-node=4 + #SBATCH --gpus-per-task=1 + #SBATCH --time=00:30:00 + #SBATCH --partition=gpu + + module load python/3.11 cuda/11.8 + source /path/to/your/venv/bin/activate + + srun python input.py --mode=numba --target=gpu --gpu_strategy=event + + +Flux +---- + +`Flux `_ is the scheduler on LLNL's Tioga and +El Capitan systems (AMD MI250X / MI300A GPUs). + +**CPU-only:** + +.. code-block:: bash + + #!/bin/bash + + module load cray-mpich python/3.11 + source /path/to/your/venv/bin/activate + + flux run -N 2 -n 72 python input.py --mode=numba --caching + +**GPU (AMD MI300A, El Capitan):** + +.. code-block:: bash + + #!/bin/bash + + module load cray-mpich rocm/6.0.0 python/3.11 + source /path/to/your/venv/bin/activate + + flux run -N 2 -n 8 -g 1 --queue=mi300a \ + python input.py --mode=numba --target=gpu \ + --gpu_arena_size=100000000 --gpu_strategy=event + +This launches MC/DC on 2 nodes with 8 GPUs total (4 per node) on the MI300A partition. + + +LSF +--- + +`LSF `_ is used on LLNL's +Lassen (IBM POWER9 + Nvidia V100). + +**CPU-only:** + +.. code-block:: bash + + #!/bin/bash + #BSUB -J mcdc_run + #BSUB -nnodes 2 + #BSUB -W 60 + #BSUB -q pbatch + + module load gcc/8 cuda/11.8 + conda activate mcdc-env + + jsrun -n 8 -r 4 -a 1 -c 10 python input.py --mode=numba --caching + +**GPU (Nvidia V100, Lassen):** + +.. code-block:: bash + + #!/bin/bash + #BSUB -J mcdc_gpu + #BSUB -nnodes 1 + #BSUB -W 30 + #BSUB -q pbatch + + module load gcc/8 cuda/11.8 + conda activate mcdc-env + + jsrun -n 4 -r 4 -a 1 -g 1 \ + python input.py --mode=numba --target=gpu --gpu_strategy=async + +This runs MC/DC on 1 node with 4 GPUs using the asynchronous scheduler. + + +Tips +---- + +* **Start small:** Test with a short wall-time and few particles before + submitting large production runs. 
+* **Use caching:** Adding ``--caching`` saves Numba-compiled binaries so that + subsequent runs skip the JIT compilation step. +* **Clear cache when updating MC/DC:** If you update the code, run once with + ``--clear_cache --caching`` to regenerate binaries. +* **Check module order:** On some systems the order of ``module load`` commands + matters. Load the compiler/MPI module before CUDA/ROCm. +* **Interactive debugging:** Request an interactive node first + (``salloc``, ``flux alloc``, or ``lalloc``) to test your command before + committing to a batch job. diff --git a/mcdc/docs/source/user/container.rst b/mcdc/docs/source/user/container.rst new file mode 100644 index 000000000..b74af97c3 --- /dev/null +++ b/mcdc/docs/source/user/container.rst @@ -0,0 +1,135 @@ +MC/DC Container Guide +===================== + +What Are Containers? +-------------------- + +A container is a lightweight, portable package that bundles an application +together with everything it needs to run: code, libraries, system tools, +and settings. Think of it like a shipping container — no matter what ship +(computer) carries it, the contents inside stay the same. + +**Why does this matter for MC/DC?** + +Installing MC/DC requires Python, MPI, Numba, and many other dependencies. +Getting all of these working together — especially on HPC systems where you +don't have admin access — can be painful. A container solves this by giving +you a pre-built environment where everything is already installed and tested. 
+ +Tested Platforms +---------------- + ++-------------+------------+--------+--------------------+--------+ +| System | OS | Arch | Container Tool | Status | ++=============+============+========+====================+========+ +| MacBook Pro | macOS 26.3 | arm64 | Docker 29.2.0 | ✓ | ++-------------+------------+--------+--------------------+--------+ +| Tuolumne | RHEL 8.10 | x86_64 | Podman 4.9.4 | ✓ | ++-------------+------------+--------+--------------------+--------+ +| Dane | RHEL 8.10 | x86_64 | Podman 4.9.4 | ✓ | ++-------------+------------+--------+--------------------+--------+ +| Tioga | RHEL 8.10 | x86_64 | Podman 4.9.4 | ✓ | ++-------------+------------+--------+--------------------+--------+ +| COE (OSU) | Rocky 8.10 | x86_64 | Apptainer 1.4.5 | ✓ | ++-------------+------------+--------+--------------------+--------+ + +All platforms produce identical containers: Debian 13, Python 3.11, +MPICH 4.2.1, MC/DC 0.12.0. + +Getting Started (New Users) +--------------------------- + +This section is for anyone who just wants to **run MC/DC** in a container. +No prior container experience needed. + +Step 1: Pull the Pre-Built Image +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You don't need to build anything. A ready-to-use image is available on +the GitHub Container Registry. + +.. rubric:: Local Machine (Docker) + +First, install Docker Desktop if you haven't already. Then open a terminal +and run: + +.. code-block:: bash + + docker pull ghcr.io/cement-psaap/mcdc:dev + docker run --rm -it ghcr.io/cement-psaap/mcdc:dev + +You are now inside the container. Try: + +.. code-block:: bash + + python -c "import mcdc; print('MC/DC OK')" + +Type ``exit`` to leave the container. + +.. rubric:: LLNL Systems — Tuolumne, Tioga, Dane (Podman) + +Podman is already installed on LLNL systems. It works just like Docker. + +.. code-block:: bash + + podman pull ghcr.io/cement-psaap/mcdc:dev + podman run --rm -it ghcr.io/cement-psaap/mcdc:dev + +.. 
note:: + + If you see ``lsetxattr: operation not supported``, + see *LLNL Storage Setup* in Part 2. + +.. rubric:: OSU Systems — COE (Apptainer) + +Apptainer is already installed on COE. + +.. code-block:: bash + + apptainer build --sandbox mcdc_sandbox docker://ghcr.io/cement-psaap/mcdc:dev + apptainer exec mcdc_sandbox python -c "import mcdc; print('MC/DC OK')" + +.. note:: + + If ``apptainer pull`` fails with "Out of memory", use ``--sandbox``. + +Step 2: Run Your Simulation +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. rubric:: Docker / Podman + +.. code-block:: bash + + docker run --rm -v $(pwd):/work -w /work mcdc:dev python input.py + docker run --rm mcdc:dev mpirun -n 4 python input.py + +For Podman, replace ``docker`` with ``podman``. + +**Flags explanation** + +- ``--rm``: Automatically clean up container. +- ``-it``: Interactive terminal. +- ``-v $(pwd):/work``: Share current folder. +- ``-w /work``: Start inside shared folder. + +.. rubric:: Apptainer (OSU) + +.. code-block:: bash + + apptainer exec mcdc_sandbox python input.py + apptainer exec mcdc_sandbox mpirun -launcher fork -n 4 python input.py + +.. note:: + + Apptainer automatically shares your home directory. + +Step 3: Docker Compose (Optional) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +From MC/DC repo root: + +.. code-block:: bash + + docker compose -f containers/docker-compose.yml run --rm dev bash + docker compose -f containers/docker-compose.yml run --rm test + docker compose -f containers/docker-compose.yml run --rm mpi mpirun -n 4 python input.py diff --git a/mcdc/docs/source/user/cpu.rst b/mcdc/docs/source/user/cpu.rst new file mode 100644 index 000000000..33f4f47f5 --- /dev/null +++ b/mcdc/docs/source/user/cpu.rst @@ -0,0 +1,62 @@ + +.. _cpu: + +===================== +Running MC/DC on CPUs +===================== + +Executing MC/DC in something like a jupyter notebook is possible but not recommended, +especially when using MPI and/or Numba. 
+The instructions below assume you have an existing MC/DC installation. +MPI can be quite tricky to configure if on an HPC; if you're having trouble, +consult our :ref:`install`, your HPC admin, or our `GitHub issues page `_. + +Pure Python Mode +---------------- + +To run in pure Python mode (slower, no acceleration) + +.. code-block:: python3 + + python input.py + +Numba Mode +---------- + +.. code-block:: python3 + + python input.py --mode=numba + +When running in Numba mode a significant amount of time is taken compiling Python functions to performant binaries. +Only the functions used in a specific simulation will be compiled. +These binaries will be cached, meaning that in subsequent runs of the same simulation the compilation step can be avoided. +The cache can be used as an effective ahead-of-time compilation scheme where binaries can be compiled once and shared between machines. +For more information on caching see :ref:`contribution/index:Caching ` and `Numba Caching `_. + +MC/DC also has the ability to run Numba in a debugging mode. +This will result in less performant code and longer compile times but will allow for better error messages from Numba and other packages. + +.. code-block:: python3 + + python input.py --mode=numba_debug + + +For more information on the exact behavior of this option see :ref:`contribution/index:Debugging ` + +Using MPI +--------- + +MC/DC can be executed using MPI with or without Numba acceleration. +If ``numba-mode`` is enabled the ``jit`` compilation, which is executed on all threads, can take between 30s-2min. +For smaller problems, Numba compilation time could exceed runtime, and pure python mode could be preferable. +Below, ``--mode`` can equal python or numba. MC/DC gets MPI functionality via `mpi4py `_. +As an example, to run on 36 processes in Numba mode with `SLURM `_: + +.. 
code-block:: python3 + + srun -n 36 python input.py --mode= + +For systems that do not use SLURM (i.e., a local system) try ``mpiexec`` or ``mpirun`` in its stead. + +CPU Profiling +------------- diff --git a/mcdc/docs/source/user/faq.rst b/mcdc/docs/source/user/faq.rst new file mode 100644 index 000000000..4bb2af19b --- /dev/null +++ b/mcdc/docs/source/user/faq.rst @@ -0,0 +1,144 @@ +.. _faq: + +========================== +Frequently Asked Questions +========================== + +General +------- + +**What Python versions does MC/DC support?** + +MC/DC supports Python ``>3.10``. +We recommend Python 3.11 for the best performance and compatibility with Numba. + +**What platforms are supported?** + +MC/DC is validated on linux-64 (x86), win-64, osx-64 (Intel), osx-arm64 (Apple Silicon), +linux-ppc64 (IBM POWER9), linux-nvidia-cuda, and linux-amd-rocm. + +**Should I use pip or conda to install MC/DC?** + +For **personal machines and simple setups**, ``pip`` inside a ``venv`` is the easiest route +(see :ref:`install:Installing with pip`). +For **HPCs or non-standard hardware** (e.g., POWER9 on Lassen, or when mpi4py is +troublesome), a **conda environment** with the ``install.sh`` script is more robust +(see :ref:`install:Installing MC/DC via conda`). + +.. list-table:: pip vs. conda at a glance + :widths: 30 35 35 + :header-rows: 1 + + * - + - **pip + venv** + - **conda** + * - Ease of setup + - Easier + - More steps + * - HPC compatibility + - Good (most systems) + - Best (handles mpi4py, POWER9) + * - Dependency isolation + - Good + - Excellent + * - MPI support + - Needs system MPI + - Can build mpi4py from source via ``install.sh`` + +**Where can I find cross-section data for continuous-energy simulations?** + +CE data libraries are provided to CEMeNT members via an internal repository. +Due to export controls they cannot be publicly distributed. 
+If you need cross-section data, we recommend using +`OpenMC `_ or `NJOY `_ to generate it, +then converting to MC/DC format with the tool in ``tools/data_library_generator/``. +See :ref:`install-data-library` for setup instructions. + +Installation +------------ + +**I get** ``ModuleNotFoundError: No module named 'mcdc'`` **right after installing.** + +Make sure you installed MC/DC inside the same environment you are running from. +If you used ``pip install -e .``, confirm the environment is activated: + +.. code-block:: sh + + # venv + source /bin/activate + + # conda + conda activate + +**pip install fails with mpi4py errors on an HPC.** + +mpi4py must be compiled against the system MPI. +Load the correct MPI module first, then install from source: + +.. code-block:: sh + + module load # e.g., mvapich2, openmpi, spectrum-mpi + CC=mpicc pip install --no-binary mpi4py mpi4py + +Or use the conda path with ``bash install.sh --hpc``, which handles this automatically. +See :ref:`user/troubleshooting:Building mpi4py from Source` for more details. + +**I get Numba version errors or** ``TypingError`` **on older Numba versions.** + +MC/DC requires **Numba >= 0.60.0**. +If you are on an older version, upgrade: + +.. code-block:: sh + + pip install 'numba>=0.60.0' + +If your system constrains the Numba version (e.g., due to CUDA toolkit compatibility), +see :ref:`user/troubleshooting:Numba Version Compatibility` for patching guidance. + +Running Simulations +------------------- + +**How do I run in parallel with MPI?** + +.. code-block:: sh + + mpiexec -n python input.py --mode=numba + +On HPCs, use the appropriate launcher (``srun``, ``jsrun``, ``flux run``). +See :ref:`user/batch_scripts:Batch Job Scripts` for ready-to-use templates. + +**My simulation is very slow — what should I check?** + +#. Are you running in ``--mode=numba``? Python mode is orders of magnitude slower. +#. First Numba run incurs JIT compilation overhead (15–80 s). 
+ Subsequent runs with ``--caching`` are much faster. +#. Check your particle count — start small and scale up. + +**I see** ``SyntaxWarning: invalid escape sequence`` **on import.** + +This is a known cosmetic warning in some older releases (see `#211 `_). +It does not affect simulation results. +Updating to the latest MC/DC version resolves it. + +Post-processing +--------------- + +**How do I read MC/DC output files?** + +MC/DC writes results to HDF5 (``.h5``) files. +Use ``h5py`` to read them: + +.. code-block:: python3 + + import h5py + with h5py.File("output.h5", "r") as f: + print(list(f.keys())) # ['runtime', 'tallies'] + print(list(f["tallies"].keys())) # list of tally names + +See the post-processing section in :ref:`user/first_mcdc:First MC/DC Simulation` for a complete example. + +**What visualization tools work with MC/DC output?** + +- ``matplotlib`` for quick 1-D / 2-D plots. +- MC/DC's built-in ``mcdc.visualize()`` for geometry inspection. +- `ParaView `_ or `VisIt `_ for 3-D data. diff --git a/mcdc/docs/source/user/first_mcdc.rst b/mcdc/docs/source/user/first_mcdc.rst new file mode 100644 index 000000000..541c224ec --- /dev/null +++ b/mcdc/docs/source/user/first_mcdc.rst @@ -0,0 +1,397 @@ +.. _first_mcdc: + + +====================== +First MC/DC Simulation +====================== + +This guide presupposes you are familiar with modeling nuclear systems using a Monte Carlo method. +If you are completely new, we suggest checking out `OpenMC's theory guide `_ as most the basic underlying algorithms and core concepts are the same. +Our input decks and keyword phrases are designed so that if you are familiar with tools like OpenMC or MCNP, you should be able to get up and running quickly. + +While this guide is a great place to start, the next best place to look when getting started are our ``MCDC/examples`` or ``MCDC/test`` directories. 
+Run a few problems there, change a few inputs around, and keep looking around until you get the general hang of what we are doing. +Believe it or not, there is a method to all this madness. +If you find yourself with errors you really don't know what to do with, take look at our `GitHub issues page `_. +If it looks like you are the first to have a given problem feel free to submit a new ticket! + +A note on testing: +Just because something seems right doesn't mean it is. +Care must be taken to ensure that you are running the problem you think you are. +The software only knows what you tell it. + +MC/DC Workflow +-------------- + +MC/DC uses an ``input`` -> ``run`` -> ``post-process`` workflow, where users + +#. build input decks using scripts that import ``mcdc`` as a package and call functions to build geometries, tally meshes, and set other simulation parameters, +#. define a runtime sequence in the terminal to execute the ``input`` script (terminal operations are required for MPI calls), +#. export the results from ``.h5`` files and use the MC/DC visualizer or tools like ``matplotlib`` to view results. + +Building an Input Script +------------------------ + +Building an input deck can be a complicated and nuanced process. Depending on the type of simulation you need to build, you could end up touching most of the functions in MC/DC, or very few. +Again, the best way to start building input decks is to look at what we have already done in the ``MCDC/examples`` or ``MCDC/test`` directories. +To see more on the available input functions, look through the :doc:`../pythonapi/index` section. + +As an example, we walk through building the input for the ``MCDC/test/regression/slab_absorbium`` problem, which simulates a three-region, purely absorbing, mono-energetic slab wall. + +We start with our imports: + +.. 
code-block:: python3 + + import numpy as np + + import mcdc + +You may require more packages depending on the methods you are constructing, but most of what you need will be in these two. +Now, we define the materials for the problem: + +.. code-block:: python3 + + # Set materials + m1 = mcdc.MaterialMG(capture=np.array([1.0])) + m2 = mcdc.MaterialMG(capture=np.array([1.5])) + m3 = mcdc.MaterialMG(capture=np.array([2.0])) + +In this problem we only have mono-energetic capture, but MC/DC has support for multi-group (capture, scatter, fission) and continuous energy (capture, scatter, fission). +Multi-group materials are created with ``mcdc.MaterialMG``; for example, a 3-group capture cross section would be ``capture=np.array([1.0, 1.1, 0.8])``. +Continuous-energy materials are created with ``mcdc.Material``. + +If you are a member of CEMeNT, we have internal repositories containing the data required for continuous-energy simulation. +Unfortunately due to export controls we can not publicly distribute this data. +If you are looking for cross-section data to plug into MC/DC, we recommend you look at OpenMC or `NJOY `_. + +After setting material data, we define the problem space by setting up surfaces with their boundary conditions. +If no boundary condition is defined, the surface is assumed to be internal (``boundary_condition="none"``). +Surfaces are created using class methods on ``mcdc.Surface`` (e.g., ``PlaneX``, ``PlaneY``, ``PlaneZ``, ``Sphere``, ``CylinderZ``). + +.. code-block:: python3 + + # Set surfaces + s1 = mcdc.Surface.PlaneZ(z=0.0, boundary_condition="vacuum") + s2 = mcdc.Surface.PlaneZ(z=2.0) + s3 = mcdc.Surface.PlaneZ(z=4.0) + s4 = mcdc.Surface.PlaneZ(z=6.0, boundary_condition="vacuum") + +Remember that the radiation transport equation is a 7-dimensional integro-differential equation, +so it's possible your problem will need both initial and boundary conditions. 
+While we have tried to include warnings and errors if an ill-posed problem is detected, +we cannot forecast all the ways in which things might go haywire. +For transient simulations, initial conditions are assumed to be 0 everywhere. + +We create problem geometry using cells, which are defined by the surfaces that constrain them and the material that fills them. +The ``+/-`` convention is used to indicate whether the cell volume is outside (+) or inside (-) a given surface. +For example, below, the first cell is filled with material m2 and is positive with respect to s1, negative with respect to s2. +This corresponds to being bound on the left by s1 and on the right by s2. +Cells are created with ``mcdc.Cell``, using the ``region`` and ``fill`` keyword arguments. + +.. code-block:: python3 + + mcdc.Cell(region=+s1 & -s2, fill=m2) + mcdc.Cell(region=+s2 & -s3, fill=m3) + mcdc.Cell(region=+s3 & -s4, fill=m1) + +We define a uniform isotropic source throughout the domain: + +.. code-block:: python3 + + mcdc.Source(z=[0.0, 6.0], isotropic=True, energy_group=0) + +Next we set tallies and specify the specific parameters of interest. Here, we're interested in the space-averaged flux +and collision rate. A mesh is created first, then a mesh-filtered ``Tally`` is constructed on that mesh. +Direction bins can also be specified on the tally. +Regardless of problem specifics, particles are simulated through all space, direction, and time; +the tally definitions are used to indicate in which dimensions a record of particle behavior should be kept. +Available scores include ``"flux"``, ``"density"``, ``"collision"``, ``"capture"``, ``"fission"``, and ``"net-current"``. + +.. code-block:: python3 + + # Tally: cell-average fluxes and collisions + mesh = mcdc.MeshStructured(z=np.linspace(0.0, 6.0, 61)) + mcdc.Tally( + mesh=mesh, + scores=["flux", "collision"], + mu=np.linspace(-1.0, 1.0, 32 + 1), + ) + +Next we set simulation settings. 
The only required setting is the number of particles. +Settings are configured by assigning attributes on the ``mcdc.settings`` singleton. +Additional settings include, for example, the cycles to use for a k-eigenvalue problem +(via ``mcdc.settings.set_eigenmode(...)``) or the output file name. + +.. code-block:: python3 + + mcdc.settings.N_particle = 1000 + +Finally, execute the problem. + +.. code-block:: python3 + + mcdc.run() + +Put together, our example ``input.py`` file: + +.. code-block:: python3 + + import numpy as np + import mcdc + + # ============================================================================= + # Set model + # ============================================================================= + # Three slab layers with different purely-absorbing materials + + # Set materials + m1 = mcdc.MaterialMG(capture=np.array([1.0])) + m2 = mcdc.MaterialMG(capture=np.array([1.5])) + m3 = mcdc.MaterialMG(capture=np.array([2.0])) + + # Set surfaces + s1 = mcdc.Surface.PlaneZ(z=0.0, boundary_condition="vacuum") + s2 = mcdc.Surface.PlaneZ(z=2.0) + s3 = mcdc.Surface.PlaneZ(z=4.0) + s4 = mcdc.Surface.PlaneZ(z=6.0, boundary_condition="vacuum") + + # Set cells + mcdc.Cell(region=+s1 & -s2, fill=m2) + mcdc.Cell(region=+s2 & -s3, fill=m3) + mcdc.Cell(region=+s3 & -s4, fill=m1) + + # ============================================================================= + # Set source + # ============================================================================= + # Uniform isotropic source throughout the domain + + mcdc.Source(z=[0.0, 6.0], isotropic=True, energy_group=0) + + # ============================================================================= + # Set tally, setting, and run mcdc + # ============================================================================= + + # Tally: cell-average fluxes and collisions + mesh = mcdc.MeshStructured(z=np.linspace(0.0, 6.0, 61)) + mcdc.Tally( + mesh=mesh, + scores=["flux", "collision"], + mu=np.linspace(-1.0, 1.0, 32 + 1), 
+ ) + + # Setting + mcdc.settings.N_particle = 1000 + + # Run + mcdc.run() + +Now that we have a script to run, how do we actually run it? + +Running a Simulation +-------------------- + +MC/DC supports execution purely in the Python interpreter, compiled to CPUs (x86, ARM64 and Power9-64), +and GPUs (AMD and Nvidia) and supports threading with MPI (Python or compiled modes). +Other guides are included to execute in these modes but for the sake of this first +MC/DC simulation we will simply execute in Python mode (slower, no acceleration) simply with + +.. code-block:: python3 + + python input.py + +from a command line. +For more performance see how to execute MC/DC on CPUs and GPUs + +Postprocessing Results +---------------------- + +While the entire workflow of running and post-processing MC/DC could be done in one script, +unless the problem is very small (or you're an expert), +we recommend using separate simulation and post-processing/visualization scripts. + +When a problem is executed tallied results are compiled, compressed, and saved in ``.h5`` files. +The size of these files can vary widely depending on your tally settings, +the geometric size of the problem (e.g. number of surfaces), and the number of particles tracked. +Expect sizes as small as ``kB`` or as large as ``TB``. + +These result files can be exported, manipulated, and visualized. +Data can be pulled from an ``.h5`` file using something like, + +.. 
code-block:: python3 + + import h5py + import numpy as np + # Load results + with h5py.File("output.h5", "r") as f: + # The tally name matches the auto-generated name (e.g., "mesh_tally_0") + tally_name = list(f["tallies"].keys())[0] + tally = f[f"tallies/{tally_name}"] + + z = tally["grid/z"][:] + dz = z[1:] - z[:-1] + z_mid = 0.5 * (z[:-1] + z[1:]) + + mu = tally["grid/mu"][:] + dmu = mu[1:] - mu[:-1] + mu_mid = 0.5 * (mu[:-1] + mu[1:]) + + psi = tally["flux/mean"][:] + psi_sd = tally["flux/sdev"][:] + +While there can be some nuance to the dimensions of these data arrays, the folder structures should be evident from your tally settings. +You can see the structure of the file layer-by-layer using the ``keys`` attribute of an h5 group. +For example, ``f.keys()`` will return + +.. code-block:: bash + + + +and ``f['tallies'].keys()`` will list all tally names. + +If needed, you can look around a ``.h5`` file using something like `h5Viewer `_ (which on linux can be installed with ``sudo apt-get install hdfview``). +Otherwise these arrays can then be manipulated and modified like any other. +Results are stored as NumPy arrays, so any tool that works with NumPy arrays (*e.g.*, SciPy and Pandas) +can be used to analyze the data from your simulations. + +A tool like ``matplotlib`` will work great for plotting results. +For more complex simulations, open source professional visualization software like +`Paraview `_ or `Visit `_ are available. + +As the problem we ran above is pretty simple and has no scattering or fission, we have an `analytic solution we can import `_: + +.. code-block:: python3 + + from reference import reference + +In the script below, we plot the space-averaged flux and space-averaged current, including their statistical noise. +We also use the space-averaged flux and current to compute a new quantity, the space-averaged angular flux, and +plot it over space and angle in a heat map. 
+Remember that when reporting results from a Monte Carlo solver, you should **always include the statistical error!** + + +.. code-block:: python3 + + import matplotlib.pyplot as plt + import numpy as np + + I = len(z) - 1 + N = len(mu) - 1 + + # Scalar flux + phi = np.zeros(I) + phi_sd = np.zeros(I) + for i in range(I): + phi[i] += np.sum(psi[i, :]) + phi_sd[i] += np.linalg.norm(psi_sd[i, :]) + + # Normalize + phi /= dz + phi_sd /= dz + J /= dz + J_sd /= dz + for n in range(N): + psi[:, n] = psi[:, n] / dz / dmu[n] + psi_sd[:, n] = psi_sd[:, n] / dz / dmu[n] + + # Reference solution + phi_ref, J_ref, psi_ref = reference(z, mu) + + # Flux - spatial average + plt.plot(z_mid, phi, "-b", label="MC") + plt.fill_between(z_mid, phi - phi_sd, phi + phi_sd, alpha=0.2, color="b") + plt.plot(z_mid, phi_ref, "--r", label="Ref.") + plt.xlabel(r"$z$, cm") + plt.ylabel("Flux") + plt.ylim([0.06, 0.16]) + plt.grid() + plt.legend() + plt.title(r"$\bar{\phi}_i$") + plt.show() + + # Current - spatial average + plt.plot(z_mid, J, "-b", label="MC") + plt.fill_between(z_mid, J - J_sd, J + J_sd, alpha=0.2, color="b") + plt.plot(z_mid, J_ref, "--r", label="Ref.") + plt.xlabel(r"$z$, cm") + plt.ylabel("Current") + plt.ylim([-0.03, 0.045]) + plt.grid() + plt.legend() + plt.title(r"$\bar{J}_i$") + plt.show() + + # Angular flux - spatial average + vmin = min(np.min(psi_ref), np.min(psi)) + vmax = max(np.max(psi_ref), np.max(psi)) + fig, ax = plt.subplots(1, 2, sharey=True) + Z, MU = np.meshgrid(z_mid, mu_mid) + im = ax[0].pcolormesh(MU.T, Z.T, psi_ref, vmin=vmin, vmax=vmax) + ax[0].set_xlabel(r"Polar cosine, $\mu$") + ax[0].set_ylabel(r"$z$") + ax[0].set_title(r"\psi") + ax[0].set_title(r"$\bar{\psi}_i(\mu)$ [Ref.]") + ax[1].pcolormesh(MU.T, Z.T, psi, vmin=vmin, vmax=vmax) + ax[1].set_xlabel(r"Polar cosine, $\mu$") + ax[1].set_ylabel(r"$z$") + ax[1].set_title(r"$\bar{\psi}_i(\mu)$ [MC]") + fig.subplots_adjust(right=0.8) + cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7]) + cbar = 
fig.colorbar(im, cax=cbar_ax) + cbar.set_label("Angular flux") + plt.show() + +While this script does look rather long, most of these commands are controlling things like axis labels and whatnot. +But at the end we have something like this. + +.. image:: ../images/user/sf_slab_1.png + :width: 266 + :alt: Reference v computed scalar flux, 1e3 particles +.. image:: ../images/user/j_slab_1.png + :width: 266 + :alt: Reference v computed current, 1e3 particles +.. image:: ../images/user/af_slab_1.png + :width: 266 + :alt: Reference v computed angular flux, 1e3 particles + +Notice how noisy these solutions are? We only ran 1e3 particles. +We need more particles to get a less statistically noisy, more converged solution. +Here's results from the same simulation run with 1e6 particles: + +.. image:: ../images/user/sf_slab_2.png + :width: 266 + :alt: Reference v computed scalar flux, 1e6 particles +.. image:: ../images/user/j_slab_2.png + :width: 266 + :alt: Reference v computed current, 1e6 particles +.. image:: ../images/user/af_slab_2.png + :width: 266 + :alt: Reference v computed angular flux, 1e6 particles + +This is much better converged around the analytic solution. +As with everything else, the best way to see what you can do is sniff around the examples. +We have examples with animated solutions, subplots, moving regions and more! + +Additional Simulation Results +----------------------------- + +- Neutron flux distribution on a shielded dog-leg vacuum channel after a neutron pulse is completed + +.. image:: ../images/user/kobayashi-white.png + :width: 266 + :alt: Neutron flux distribution on a shielded dog-leg vacuum channel after a neutron pulse is completed + +- Bottom-view of a micro reactor fission rate distribution when a control rod-driven runaway prompt supercritical occurs + +.. 
image:: ../images/user/c5g7.png + :width: 266 + :alt: Bottom-view of a micro reactor fission rate distribution when a control rod-driven runaway prompt supercritical occurs + +- Fission and flux bursts of a neutron excursion driven by a drop of highly-enriched uranium. + +.. image:: ../images/user/dragon.gif + :width: 266 + :alt: Fission and flux bursts of a neutron excursion driven by a drop of highly-enriched uranium + +------------------------------------- +MC/DC's built in model ``visualizer`` +------------------------------------- diff --git a/mcdc/docs/source/user/gpu.rst b/mcdc/docs/source/user/gpu.rst new file mode 100644 index 000000000..ab0d1234c --- /dev/null +++ b/mcdc/docs/source/user/gpu.rst @@ -0,0 +1,78 @@ + +.. _gpu: + +===================== +Running MC/DC on GPUs +===================== + +MC/DC supports most of its Numba enabled features for GPU compilation. +When targeting GPUs execution MC/DC uses the Harmonize library to schedule events. +Harmonize acts as the GPU runtime for MC/DC and has two major scheduling schemes including a novel asynchronous event scheduler. +For more information on Harmonize and how we compile MC/DC with it see our publications in M&C 2025. + +Single GPU Launches +------------------- + +To run problems on the GPU evoke input decks with a ``--mode=numba --target=gpu`` option appended on the python command. +For example, +.. code-block:: sh + + python input.py --mode=numba --target=gpu + +A cache folder will be generated in the same directory as the input deck titled ``__harmonize_cache__`` which contains the intermediate compiler representations and compiled biniaries. + +MC/DC Harmonize Runtime Options +------------------------------- + +At runtime the user can interface with the Harmonize scheduler that MC/DC uses as its GPU runtime. +Configurable options include: + +#. Specifying scheduling modes with ``--gpu_strategy=`` either ``event`` (default) or ``async`` (only enabled for Nvidia GPUs) +#. 
Declaring the GPU arena size (size of memory allocated on the GPU measured in particles) ``--gpu_arena_size= [int_value]`` +#. Clearing the previous cache (and forcing recompilation) ``--clear_cache`` +#. Requesting Harmonize to cache its results: ``--caching`` +#. Clearing the previous cache and making a new one: ``--clear_cache --caching`` + +Other configurable compile-time options are available in ` ``harmonize/python/config.py`` `_ starting on line 15. + +#. Verbose compiler operations: ``VERBOSE = False/True`` +#. Harmonize debug mode: ``DEBUG = False/True`` +#. Printing raw compiled errors: ``ERROR_PRINT = True/False`` +#. Using color terminal printing ``COLOR_LOG = True/False`` + +MPI+GPU Operability +------------------- + +Multi-GPU runs are enabled and require only to be dispatched with appropriate MPI calls. +The workflow for MPI+GPU calls is the same as with normal MPI calls and looks something like (assuming you are on an HPC): + +#. load modules +#. source python environment (either with conda or venv) +#. launch nodes (either interactive or batch) +#. evoke MPI calls + +For example on an interactive node using SLURM it would look something like +.. code-block:: sh + + module load + source python_venv/bin/activate + salloc -N 1 + srun python mcdc_input.py + +Or when using `flux `_ scheduler (the scheduler LLNL scheduler uses on `Tioga and El Capitan `_) (assuming an interactive node ``salloc -n1``): +.. code-block:: sh + + flux run -N 2 -n 8 -g 1 --queue=mi300a python3 input.py --mode=numba --target=gpu --gpu_arena_size=100000000 --gpu_strategy=event + +which launches event scheduled MC/DC on GPUs with a GPU arena 1e9 2 nodes with 8 GPUs total (4/node) on the MI300A partition. +An example of `LSF `_ scheduling (the scheduler LLNL uses on `Lassen `_) assuming an interactive node (``lalloc 1``) +.. 
code-block:: sh + + jsrun -n 4 -r 4 -a 1 -g 1 python input.py --mode=numba --target=gpu --gpu_strategy=async + +which launches async scheduled MC/DC on Nvidia GPUs with a GPU arena of 1e9 on 1 node with 4 GPUs total (4/node). + +GPU Profiling +------------- + +Pro diff --git a/mcdc/docs/source/user/index.rst b/mcdc/docs/source/user/index.rst new file mode 100644 index 000000000..a67d3fc2e --- /dev/null +++ b/mcdc/docs/source/user/index.rst @@ -0,0 +1,36 @@ +.. _users: + +============ +User's Guide +============ + +We include a simple "first simulation guide" as well as more in-depth descriptions on how to execute MC/DC in compiled modes to CPUs and GPUs with or without MPI. + +These instructions all assume you have an operable and working version of MC/DC installed in an appropriate environment for your system. + +Getting Started +--------------- + +.. toctree:: + :maxdepth: 1 + + first_mcdc + +Execution Modes +--------------- + +.. toctree:: + :maxdepth: 1 + + cpu + gpu + batch_scripts + +Help & Support +-------------- + +.. toctree:: + :maxdepth: 1 + + faq + troubleshooting diff --git a/mcdc/docs/source/user/troubleshooting.rst b/mcdc/docs/source/user/troubleshooting.rst new file mode 100644 index 000000000..1e3595fbe --- /dev/null +++ b/mcdc/docs/source/user/troubleshooting.rst @@ -0,0 +1,241 @@ +.. _troubleshooting: + +=============== +Troubleshooting +=============== + +This page collects solutions to common installation and runtime problems. +If your issue is not listed here, please check our +`GitHub issues `_ +or open a new one. + +Numba Version Compatibility +---------------------------- + +MC/DC requires **Numba >= 0.60.0**. +Symptoms of version mismatch include ``TypingError``, unexpected ``LoweringError``, +or missing ``@njit`` features. + +Check your version: + +.. code-block:: sh + + python -c "import numba; print(numba.__version__)" + +**Upgrading Numba:** + +.. 
code-block:: sh + + # pip + pip install --upgrade 'numba>=0.60.0' + + # conda + conda install numba>=0.60.0 -c conda-forge + +**Pinning Numba for CUDA compatibility:** + +If your system requires a specific CUDA toolkit, +Numba and ``cuda-toolkit`` versions must match. +For example, CUDA 11.8 works best with Numba 0.60.x: + +.. code-block:: sh + + conda install numba=0.60 cudatoolkit=11.8 -c conda-forge + +**Patching Numba for AMD GPUs (HIP):** + +AMD ROCm GPU support requires a patched Numba build. +Follow the `numba-hip instructions `_ +to apply the HIP target triple patch. +This is required before installing Harmonize for AMD targets. +See also :ref:`install-amd-gpus`. + + +Building mpi4py from Source +---------------------------- + +On most HPCs, prebuilt mpi4py wheels are incompatible with the system MPI library. +Symptoms include ``MPI_Init`` failures, segfaults at launch, or +``ImportError: libmpi.so: cannot open shared object file``. + +**Step 1 — Load the correct MPI module:** + +.. code-block:: sh + + # Examples for different systems: + module load mvapich2 # Quartz (LLNL) + module load gcc/8 cuda/11.8 # Lassen (LLNL) + module load cray-mpich # Tioga / El Capitan (LLNL) + +**Step 2 — Install mpi4py from source:** + +.. code-block:: sh + + CC=mpicc pip install --no-cache-dir --no-binary mpi4py mpi4py + +Or, if using conda with the ``install.sh`` script: + +.. code-block:: sh + + bash install.sh --hpc + +The ``--hpc`` flag instructs the install script to build mpi4py from source +using the currently loaded MPI module. + +**Verifying the installation:** + +.. code-block:: sh + + python -c "from mpi4py import MPI; print(MPI.Get_library_version())" + +This should print the system MPI library version (e.g., MVAPICH2, Open MPI, Cray MPICH). + + +HPC Module Environments +------------------------ + +Before installing or running MC/DC on an HPC, load the required modules. +Incorrect or missing modules are the most common source of build failures. + +.. 
list-table:: Recommended module loads by system + :widths: 20 40 40 + :header-rows: 1 + + * - **System** + - **Module loads** + - **Notes** + * - Quartz (LLNL) + - ``module load python/3.11`` + - Default ``intel-classic`` + ``mvapich2`` are sufficient + * - Dane (LLNL) + - ``module load python/3.11`` + - x86_64, similar to Quartz + * - Lassen (LLNL) + - ``module load gcc/8 cuda/11.8`` + - POWER9; conda recommended + * - Tioga (LLNL) + - ``module load cray-mpich rocm/6.0.0`` + - AMD MI250X GPUs + * - El Capitan (LLNL) + - ``module load cray-mpich rocm/6.0.0`` + - AMD MI300A GPUs + +After loading modules, activate your Python environment (venv or conda) +before running ``pip install`` or ``bash install.sh``. + +Container Errors +---------------- + +``lsetxattr`` error +~~~~~~~~~~~~~~~~~~~ +Cause: Podman storage on network filesystem. + +``setgroups 65534 failed`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Cause: Rootless Podman user mapping. + +``permission denied`` +~~~~~~~~~~~~~~~~~~~~~ +Fix: + +.. code-block:: bash + + podman run --rm -it --user root mcdc:dev + +``Out of memory`` (Apptainer) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Use sandbox mode. + +``HYDU_create_process`` error +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Use: + +.. code-block:: bash + + mpirun -launcher fork -n 4 python input.py + + +Common Runtime Errors +---------------------- + +**"No module named 'mcdc'"** + +Your Python environment is not activated, or MC/DC was installed in a different environment. +Activate the correct one: + +.. code-block:: sh + + source /bin/activate + # or + conda activate + +**"MCDC_LIB is not set" when running continuous-energy problems** + +Set the ``MCDC_LIB`` environment variable to point to your data library directory: + +.. code-block:: sh + + export MCDC_LIB=/path/to/mcdc_xsec_library + +See :ref:`install-data-library`. + +**Numba compilation takes very long (> 2 minutes)** + +First compilation is expected to be slow (15–80 s depending on problem complexity). 
+Use ``--caching`` to save compiled binaries: + +.. code-block:: sh + + python input.py --mode=numba --caching + +Subsequent runs will skip compilation. +If compilation seems stuck, check that you are not running +on a login node with limited resources. + +**Segmentation fault during MPI runs** + +This typically indicates an mpi4py / system MPI mismatch. +Rebuild mpi4py from source as described above. +Also ensure that the number of MPI ranks does not exceed available cores: + +.. code-block:: sh + + srun -n python input.py --mode=numba + +**"AttributeError: 'list' object has no attribute 'ID'" in** ``mcdc.cell()`` + +This error occurs when passing a Python list instead of using the ``&`` (intersection) +and ``|`` (union) region operators. +Use the operator syntax: + +.. code-block:: python3 + + # Correct + mcdc.Cell(region=+s1 & -s2, fill=material) + + # Wrong — do NOT use a list + mcdc.Cell(region=[+s1, -s2], fill=material) + +See `#348 `_. + + +Bugs and Issues +--------------- + +Our documentation is in the early stages of development, so thank you for bearing with us +while we bring it up to snuff. If you find a novel bug or anything else you feel we should +be aware of, feel free to `open an issue `_. + +Getting More Help +~~~~~~~~~~~~~~~~~ + +If you are still stuck after reviewing this troubleshooting guide: + +#. Search the `GitHub issues `_ for similar problems. +#. Run in debug mode for more informative error messages: + + .. code-block:: sh + + python input.py --mode=numba_debug + +#. Open a new issue with your error message, Python/Numba versions, and platform info. 
diff --git a/mcdc/examples/c5g7/MGXS-C5G7-TD.h5 b/mcdc/examples/c5g7/MGXS-C5G7-TD.h5 new file mode 100644 index 000000000..218b300f5 Binary files /dev/null and b/mcdc/examples/c5g7/MGXS-C5G7-TD.h5 differ diff --git a/mcdc/examples/c5g7/k-eigenvalue/input.py b/mcdc/examples/c5g7/k-eigenvalue/input.py new file mode 100644 index 000000000..15ecfdc2e --- /dev/null +++ b/mcdc/examples/c5g7/k-eigenvalue/input.py @@ -0,0 +1,306 @@ +import h5py +import numpy as np + +import mcdc + +# ============================================================================= +# Materials +# ============================================================================= + +# Load material data +lib = h5py.File("../MGXS-C5G7-TD.h5", "r") + + +# Setter +def set_mat(mat): + return mcdc.MaterialMG( + capture=mat["capture"][:], + scatter=mat["scatter"][:], + fission=mat["fission"][:], + nu_p=mat["nu_p"][:], + nu_d=mat["nu_d"][:], + chi_p=mat["chi_p"][:], + chi_d=mat["chi_d"][:], + speed=mat["speed"][:], + decay_rate=mat["decay"][:], + ) + + +# Materials +mat_uo2 = set_mat(lib["uo2"]) # Fuel: UO2 +mat_mox43 = set_mat(lib["mox43"]) # Fuel: MOX 4.3% +mat_mox7 = set_mat(lib["mox7"]) # Fuel: MOX 7.0% +mat_mox87 = set_mat(lib["mox87"]) # Fuel: MOX 8.7% +mat_gt = set_mat(lib["gt"]) # Guide tube +mat_fc = set_mat(lib["fc"]) # Fission chamber +mat_cr = set_mat(lib["cr"]) # Control rod +mat_mod = set_mat(lib["mod"]) # Moderator + +# ============================================================================= +# Pin cells +# ============================================================================= + +pitch = 1.26 +radius = 0.54 +core_height = 128.52 +refl_thick = 21.42 + +# Control rod banks fractions +# All out: 0.0 +# All in : 1.0 +cr1 = 0.0 +cr2 = 0.0 +cr3 = 0.0 +cr4 = 0.0 +# Control rod banks interfaces +cr1 = core_height * (0.5 - cr1) +cr2 = core_height * (0.5 - cr2) +cr3 = core_height * (0.5 - cr3) +cr4 = core_height * (0.5 - cr4) + +# Surfaces +cy = mcdc.Surface.CylinderZ(center=[0.0, 0.0], 
radius=radius) +z1 = mcdc.Surface.PlaneZ(z=cr1) # Control rod banks interfaces +z2 = mcdc.Surface.PlaneZ(z=cr2) +z3 = mcdc.Surface.PlaneZ(z=cr3) +z4 = mcdc.Surface.PlaneZ(z=cr4) +zf = mcdc.Surface.PlaneZ(z=core_height / 2) + +# Fission chamber +fc = mcdc.Cell(-cy, mat_fc) +mod = mcdc.Cell(+cy, mat_mod) +fission_chamber = mcdc.Universe(cells=[fc, mod]) + +# Fuel rods +uo2 = mcdc.Cell(-cy & -zf, mat_uo2) +mox4 = mcdc.Cell(-cy & -zf, mat_mox43) +mox7 = mcdc.Cell(-cy & -zf, mat_mox7) +mox8 = mcdc.Cell(-cy & -zf, mat_mox87) +moda = mcdc.Cell(-cy & +zf, mat_mod) # Water above pin +fuel_uo2 = mcdc.Universe(cells=[uo2, mod, moda]) +fuel_mox43 = mcdc.Universe(cells=[mox4, mod, moda]) +fuel_mox7 = mcdc.Universe(cells=[mox7, mod, moda]) +fuel_mox87 = mcdc.Universe(cells=[mox8, mod, moda]) + +# Control rods and guide tubes +cr1 = mcdc.Cell(-cy & +z1, mat_cr) +cr2 = mcdc.Cell(-cy & +z2, mat_cr) +cr3 = mcdc.Cell(-cy & +z3, mat_cr) +cr4 = mcdc.Cell(-cy & +z4, mat_cr) +gt1 = mcdc.Cell(-cy & -z1, mat_gt) +gt2 = mcdc.Cell(-cy & -z2, mat_gt) +gt3 = mcdc.Cell(-cy & -z3, mat_gt) +gt4 = mcdc.Cell(-cy & -z4, mat_gt) +control_rod1 = mcdc.Universe(cells=[cr1, gt1, mod]) +control_rod2 = mcdc.Universe(cells=[cr2, gt2, mod]) +control_rod3 = mcdc.Universe(cells=[cr3, gt3, mod]) +control_rod4 = mcdc.Universe(cells=[cr4, gt4, mod]) + +# ============================================================================= +# Fuel lattices +# ============================================================================= + +# UO2 lattice 1 +u = fuel_uo2 +c = control_rod1 +f = fission_chamber +lattice_1 = mcdc.Lattice( + x=[-pitch * 17 / 2, pitch, 17], + y=[-pitch * 17 / 2, pitch, 17], + universes=[ + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, c, u, u, c, u, u, c, u, u, u, u, u], + [u, u, u, c, u, u, u, u, u, u, u, u, u, c, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, c, u, u, c, 
u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, f, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, c, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, c, u, u, u, u, u, u, u, u, u, c, u, u, u], + [u, u, u, u, u, c, u, u, c, u, u, c, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + ], +) + +# MOX lattice 2 +l = fuel_mox43 +m = fuel_mox7 +n = fuel_mox87 +c = control_rod2 +f = fission_chamber +lattice_2 = mcdc.Lattice( + x=[-pitch * 17 / 2, pitch, 17], + y=[-pitch * 17 / 2, pitch, 17], + universes=[ + [l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l], + [l, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, l], + [l, m, m, m, m, c, m, m, c, m, m, c, m, m, m, m, l], + [l, m, m, c, m, n, n, n, n, n, n, n, m, c, m, m, l], + [l, m, m, m, n, n, n, n, n, n, n, n, n, m, m, m, l], + [l, m, c, n, n, c, n, n, c, n, n, c, n, n, c, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, c, n, n, c, n, n, f, n, n, c, n, n, c, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, c, n, n, c, n, n, c, n, n, c, n, n, c, m, l], + [l, m, m, m, n, n, n, n, n, n, n, n, n, m, m, m, l], + [l, m, m, c, m, n, n, n, n, n, n, n, m, c, m, m, l], + [l, m, m, m, m, c, m, m, c, m, m, c, m, m, m, m, l], + [l, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, l], + [l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l], + ], +) + +# MOX lattice 3 +l = fuel_mox43 +m = fuel_mox7 +n = fuel_mox87 +c = control_rod3 +f = fission_chamber +lattice_3 = mcdc.Lattice( + x=[-pitch * 17 / 2, pitch, 17], + y=[-pitch * 17 / 2, pitch, 17], + universes=[ 
+ [l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l], + [l, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, l], + [l, m, m, m, m, c, m, m, c, m, m, c, m, m, m, m, l], + [l, m, m, c, m, n, n, n, n, n, n, n, m, c, m, m, l], + [l, m, m, m, n, n, n, n, n, n, n, n, n, m, m, m, l], + [l, m, c, n, n, c, n, n, c, n, n, c, n, n, c, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, c, n, n, c, n, n, f, n, n, c, n, n, c, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, c, n, n, c, n, n, c, n, n, c, n, n, c, m, l], + [l, m, m, m, n, n, n, n, n, n, n, n, n, m, m, m, l], + [l, m, m, c, m, n, n, n, n, n, n, n, m, c, m, m, l], + [l, m, m, m, m, c, m, m, c, m, m, c, m, m, m, m, l], + [l, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, l], + [l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l], + ], +) + +# UO2 lattice 4 +u = fuel_uo2 +c = control_rod4 +f = fission_chamber +lattice_4 = mcdc.Lattice( + x=[-pitch * 17 / 2, pitch, 17], + y=[-pitch * 17 / 2, pitch, 17], + universes=[ + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, c, u, u, c, u, u, c, u, u, u, u, u], + [u, u, u, c, u, u, u, u, u, u, u, u, u, c, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, c, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, f, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, c, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, c, u, u, u, u, u, u, u, u, u, c, u, u, u], + [u, u, u, u, u, c, u, u, c, u, u, c, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, 
u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + ], +) + +# ============================================================================= +# Assemblies and core +# ============================================================================= + +# Surfaces +x0 = mcdc.Surface.PlaneX(x=0.0, boundary_condition="reflective") +x1 = mcdc.Surface.PlaneX(x=pitch * 17) +x2 = mcdc.Surface.PlaneX(x=pitch * 17 * 2) +x3 = mcdc.Surface.PlaneX(x=pitch * 17 * 3, boundary_condition="vacuum") + +y0 = mcdc.Surface.PlaneY(y=-pitch * 17 * 3, boundary_condition="vacuum") +y1 = mcdc.Surface.PlaneY(y=-pitch * 17 * 2) +y2 = mcdc.Surface.PlaneY(y=-pitch * 17) +y3 = mcdc.Surface.PlaneY(y=0.0, boundary_condition="reflective") + +z0 = mcdc.Surface.PlaneZ(z=-(core_height / 2 + refl_thick), boundary_condition="vacuum") +z1 = mcdc.Surface.PlaneZ(z=-(core_height / 2)) +z2 = mcdc.Surface.PlaneZ(z=(core_height / 2 + refl_thick), boundary_condition="vacuum") + +# Assembly cells +center = np.array([pitch * 17 / 2, -pitch * 17 / 2, 0.0]) +assembly_1 = mcdc.Cell(+x0 & -x1 & +y2 & -y3 & +z1 & -z2, lattice_1, translation=center) + +center += np.array([pitch * 17, 0.0, 0.0]) +assembly_2 = mcdc.Cell(+x1 & -x2 & +y2 & -y3 & +z1 & -z2, lattice_2, translation=center) + +center += np.array([-pitch * 17, -pitch * 17, 0.0]) +assembly_3 = mcdc.Cell(+x0 & -x1 & +y1 & -y2 & +z1 & -z2, lattice_3, translation=center) + +center += np.array([pitch * 17, 0.0, 0.0]) +assembly_4 = mcdc.Cell(+x1 & -x2 & +y1 & -y2 & +z1 & -z2, lattice_4, translation=center) + +# Bottom reflector cell +reflector_bottom = mcdc.Cell(+x0 & -x3 & +y0 & -y3 & +z0 & -z1, mat_mod) + +# Side reflectors +reflector_south = mcdc.Cell(+x0 & -x3 & +y0 & -y1 & +z1 & -z2, mat_mod) +reflector_east = mcdc.Cell(+x2 & -x3 & +y1 & -y3 & +z1 & -z2, mat_mod) + +# Root universe +mcdc.simulation.set_root_universe( + cells=[ + assembly_1, + assembly_2, + assembly_3, + assembly_4, + reflector_bottom, + reflector_south, + reflector_east, + ], +) + +# 
============================================================================= +# Set source +# ============================================================================= + +mcdc.Source( + x=[0.0, pitch * 17 * 2], + y=[-pitch * 17 * 2, 0.0], + z=[-core_height / 2, core_height / 2], + isotropic=True, + energy_group=0, # Highest energy +) + +# ============================================================================= +# Set tallies, settings, techniques and run MC/DC +# ============================================================================= + +# Tally +x_grid = np.linspace(0.0, pitch * 17 * 3, 17 * 3 + 1) +y_grid = np.linspace(-pitch * 17 * 3, 0.0, 17 * 3 + 1) +z_grid = np.linspace( + -(core_height / 2 + refl_thick), (core_height / 2 + refl_thick), 102 + 17 * 2 + 1 +) +g_grid = np.array([-0.5, 3.5, 6.5]) # Collapsing to fast (1-4) and slow (5-7) +mesh = mcdc.MeshStructured(x=x_grid, y=y_grid, z=z_grid) +mcdc.Tally(mesh=mesh, scores=["flux"], energy=g_grid) + +# Settings +mcdc.settings.N_particle = 50 +mcdc.settings.census_bank_buffer_ratio = 4.0 +mcdc.settings.set_eigenmode(N_inactive=5, N_active=10, gyration_radius="all") + +# Techniques +mcdc.simulation.population_control() + +# Run +mcdc.run() diff --git a/mcdc/examples/c5g7/k-eigenvalue/process-output.py b/mcdc/examples/c5g7/k-eigenvalue/process-output.py new file mode 100644 index 000000000..c0f40e28b --- /dev/null +++ b/mcdc/examples/c5g7/k-eigenvalue/process-output.py @@ -0,0 +1,189 @@ +import numpy as np +import matplotlib.pyplot as plt +import h5py +from matplotlib import cm +from matplotlib import colors + +# ============================================================================= +# Plot results +# ============================================================================= + +# Results +with h5py.File("output.h5", "r") as f: + phi_avg = f["tallies/tracklength_tally_0/flux/mean"][:] + phi_sd = f["tallies/tracklength_tally_0/flux/sdev"][:] + k = f["k_cycle"][:] + k_avg = f["k_mean"][()] + 
k_sd = f["k_sdev"][()] + rg = f["gyration_radius"][:] + x = f["tallies/tracklength_tally_0/grid/x"][:] + y = f["tallies/tracklength_tally_0/grid/y"][:] + z = f["tallies/tracklength_tally_0/grid/z"][:] + +dx = x[1] - x[0] +dz = z[1] - z[0] +dV = dx * dx * dz + +phi_avg /= dV +phi_sd /= dV + +phi_fast = phi_avg[0] +phi_thermal = phi_avg[1] +phi_fast_sd = phi_sd[0] +phi_thermal_sd = phi_sd[1] + +print("k = %.5f +- %.5f" % (k_avg, k_sd)) + +# Plot +N_iter = len(k) +(p1,) = plt.plot(np.arange(1, N_iter + 1), k, "-b", label="MC") +(p2,) = plt.plot( + np.arange(1, N_iter + 1), np.ones(N_iter) * k_avg, ":r", label="MC-avg" +) +plt.fill_between( + np.arange(1, N_iter + 1), + np.ones(N_iter) * (k_avg - k_sd), + np.ones(N_iter) * (k_avg + k_sd), + alpha=0.2, + color="r", +) +plt.xlabel("Iteration #") +plt.ylabel(r"$k$") +plt.grid() +ax2 = plt.gca().twinx() +(p3,) = ax2.plot(np.arange(1, N_iter + 1), rg, "g--", label="GyRad") +plt.ylabel(r"Gyration radius [cm]") +lines = [p1, p2, p3] +plt.legend(lines, [l.get_label() for l in lines]) +plt.show() + +# X-Y plane +x_mid = 0.5 * (x[1:] + x[:-1]) +Y, X = np.meshgrid(x_mid, x_mid) +phi_fast_xy = np.sum(phi_fast, axis=2) +phi_thermal_xy = np.sum(phi_thermal, axis=2) +phi_fast_xy_sd = np.sqrt(np.sum(np.square(phi_fast_sd), axis=2)) / phi_fast_xy +phi_thermal_xy_sd = np.sqrt(np.sum(np.square(phi_thermal_sd), axis=2)) / phi_thermal_xy + +plt.pcolormesh(X, Y, phi_fast_xy, shading="nearest") +plt.colorbar() +ax = plt.gca() +ax.set_aspect("equal") +plt.xlabel(r"$x$ [cm]") +plt.ylabel(r"$y$ [cm]") +plt.title(r"Fast neutron flux") +plt.show() + +plt.pcolormesh(X, Y, phi_fast_xy_sd, shading="nearest") +plt.colorbar() +ax = plt.gca() +ax.set_aspect("equal") +plt.xlabel(r"$x$ [cm]") +plt.ylabel(r"$y$ [cm]") +plt.title(r"Fast neutron flux stdev") +plt.show() + +plt.pcolormesh(X, Y, phi_thermal_xy, shading="nearest") +plt.colorbar() +ax = plt.gca() +ax.set_aspect("equal") +plt.xlabel(r"$x$ [cm]") +plt.ylabel(r"$y$ [cm]") +plt.title(r"Thermal 
neutron flux") +plt.show() + +plt.pcolormesh(X, Y, phi_thermal_xy_sd, shading="nearest") +plt.colorbar() +ax = plt.gca() +ax.set_aspect("equal") +plt.xlabel(r"$x$ [cm]") +plt.ylabel(r"$y$ [cm]") +plt.title(r"Thermal neutron flux stdev") +plt.show() + +# X-Z plane +z_mid = 0.5 * (z[1:] + z[:-1]) +X, Z = np.meshgrid(z_mid, x_mid) +phi_fast_xz = np.sum(phi_fast, axis=1) +phi_thermal_xz = np.sum(phi_thermal, axis=1) +phi_fast_xz_sd = np.sqrt(np.sum(np.square(phi_fast_sd), axis=1)) / phi_fast_xz +phi_thermal_xz_sd = np.sqrt(np.sum(np.square(phi_thermal_sd), axis=1)) / phi_thermal_xz + +plt.pcolormesh(Z, X, phi_fast_xz, shading="nearest") +plt.colorbar() +ax = plt.gca() +ax.set_aspect("equal") +plt.xlabel(r"$x$ [cm]") +plt.ylabel(r"$z$ [cm]") +plt.title(r"Fast neutron flux") +plt.show() + +plt.pcolormesh(Z, X, phi_fast_xz_sd, shading="nearest") +plt.colorbar() +ax = plt.gca() +ax.set_aspect("equal") +plt.xlabel(r"$x$ [cm]") +plt.ylabel(r"$z$ [cm]") +plt.title(r"Fast neutron flux stdev") +plt.show() + +plt.pcolormesh(Z, X, phi_thermal_xz, shading="nearest") +plt.colorbar() +ax = plt.gca() +ax.set_aspect("equal") +plt.xlabel(r"$x$ [cm]") +plt.ylabel(r"$z$ [cm]") +plt.title(r"Thermal neutron flux") +plt.show() + +plt.pcolormesh(Z, X, phi_thermal_xz_sd, shading="nearest") +plt.colorbar() +ax = plt.gca() +ax.set_aspect("equal") +plt.xlabel(r"$x$ [cm]") +plt.ylabel(r"$z$ [cm]") +plt.title(r"Thermal neutron flux stdev") +plt.show() + +# Y-Z plane +X, Z = np.meshgrid(z_mid, x_mid) +phi_fast_xz = np.sum(phi_fast, axis=0) +phi_thermal_xz = np.sum(phi_thermal, axis=0) +phi_fast_xz_sd = np.sqrt(np.sum(np.square(phi_fast_sd), axis=0)) / phi_fast_xz +phi_thermal_xz_sd = np.sqrt(np.sum(np.square(phi_thermal_sd), axis=0)) / phi_thermal_xz + +plt.pcolormesh(Z, X, phi_fast_xz, shading="nearest") +plt.colorbar() +ax = plt.gca() +ax.set_aspect("equal") +plt.xlabel(r"$y$ [cm]") +plt.ylabel(r"$z$ [cm]") +plt.title(r"Fast neutron flux") +plt.show() + +plt.pcolormesh(Z, X, phi_fast_xz_sd, 
shading="nearest") +plt.colorbar() +ax = plt.gca() +ax.set_aspect("equal") +plt.xlabel(r"$y$ [cm]") +plt.ylabel(r"$z$ [cm]") +plt.title(r"Fast neutron flux stdev") +plt.show() + +plt.pcolormesh(Z, X, phi_thermal_xz, shading="nearest") +plt.colorbar() +ax = plt.gca() +ax.set_aspect("equal") +plt.xlabel(r"$y$ [cm]") +plt.ylabel(r"$z$ [cm]") +plt.title(r"Thermal neutron flux") +plt.show() + +plt.pcolormesh(Z, X, phi_thermal_xz_sd, shading="nearest") +plt.colorbar() +ax = plt.gca() +ax.set_aspect("equal") +plt.xlabel(r"$y$ [cm]") +plt.ylabel(r"$z$ [cm]") +plt.title(r"Thermal neutron flux stdev") +plt.show() diff --git a/mcdc/examples/c5g7/transient/input.py b/mcdc/examples/c5g7/transient/input.py new file mode 100644 index 000000000..2381f8c00 --- /dev/null +++ b/mcdc/examples/c5g7/transient/input.py @@ -0,0 +1,367 @@ +import h5py +import numpy as np + +import mcdc + +# ============================================================================= +# Materials +# ============================================================================= + +# Load material data +lib = h5py.File("../MGXS-C5G7-TD.h5", "r") + + +# Setter +def set_mat(mat): + return mcdc.MaterialMG( + capture=mat["capture"][:], + scatter=mat["scatter"][:], + fission=mat["fission"][:], + nu_p=mat["nu_p"][:], + nu_d=mat["nu_d"][:], + chi_p=mat["chi_p"][:], + chi_d=mat["chi_d"][:], + speed=mat["speed"][:], + decay_rate=mat["decay"][:], + ) + + +# Materials +mat_uo2 = set_mat(lib["uo2"]) # Fuel: UO2 +mat_mox43 = set_mat(lib["mox43"]) # Fuel: MOX 4.3% +mat_mox7 = set_mat(lib["mox7"]) # Fuel: MOX 7.0% +mat_mox87 = set_mat(lib["mox87"]) # Fuel: MOX 8.7% +mat_gt = set_mat(lib["gt"]) # Guide tube +mat_fc = set_mat(lib["fc"]) # Fission chamber +mat_cr = set_mat(lib["cr"]) # Control rod +mat_mod = set_mat(lib["mod"]) # Moderator + +# ============================================================================= +# Pin cells +# ============================================================================= + +pitch = 
1.26 +radius = 0.54 +core_height = 128.52 +reflector_thickness = 21.42 + +# Control rod banks fractions +# All out: 0.0 +# All in : 1.0 +cr1 = np.array([1.0, 1.0, 0.89, 1.0]) +cr1_t = np.array([0.0, 10.0, 15.0, 15.0 + 1.0 - cr1[-2]]) + +cr2 = np.array([1.0, 1.0, 0.0, 0.0, 0.8]) +cr2_t = np.array([0.0, 5.0, 10.0, 15.0, 15.8]) + +cr3 = np.array([0.75, 0.75, 1.0]) +cr3_t = np.array([0.0, 15.0, 15.25]) + +cr4 = np.array([1.0, 1.0, 0.5, 0.5, 1.0]) +cr4_t = np.array( + [0.0, 5.0, 5.0 + (cr4[1] - cr4[2]) / 2 * 10, 15.0, 15.0 + 1.0 - cr4[-2]] +) + +# Tips of the control rod banks +cr1_bottom = core_height * (0.5 - cr1) +cr2_bottom = core_height * (0.5 - cr2) +cr3_bottom = core_height * (0.5 - cr3) +cr4_bottom = core_height * (0.5 - cr4) +cr1_top = cr1_bottom + core_height +cr2_top = cr2_bottom + core_height +cr3_top = cr3_bottom + core_height +cr4_top = cr4_bottom + core_height + +# Durations of the moving tips +cr1_durations = cr1_t[1:] - cr1_t[:-1] +cr2_durations = cr2_t[1:] - cr2_t[:-1] +cr3_durations = cr3_t[1:] - cr3_t[:-1] +cr4_durations = cr4_t[1:] - cr4_t[:-1] + +# Velocities of the moving tips +cr1_velocities = np.zeros((len(cr1) - 1, 3)) +cr2_velocities = np.zeros((len(cr2) - 1, 3)) +cr3_velocities = np.zeros((len(cr3) - 1, 3)) +cr4_velocities = np.zeros((len(cr4) - 1, 3)) +cr1_velocities[:, 2] = (cr1_top[1:] - cr1_top[:-1]) / cr1_durations +cr2_velocities[:, 2] = (cr2_top[1:] - cr2_top[:-1]) / cr2_durations +cr3_velocities[:, 2] = (cr3_top[1:] - cr3_top[:-1]) / cr3_durations +cr4_velocities[:, 2] = (cr4_top[1:] - cr4_top[:-1]) / cr4_durations + +# Surfaces +cy = mcdc.Surface.CylinderZ(center=[0.0, 0.0], radius=radius) +# Control rod top and bottom tips +z1_top = mcdc.Surface.PlaneZ(z=cr1_top[0]) +z1_bottom = mcdc.Surface.PlaneZ(z=cr1_bottom[0]) +z2_top = mcdc.Surface.PlaneZ(z=cr2_top[0]) +z2_bottom = mcdc.Surface.PlaneZ(z=cr2_bottom[0]) +z3_top = mcdc.Surface.PlaneZ(z=cr3_top[0]) +z3_bottom = mcdc.Surface.PlaneZ(z=cr3_bottom[0]) +z4_top = 
mcdc.Surface.PlaneZ(z=cr4_top[0]) +z4_bottom = mcdc.Surface.PlaneZ(z=cr4_bottom[0]) +# Fuel top +# (Bottom is bounded by the universe cell) +zf = mcdc.Surface.PlaneZ(z=0.5 * core_height) + +# Move the control tips +z1_top.move(cr1_velocities, cr1_durations) +z1_bottom.move(cr1_velocities, cr1_durations) +z2_top.move(cr2_velocities, cr2_durations) +z2_bottom.move(cr2_velocities, cr2_durations) +z3_top.move(cr3_velocities, cr3_durations) +z3_bottom.move(cr3_velocities, cr3_durations) +z4_top.move(cr4_velocities, cr4_durations) +z4_bottom.move(cr4_velocities, cr4_durations) + +# Fission chamber pin +fc = mcdc.Cell(-cy, mat_fc) +mod = mcdc.Cell(+cy, mat_mod) +fission_chamber = mcdc.Universe(cells=[fc, mod]) + +# Fuel rods +uo2 = mcdc.Cell(-cy & -zf, mat_uo2) +mox4 = mcdc.Cell(-cy & -zf, mat_mox43) +mox7 = mcdc.Cell(-cy & -zf, mat_mox7) +mox8 = mcdc.Cell(-cy & -zf, mat_mox87) +moda = mcdc.Cell(-cy & +zf, mat_mod) # Water above pin +fuel_uo2 = mcdc.Universe(cells=[uo2, mod, moda]) +fuel_mox43 = mcdc.Universe(cells=[mox4, mod, moda]) +fuel_mox7 = mcdc.Universe(cells=[mox7, mod, moda]) +fuel_mox87 = mcdc.Universe(cells=[mox8, mod, moda]) + +# Control rods and guide tubes +cr1 = mcdc.Cell(-cy & +z1_bottom & -z1_top, mat_cr) +gt1_lower = mcdc.Cell(-cy & -z1_bottom, mat_gt) +gt1_upper = mcdc.Cell(-cy & +z1_top, mat_gt) +# +cr2 = mcdc.Cell(-cy & +z2_bottom & -z2_top, mat_cr) +gt2_lower = mcdc.Cell(-cy & -z2_bottom, mat_gt) +gt2_upper = mcdc.Cell(-cy & +z2_top, mat_gt) +# +cr3 = mcdc.Cell(-cy & +z3_bottom & -z3_top, mat_cr) +gt3_lower = mcdc.Cell(-cy & -z3_bottom, mat_gt) +gt3_upper = mcdc.Cell(-cy & +z3_top, mat_gt) +# +cr4 = mcdc.Cell(-cy & +z4_bottom & -z4_top, mat_cr) +gt4_lower = mcdc.Cell(-cy & -z4_bottom, mat_gt) +gt4_upper = mcdc.Cell(-cy & +z4_top, mat_gt) +# +control_rod1 = mcdc.Universe(cells=[cr1, gt1_lower, gt1_upper, mod]) +control_rod2 = mcdc.Universe(cells=[cr2, gt2_lower, gt2_upper, mod]) +control_rod3 = mcdc.Universe(cells=[cr3, gt3_lower, gt3_upper, mod]) 
+control_rod4 = mcdc.Universe(cells=[cr4, gt4_lower, gt4_upper, mod]) + +# ============================================================================= +# Fuel lattices +# ============================================================================= + +# UO2 lattice 1 +u = fuel_uo2 +c = control_rod1 +f = fission_chamber +lattice_1 = mcdc.Lattice( + x=[-pitch * 17 / 2, pitch, 17], + y=[-pitch * 17 / 2, pitch, 17], + universes=[ + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, c, u, u, c, u, u, c, u, u, u, u, u], + [u, u, u, c, u, u, u, u, u, u, u, u, u, c, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, c, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, f, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, c, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, c, u, u, u, u, u, u, u, u, u, c, u, u, u], + [u, u, u, u, u, c, u, u, c, u, u, c, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + ], +) + +# MOX lattice 2 +l = fuel_mox43 +m = fuel_mox7 +n = fuel_mox87 +c = control_rod2 +f = fission_chamber +lattice_2 = mcdc.Lattice( + x=[-pitch * 17 / 2, pitch, 17], + y=[-pitch * 17 / 2, pitch, 17], + universes=[ + [l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l], + [l, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, l], + [l, m, m, m, m, c, m, m, c, m, m, c, m, m, m, m, l], + [l, m, m, c, m, n, n, n, n, n, n, n, m, c, m, m, l], + [l, m, m, m, n, n, n, n, n, n, n, n, n, m, m, m, l], + [l, m, c, n, n, c, n, n, c, n, n, c, n, n, c, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, m, n, n, n, n, 
n, n, n, n, n, n, n, m, m, l], + [l, m, c, n, n, c, n, n, f, n, n, c, n, n, c, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, c, n, n, c, n, n, c, n, n, c, n, n, c, m, l], + [l, m, m, m, n, n, n, n, n, n, n, n, n, m, m, m, l], + [l, m, m, c, m, n, n, n, n, n, n, n, m, c, m, m, l], + [l, m, m, m, m, c, m, m, c, m, m, c, m, m, m, m, l], + [l, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, l], + [l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l], + ], +) + +# MOX lattice 3 +l = fuel_mox43 +m = fuel_mox7 +n = fuel_mox87 +c = control_rod3 +f = fission_chamber +lattice_3 = mcdc.Lattice( + x=[-pitch * 17 / 2, pitch, 17], + y=[-pitch * 17 / 2, pitch, 17], + universes=[ + [l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l], + [l, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, l], + [l, m, m, m, m, c, m, m, c, m, m, c, m, m, m, m, l], + [l, m, m, c, m, n, n, n, n, n, n, n, m, c, m, m, l], + [l, m, m, m, n, n, n, n, n, n, n, n, n, m, m, m, l], + [l, m, c, n, n, c, n, n, c, n, n, c, n, n, c, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, c, n, n, c, n, n, f, n, n, c, n, n, c, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, c, n, n, c, n, n, c, n, n, c, n, n, c, m, l], + [l, m, m, m, n, n, n, n, n, n, n, n, n, m, m, m, l], + [l, m, m, c, m, n, n, n, n, n, n, n, m, c, m, m, l], + [l, m, m, m, m, c, m, m, c, m, m, c, m, m, m, m, l], + [l, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, l], + [l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l], + ], +) + +# UO2 lattice 4 +u = fuel_uo2 +c = control_rod4 +f = fission_chamber +lattice_4 = mcdc.Lattice( + x=[-pitch * 17 / 2, pitch, 17], + y=[-pitch * 17 / 2, pitch, 17], + universes=[ + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, 
c, u, u, c, u, u, c, u, u, u, u, u], + [u, u, u, c, u, u, u, u, u, u, u, u, u, c, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, c, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, f, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, c, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, c, u, u, u, u, u, u, u, u, u, c, u, u, u], + [u, u, u, u, u, c, u, u, c, u, u, c, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + ], +) + +# ============================================================================= +# Assemblies and core +# ============================================================================= + +# Surfaces +x0 = mcdc.Surface.PlaneX(x=0.0, boundary_condition="reflective") +x1 = mcdc.Surface.PlaneX(x=pitch * 17) +x2 = mcdc.Surface.PlaneX(x=pitch * 17 * 2) +x3 = mcdc.Surface.PlaneX(x=pitch * 17 * 3, boundary_condition="vacuum") + +y0 = mcdc.Surface.PlaneY(y=-pitch * 17 * 3, boundary_condition="vacuum") +y1 = mcdc.Surface.PlaneY(y=-pitch * 17 * 2) +y2 = mcdc.Surface.PlaneY(y=-pitch * 17) +y3 = mcdc.Surface.PlaneY(y=0.0, boundary_condition="reflective") + +z0 = mcdc.Surface.PlaneZ( + z=-(core_height / 2 + reflector_thickness), boundary_condition="vacuum" +) +z1 = mcdc.Surface.PlaneZ(z=-(core_height / 2)) +z2 = mcdc.Surface.PlaneZ( + z=(core_height / 2 + reflector_thickness), boundary_condition="vacuum" +) + +# Assembly cells +center = np.array([pitch * 17 / 2, -pitch * 17 / 2, 0.0]) +assembly_1 = mcdc.Cell(+x0 & -x1 & +y2 & -y3 & +z1 & -z2, lattice_1, translation=center) + +center += np.array([pitch * 17, 0.0, 0.0]) +assembly_2 = mcdc.Cell(+x1 & -x2 & +y2 & -y3 & +z1 & -z2, 
lattice_2, translation=center) + +center += np.array([-pitch * 17, -pitch * 17, 0.0]) +assembly_3 = mcdc.Cell(+x0 & -x1 & +y1 & -y2 & +z1 & -z2, lattice_3, translation=center) + +center += np.array([pitch * 17, 0.0, 0.0]) +assembly_4 = mcdc.Cell(+x1 & -x2 & +y1 & -y2 & +z1 & -z2, lattice_4, translation=center) + +# Bottom reflector cell +reflector_bottom = mcdc.Cell(+x0 & -x3 & +y0 & -y3 & +z0 & -z1, mat_mod) + +# Side reflectors +reflector_south = mcdc.Cell(+x0 & -x3 & +y0 & -y1 & +z1 & -z2, mat_mod) +reflector_east = mcdc.Cell(+x2 & -x3 & +y1 & -y3 & +z1 & -z2, mat_mod) + +# Root universe +mcdc.simulation.set_root_universe( + cells=[ + assembly_1, + assembly_2, + assembly_3, + assembly_4, + reflector_bottom, + reflector_south, + reflector_east, + ], +) + +# ============================================================================= +# Set source +# ============================================================================= +# Throughout the active center pin of Assembly four, at highest energy, +# for the first 15 seconds + +source = mcdc.Source( + x=np.array([pitch * 17 * 3 / 2] * 2) + np.array([-pitch / 2, +pitch / 2]), + y=np.array([-pitch * 17 * 3 / 2] * 2) + np.array([-pitch / 2, +pitch / 2]), + z=[-core_height / 2, core_height / 2], + isotropic=True, + energy_group=0, # Highest energy + time=[0.0, 15.0], +) + +# ============================================================================= +# Set tallies, settings, techniques and run MC/DC +# ============================================================================= + +# Tallies +Nt = 100 +Nx = 17 * 2 +Ny = 17 * 2 +Nz = 17 * 6 +t = np.linspace(0.0, 20.0, Nt + 1) +x = np.linspace(0.0, pitch * 17 * 2, Nx + 1) +y = np.linspace(-pitch * 17 * 2, 0.0, Ny + 1) +z = np.linspace(-core_height / 2, core_height / 2, Nz + 1) +mesh = mcdc.MeshStructured(x=x, y=y, z=z) +mcdc.Tally(mesh=mesh, scores=["fission"], time=t) + +# Settings +mcdc.settings.N_particle = 10000 +mcdc.settings.N_batch = 2 
+mcdc.settings.active_bank_buffer = 1000 + +# Run +mcdc.run() diff --git a/mcdc/examples/c5g7/transient/plot-fission-sdev.py b/mcdc/examples/c5g7/transient/plot-fission-sdev.py new file mode 100644 index 000000000..b6e800666 --- /dev/null +++ b/mcdc/examples/c5g7/transient/plot-fission-sdev.py @@ -0,0 +1,97 @@ +import h5py +import matplotlib.pyplot as plt +import matplotlib.gridspec as gridspec +import numpy as np +import os +import shutil + +# Get fission rates +with h5py.File("output.h5", "r") as f: + fissions = f["tallies/tracklength_tally_0/fission/mean"][()] + fissions_sd = f["tallies/tracklength_tally_0/fission/sdev"][()] + + x = f["tallies/tracklength_tally_0/grid/x"][()] + y = f["tallies/tracklength_tally_0/grid/y"][()] + z = f["tallies/tracklength_tally_0/grid/z"][()] + t = f["tallies/tracklength_tally_0/grid/time"][()] + +# The grids +t_mid = 0.5 * (t[:-1] + t[1:]) +XY_X, XY_Y = np.meshgrid(x, y, indexing="ij") +XZ_X, XZ_Z = np.meshgrid(x, z, indexing="ij") +YZ_Y, YZ_Z = np.meshgrid(y, z, indexing="ij") + +# Relative stdevs +fissions_sd[fissions == 0.0] = 0.0 +non_zeros = fissions != 0.0 +fissions_sd[non_zeros] /= fissions[non_zeros] + +# Average relative stdev (in %) +fission_sd_avg = np.average(fissions_sd, axis=(1, 2, 3)) * 100.0 + +# Create clean folder for output figures +# Check if the folder exists +if os.path.exists("fission-sdev"): + shutil.rmtree("fission-sdev") # Remove the existing folder +os.makedirs("fission-sdev") # Create a new folder + +# Iterate over time step and create figures +N = len(fissions) +for i in range(N): + fission_sd = fissions_sd[i] + + # Calculate fission averages + fission_x_sd = np.average(fission_sd, axis=0) + fission_y_sd = np.average(fission_sd, axis=1) + fission_z_sd = np.average(fission_sd, axis=2) + + # Plot + fig = plt.figure(figsize=(8, 5)) + gs = gridspec.GridSpec( + 2, 3, width_ratios=[0.7, 1, 1], height_ratios=[1, 1], hspace=0.5 + ) + + ax1 = fig.add_subplot(gs[0, 0]) # Top-left + ax2 = fig.add_subplot(gs[1, 
0]) # Bottom-left + ax3 = fig.add_subplot(gs[:, 1]) # Entire second column + ax4 = fig.add_subplot(gs[:, 2]) # Entire third column + + # Total fission curve + ax1.plot(t_mid, fission_sd_avg, "b") + ax1.set_yscale("log") + ax1.set_ylabel("Average relative sdev (%)") + ax1.set_xlabel("Time") + ax1.set_title("Average relative sdev") + # Total fission point + ax1.plot(t_mid[i], fission_sd_avg[i], "ro", fillstyle="none") + + # XY fission + ax2.pcolormesh(XY_X, XY_Y, fission_z_sd) + ax2.set_aspect("equal") + ax2.set_xlabel(r"$x$") + ax2.set_ylabel(r"$y$") + ax2.set_title("Fission-XY") + + # XZ fission + ax3.pcolormesh(XZ_X, XZ_Z, fission_y_sd) + ax3.set_aspect("equal") + ax3.set_xlabel(r"$x$") + ax3.set_ylabel(r"$z$") + ax3.set_title("Fission-XZ") + pos = ax3.get_position() + ax3.set_position( + [pos.x0 + 0.02, pos.y0, pos.width, pos.height] + ) # shift right by 0.02 + + # YZ fission + ax4.pcolormesh(YZ_Y, YZ_Z, fission_x_sd) + ax4.set_aspect("equal") + ax4.set_xlabel(r"$y$") + ax4.set_ylabel(r"$z$") + ax4.set_title("Fission-YZ") + + plt.suptitle("MC/DC result - Fission Rate Relative Sdev.") + plt.savefig( + f"fission-sdev/figure_{i:03}.png", dpi=300, bbox_inches="tight", pad_inches=0 + ) + plt.close() diff --git a/mcdc/examples/c5g7/transient/plot-fission.py b/mcdc/examples/c5g7/transient/plot-fission.py new file mode 100644 index 000000000..fdcf2167c --- /dev/null +++ b/mcdc/examples/c5g7/transient/plot-fission.py @@ -0,0 +1,89 @@ +import h5py +import matplotlib.pyplot as plt +import matplotlib.gridspec as gridspec +import numpy as np +import os +import shutil + +# Get results +with h5py.File("output.h5", "r") as f: + fissions = f["tallies/tracklength_tally_0/fission/mean"][()] + x = f["tallies/tracklength_tally_0/grid/x"][()] + y = f["tallies/tracklength_tally_0/grid/y"][()] + z = f["tallies/tracklength_tally_0/grid/z"][()] + t = f["tallies/tracklength_tally_0/grid/time"][()] + +# Total fission +fission_total = np.average(fissions, axis=(1, 2, 3)) +fission_total /= 
fission_total[0] + +# The grids +t_mid = 0.5 * (t[:-1] + t[1:]) +XY_X, XY_Y = np.meshgrid(x, y, indexing="ij") +XZ_X, XZ_Z = np.meshgrid(x, z, indexing="ij") +YZ_Y, YZ_Z = np.meshgrid(y, z, indexing="ij") + +# Create clean folder for output figures +# Check if the folder exists +if os.path.exists("fission"): + shutil.rmtree("fission") # Remove the existing folder +os.makedirs("fission") # Create a new folder + +# Iterate over time step and create figures +N = len(fissions) +for i in range(N): + fission = fissions[i] + + # Calculate fission averages + fission_x = np.average(fission, axis=0) + fission_y = np.average(fission, axis=1) + fission_z = np.average(fission, axis=2) + + # Plot + fig = plt.figure(figsize=(8, 5)) + gs = gridspec.GridSpec( + 2, 3, width_ratios=[0.7, 1, 1], height_ratios=[1, 1], hspace=0.5 + ) + + ax1 = fig.add_subplot(gs[0, 0]) # Top-left + ax2 = fig.add_subplot(gs[1, 0]) # Bottom-left + ax3 = fig.add_subplot(gs[:, 1]) # Entire second column + ax4 = fig.add_subplot(gs[:, 2]) # Entire third column + + # Total fission curve + ax1.plot(t_mid, fission_total, "b") + ax1.set_yscale("log") + ax1.set_ylabel("Total fission rate") + ax1.set_xlabel("Time") + ax1.set_title("Total fission rate") + # Total fission point + ax1.plot(t_mid[i], fission_total[i], "ro", fillstyle="none") + + # XY fission + ax2.pcolormesh(XY_X, XY_Y, fission_z) + ax2.set_aspect("equal") + ax2.set_xlabel(r"$x$") + ax2.set_ylabel(r"$y$") + ax2.set_title("Fission-XY") + + # XZ fission + ax3.pcolormesh(XZ_X, XZ_Z, fission_y) + ax3.set_aspect("equal") + ax3.set_xlabel(r"$x$") + ax3.set_ylabel(r"$z$") + ax3.set_title("Fission-XZ") + pos = ax3.get_position() + ax3.set_position( + [pos.x0 + 0.02, pos.y0, pos.width, pos.height] + ) # shift right by 0.02 + + # YZ fission + ax4.pcolormesh(YZ_Y, YZ_Z, fission_x) + ax4.set_aspect("equal") + ax4.set_xlabel(r"$y$") + ax4.set_ylabel(r"$z$") + ax4.set_title("Fission-YZ") + + plt.suptitle("MC/DC result - Fission Rate") + 
plt.savefig(f"fission/figure_{i:03}.png", bbox_inches="tight", pad_inches=0) + plt.close() diff --git a/mcdc/examples/fuel_array_packaged/input.py b/mcdc/examples/fuel_array_packaged/input.py new file mode 100644 index 000000000..c507a0096 --- /dev/null +++ b/mcdc/examples/fuel_array_packaged/input.py @@ -0,0 +1,108 @@ +import numpy as np +import mcdc + +# ====================================================================================== +# Materials +# ====================================================================================== + +fuel = mcdc.MaterialMG( + capture=np.array([0.45]), + fission=np.array([0.55]), + nu_p=np.array([2.5]), +) + +cover = mcdc.MaterialMG( + capture=np.array([0.05]), + scatter=np.array([[0.95]]), +) + +water = mcdc.MaterialMG( + capture=np.array([0.02]), + scatter=np.array([[0.08]]), +) + +# ====================================================================================== +# The assembly +# ====================================================================================== + +# Surfaces +cylinder_z = mcdc.Surface.CylinderZ(center=[0.0, 0.0], radius=1.0) +cylinder_x = mcdc.Surface.CylinderX(center=[0.0, 0.0], radius=1.0) + +top_z = mcdc.Surface.PlaneZ(z=2.5) +bot_z = mcdc.Surface.PlaneZ(z=-2.5) +top_x = mcdc.Surface.PlaneX(x=2.5) +bot_x = mcdc.Surface.PlaneX(x=-2.5) + +sphere = mcdc.Surface.Sphere(center=[0.0, 0.0, 0.0], radius=3.0) + +# Cells +pellet_z = -cylinder_z & +bot_z & -top_z +pellet_x = -cylinder_x & +bot_x & -top_x +shooting_star = pellet_z | pellet_x +fuel_shooting_star = mcdc.Cell(region=shooting_star, fill=fuel) +cover_sphere = mcdc.Cell(region=-sphere & ~shooting_star, fill=cover) +water_tank = mcdc.Cell(region=+sphere, fill=water) + +# ====================================================================================== +# Copy the assembly via universe cells +# ====================================================================================== + +# Set the universe +assembly = 
mcdc.Universe(cells=[fuel_shooting_star, cover_sphere, water_tank]) + +# Set container cell surfaces +min_x = mcdc.Surface.PlaneX(x=-10.0, boundary_condition="vacuum") +mid_x = mcdc.Surface.PlaneX(x=0.0) +max_x = mcdc.Surface.PlaneX(x=10.0, boundary_condition="vacuum") +min_y = mcdc.Surface.PlaneY(y=-5.0, boundary_condition="vacuum") +max_y = mcdc.Surface.PlaneY(y=5.0, boundary_condition="vacuum") +min_z = mcdc.Surface.PlaneZ(z=-5.0, boundary_condition="vacuum") +max_z = mcdc.Surface.PlaneZ(z=5.0, boundary_condition="vacuum") + +# Make copies via universe cells +container_left = +min_y & -max_y & +min_z & -max_z & +min_x & -mid_x +container_right = +min_y & -max_y & +min_z & -max_z & +mid_x & -max_x +assembly_left = mcdc.Cell(region=container_left, fill=assembly, translation=[-5, 0, 0]) +assembly_right = mcdc.Cell( + region=container_right, fill=assembly, translation=[+5, 0, 0], rotation=[0, 10, 0] +) + +# Root universe +mcdc.simulation.set_root_universe(cells=[assembly_left, assembly_right]) + +# ====================================================================================== +# Set source +# ====================================================================================== + +mcdc.Source(x=[-0.1, 0.1], isotropic=True, energy_group=0) + +# ====================================================================================== +# Set tallies, settings, and run MC/DC +# ====================================================================================== + +# Tallies +mesh = mcdc.MeshStructured( + x=np.linspace(-10, 10, 201), + z=np.linspace(-5, 5, 101), +) +mcdc.Tally(mesh=mesh, scores=["fission"]) + +# Settings +mcdc.settings.N_particle = 1000 +mcdc.settings.N_batch = 2 +mcdc.settings.active_bank_buffer = 1000 + +# Run (or visualize) +visualize = False +if not visualize: + mcdc.run() +else: + colors = { + fuel: "red", + cover: "gray", + water: "blue", + } + mcdc.visualize( + "xz", y=0.0, x=[-11.0, 11.0], z=[-6, 6], pixels=(400, 400), colors=colors + ) 
diff --git a/mcdc/examples/fuel_array_packaged/process-output.py b/mcdc/examples/fuel_array_packaged/process-output.py new file mode 100644 index 000000000..98b271eab --- /dev/null +++ b/mcdc/examples/fuel_array_packaged/process-output.py @@ -0,0 +1,37 @@ +import matplotlib.pyplot as plt +import h5py, sys +import numpy as np + +# Load result +with h5py.File("output.h5", "r") as f: + x = f["tallies/tracklength_tally_0/grid/x"][:] + z = f["tallies/tracklength_tally_0/grid/z"][:] + dx = [x[1:] - x[:-1]][-1] + x_mid = 0.5 * (x[:-1] + x[1:]) + dz = [z[1:] - z[:-1]][-1] + z_mid = 0.5 * (z[:-1] + z[1:]) + + phi = f["tallies/tracklength_tally_0/fission/mean"][:] + phi_sd = f["tallies/tracklength_tally_0/fission/sdev"][:] + + +# Plot result +fig, ax = plt.subplots(2, 1, figsize=(4, 9)) +X, Y = np.meshgrid(z_mid, x_mid) +Z = phi +flux_plot = ax[0].pcolormesh(Y, X, Z) +ax[0].set_aspect("equal") +ax[0].set_ylabel(r"$x$ [cm]") +ax[0].set_xlabel(r"$z$ [cm]") +fig.colorbar(flux_plot, ax=ax[0], orientation="horizontal") +ax[0].set_title("Flux") +# +Z = phi_sd / phi +sdev_plot = ax[1].pcolormesh(Y, X, Z) +ax[1].set_aspect("equal") +ax[1].set_ylabel(r"$x$ [cm]") +ax[1].set_xlabel(r"$z$ [cm]") +fig.colorbar(sdev_plot, ax=ax[1], orientation="horizontal") +ax[1].set_title("Standard Deviation [%]") + +plt.show() diff --git a/mcdc/examples/kobayashi-TD/input.py b/mcdc/examples/kobayashi-TD/input.py new file mode 100644 index 000000000..05e4808aa --- /dev/null +++ b/mcdc/examples/kobayashi-TD/input.py @@ -0,0 +1,77 @@ +import numpy as np +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== +# Based on Kobayashi dog-leg benchmark problem +# (PNE 2001, https://doi.org/10.1016/S0149-1970(01)00007-5) + +# Set materials +m = mcdc.MaterialMG(capture=np.array([0.05]), scatter=np.array([[0.05]])) +m_void = 
mcdc.MaterialMG(capture=np.array([5e-5]), scatter=np.array([[5e-5]])) + +# Set surfaces +sx1 = mcdc.Surface.PlaneX(x=0.0, boundary_condition="reflective") +sx2 = mcdc.Surface.PlaneX(x=10.0) +sx3 = mcdc.Surface.PlaneX(x=30.0) +sx4 = mcdc.Surface.PlaneX(x=40.0) +sx5 = mcdc.Surface.PlaneX(x=60.0, boundary_condition="vacuum") +sy1 = mcdc.Surface.PlaneY(y=0.0, boundary_condition="reflective") +sy2 = mcdc.Surface.PlaneY(y=10.0) +sy3 = mcdc.Surface.PlaneY(y=50.0) +sy4 = mcdc.Surface.PlaneY(y=60.0) +sy5 = mcdc.Surface.PlaneY(y=100.0, boundary_condition="vacuum") +sz1 = mcdc.Surface.PlaneZ(z=0.0, boundary_condition="reflective") +sz2 = mcdc.Surface.PlaneZ(z=10.0) +sz3 = mcdc.Surface.PlaneZ(z=30.0) +sz4 = mcdc.Surface.PlaneZ(z=40.0) +sz5 = mcdc.Surface.PlaneZ(z=60.0, boundary_condition="vacuum") + +# Set cells +# Source +mcdc.Cell(region=+sx1 & -sx2 & +sy1 & -sy2 & +sz1 & -sz2, fill=m) +# Voids +channel_1 = +sx1 & -sx2 & +sy2 & -sy3 & +sz1 & -sz2 +channel_2 = +sx1 & -sx3 & +sy3 & -sy4 & +sz1 & -sz2 +channel_3 = +sx3 & -sx4 & +sy3 & -sy4 & +sz1 & -sz3 +channel_4 = +sx3 & -sx4 & +sy3 & -sy5 & +sz3 & -sz4 +void_channel = channel_1 | channel_2 | channel_3 | channel_4 +mcdc.Cell(region=void_channel, fill=m_void) +# Shield +box = +sx1 & -sx5 & +sy1 & -sy5 & +sz1 & -sz5 +mcdc.Cell(region=box & ~void_channel, fill=m) + +# ====================================================================================== +# Set source +# ====================================================================================== +# The source pulses in t=[0,50] + +mcdc.Source( + x=[0.0, 10.0], + y=[0.0, 10.0], + z=[0.0, 10.0], + isotropic=True, + energy_group=0, + time=[0.0, 50.0], +) + +# ====================================================================================== +# Set tallies, settings, techniques, and run MC/DC +# ====================================================================================== + +# Tallies +time_grid = np.linspace(0.0, 200.0, 21) +mesh = mcdc.MeshUniform(x=(0.0,
1.0, 60), y=(0.0, 1.0, 100)) +mcdc.Tally(mesh=mesh, scores=["flux"], time=time_grid) +mcdc.Tally(scores=["density"], time=time_grid) + +# Settings +mcdc.settings.N_particle = 100 +mcdc.settings.N_batch = 2 + +# Techniques +mcdc.simulation.implicit_capture() + +# Run +mcdc.run() diff --git a/mcdc/examples/kobayashi-TD/process-output.py b/mcdc/examples/kobayashi-TD/process-output.py new file mode 100644 index 000000000..10bbc2811 --- /dev/null +++ b/mcdc/examples/kobayashi-TD/process-output.py @@ -0,0 +1,55 @@ +import numpy as np +import matplotlib.pyplot as plt +import h5py +import matplotlib.animation as animation + +# Load result +with h5py.File("output.h5", "r") as f: + x = f["tallies/tracklength_tally_0/grid/x"][:] + x_mid = 0.5 * (x[:-1] + x[1:]) + y = f["tallies/tracklength_tally_0/grid/y"][:] + y_mid = 0.5 * (y[:-1] + y[1:]) + t = f["tallies/tracklength_tally_0/grid/time"][:] + t_mid = 0.5 * (t[:-1] + t[1:]) + X, Y = np.meshgrid(y, x) + + phi = f["tallies/tracklength_tally_0/flux/mean"][:] + phi_sd = f["tallies/tracklength_tally_0/flux/sdev"][:] + + phi_total = f["tallies/tracklength_tally_1/density/mean"][:] + phi_total_sd = f["tallies/tracklength_tally_1/density/sdev"][:] + +# Animate result +fig, ax = plt.subplots(1, 2, figsize=(8, 4), gridspec_kw={"width_ratios": [1.0, 2]}) +# +cax = ax[1].pcolormesh(X, Y, phi[0], vmin=phi[0].min(), vmax=phi[0].max()) +ax[1].set_aspect("equal", "box") +ax[1].set_xlabel("$y$ [cm]") +ax[1].set_ylabel("$x$ [cm]") +# +ax[0].plot(t_mid, phi_total) +ax[0].set_xlabel("$t$ [s]") +ax[0].set_ylabel("Neutron density") +ax[0].set_yscale("log") +ax[0].plot(t_mid, phi_total, "b-") +ax[0].fill_between( + t_mid, phi_total - phi_total_sd, phi_total + phi_total_sd, alpha=0.2, color="b" +) +ax[0].grid() +ax[0].set_box_aspect(1) +(line,) = ax[0].plot([], [], "ok", fillstyle="none") + + +# +def animate(i): + n = np.zeros_like(t_mid) + n[i] = phi_total[i] + line.set_data(t_mid, n) + cax.set_array(phi[i]) + cax.set_clim(phi[i].min(), 
phi[i].max()) + + +# +K = len(t) - 1 +anim = animation.FuncAnimation(fig, animate, frames=K) +plt.show() diff --git a/mcdc/examples/kobayashi/input.py b/mcdc/examples/kobayashi/input.py new file mode 100644 index 000000000..3f6f11929 --- /dev/null +++ b/mcdc/examples/kobayashi/input.py @@ -0,0 +1,74 @@ +import numpy as np +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== +# Based on Kobayashi dog-leg benchmark problem +# (PNE 2001, https://doi.org/10.1016/S0149-1970(01)00007-5) + +# Set materials +m = mcdc.MaterialMG(capture=np.array([0.05]), scatter=np.array([[0.05]])) +m_void = mcdc.MaterialMG(capture=np.array([5e-5]), scatter=np.array([[5e-5]])) + +# Set surfaces +sx1 = mcdc.Surface.PlaneX(x=0.0, boundary_condition="reflective") +sx2 = mcdc.Surface.PlaneX(x=10.0) +sx3 = mcdc.Surface.PlaneX(x=30.0) +sx4 = mcdc.Surface.PlaneX(x=40.0) +sx5 = mcdc.Surface.PlaneX(x=60.0, boundary_condition="vacuum") +sy1 = mcdc.Surface.PlaneY(y=0.0, boundary_condition="reflective") +sy2 = mcdc.Surface.PlaneY(y=10.0) +sy3 = mcdc.Surface.PlaneY(y=50.0) +sy4 = mcdc.Surface.PlaneY(y=60.0) +sy5 = mcdc.Surface.PlaneY(y=100.0, boundary_condition="vacuum") +sz1 = mcdc.Surface.PlaneZ(z=0.0, boundary_condition="reflective") +sz2 = mcdc.Surface.PlaneZ(z=10.0) +sz3 = mcdc.Surface.PlaneZ(z=30.0) +sz4 = mcdc.Surface.PlaneZ(z=40.0) +sz5 = mcdc.Surface.PlaneZ(z=60.0, boundary_condition="vacuum") + +# Set cells +# Source +mcdc.Cell(region=+sx1 & -sx2 & +sy1 & -sy2 & +sz1 & -sz2, fill=m) +# Voids +channel_1 = +sx1 & -sx2 & +sy2 & -sy3 & +sz1 & -sz2 +channel_2 = +sx1 & -sx3 & +sy3 & -sy4 & +sz1 & -sz2 +channel_3 = +sx3 & -sx4 & +sy3 & -sy4 & +sz1 & -sz3 +channel_4 = +sx3 & -sx4 & +sy3 & -sy5 & +sz3 & -sz4 +void_channel = channel_1 | channel_2 | channel_3 | channel_4 +mcdc.Cell(region=void_channel, fill=m_void) +# Shield +box = +sx1 & -sx5 & +sy1 & 
-sy5 & +sz1 & -sz5 +mcdc.Cell(region=box & ~void_channel, fill=m) + +# ====================================================================================== +# Set source +# ====================================================================================== +# The source is steady (time-independent) + +mcdc.Source( + x=[0.0, 10.0], + y=[0.0, 10.0], + z=[0.0, 10.0], + isotropic=True, + energy_group=0, +) + +# ====================================================================================== +# Set tallies, settings, techniques, and run MC/DC +# ====================================================================================== + +# Tallies +mesh = mcdc.MeshUniform(x=(0.0, 1.0, 60), y=(0.0, 1.0, 100), z=(0.0, 1.0, 60)) +mcdc.Tally(mesh=mesh, scores=["flux"]) + +# Settings +mcdc.settings.N_particle = 1000 +mcdc.settings.N_batch = 2 + +# Techniques +mcdc.simulation.implicit_capture() + +# Run +mcdc.run() diff --git a/mcdc/examples/kobayashi/process-output.py b/mcdc/examples/kobayashi/process-output.py new file mode 100644 index 000000000..e874dce5a --- /dev/null +++ b/mcdc/examples/kobayashi/process-output.py @@ -0,0 +1,48 @@ +import numpy as np +import matplotlib.pyplot as plt +import matplotlib.colors as colors +import h5py + +# Load result and grid +with h5py.File("output.h5", "r") as f: + x = f["tallies/tracklength_tally_0/grid/x"][:] + y = f["tallies/tracklength_tally_0/grid/y"][:] + z = f["tallies/tracklength_tally_0/grid/z"][:] + + phi = f["tallies/tracklength_tally_0/flux/mean"][:] + phi_sd = f["tallies/tracklength_tally_0/flux/sdev"][:] + +# The 2D grid for Z-scan plots +X, Y = np.meshgrid(y, x) + +# Normalization over all Z slices +norm_mean = colors.Normalize(vmin=phi.min(), vmax=phi.max()) +norm_sdev = colors.Normalize(vmin=phi_sd.min(), vmax=phi_sd.max()) + +# Z-scan loop +Nz = len(z) - 1 +for i in range(Nz): + # Get the mean and sdev values for current Z-slice + # The indexing is [x,y,z] + mean = phi[:, :, i] + sdev = phi_sd[:, :, i] + + # Plot mean and
sdev side-by-side + fig, ax = plt.subplots(1, 2) + plot_mean = ax[0].pcolormesh(X, Y, mean, norm=norm_mean) + plot_sdev = ax[1].pcolormesh(X, Y, sdev, norm=norm_sdev) + + # Colorbar + fig.colorbar(plot_mean, ax=ax[0]) + fig.colorbar(plot_sdev, ax=ax[1]) + + # Formats + fig.suptitle(f"Flux within Z = [{z[i]}, {z[i+1]}]") + ax[0].set_title("Mean") + ax[0].set_xlabel("$y$ [cm]") + ax[0].set_ylabel("$x$ [cm]") + ax[1].set_title("Std. Dev.") + ax[1].set_xlabel("$y$ [cm]") + ax[1].set_ylabel("$x$ [cm]") + + plt.show() diff --git a/mcdc/examples/moving_pellet/input.py b/mcdc/examples/moving_pellet/input.py new file mode 100644 index 000000000..addcb050e --- /dev/null +++ b/mcdc/examples/moving_pellet/input.py @@ -0,0 +1,95 @@ +import numpy as np + +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== + +# Set materials +fuel = mcdc.MaterialMG( + capture=np.array([0.5]), + fission=np.array([0.25]), + nu_p=np.array([1.5]), + speed=np.array([200000.0]), +) +air = mcdc.MaterialMG( + capture=np.array([0.002]), + scatter=np.array([[0.008]]), + speed=np.array([200000.0]), +) + +# Set surfaces +cylinder_z = mcdc.Surface.CylinderZ(center=[0.0, 0.0], radius=1.0) +top_z = mcdc.Surface.PlaneZ(z=9.0) +bot_z = mcdc.Surface.PlaneZ(z=6.0) + +# Move surfaces +cylinder_z.move([[-0.5, 0.0, 0.0], [1.0, 0.0, 0.0], [-2.0, 0.0, 0.0]], [2.0, 5.0, 1.0]) +top_z.move([[0.0, 0.0, -2.0], [0.0, 0.0, 4.0], [0.0, 0.0, -10.0]], [5.0, 2.0, 1.0]) +bot_z.move([[0.0, 0.0, -2.0], [0.0, 0.0, 4.0], [0.0, 0.0, -10.0]], [5.0, 2.0, 1.0]) + +# Set container cell surfaces +min_x = mcdc.Surface.PlaneX(x=-5.0, boundary_condition="vacuum") +max_x = mcdc.Surface.PlaneX(x=5.0, boundary_condition="vacuum") +min_y = mcdc.Surface.PlaneY(y=-5.0, boundary_condition="vacuum") +max_y = mcdc.Surface.PlaneY(y=5.0, boundary_condition="vacuum") +min_z = 
mcdc.Surface.PlaneZ(z=-10.0, boundary_condition="vacuum") +max_z = mcdc.Surface.PlaneZ(z=10.0, boundary_condition="vacuum") + +# Make cells +fuel_pellet_region = +bot_z & -top_z & -cylinder_z +mcdc.Cell(region=fuel_pellet_region, fill=fuel) +mcdc.Cell( + region=~fuel_pellet_region & +min_x & -max_x & +min_y & -max_y & +min_z & -max_z, + fill=air, +) + +# ====================================================================================== +# Set source +# ====================================================================================== + +mcdc.Source( + x=[2.0, 3.0], + y=[-0.5, 0.5], + z=[-0.5, 0.5], + isotropic=True, + energy_group=0, + time=[0.0, 9.0], +) + +# ====================================================================================== +# Set tallies, settings, and run MC/DC +# ====================================================================================== + +# Tallies +mesh = mcdc.MeshStructured( + x=np.linspace(-5, 5, 201), + z=np.linspace(-10, 10, 201), +) +mcdc.Tally(mesh=mesh, scores=["fission"], time=np.linspace(0, 9, 46)) + +# Settings +mcdc.settings.N_particle = 100000 +mcdc.settings.N_batch = 2 +mcdc.settings.active_bank_buffer = 1000 + +# Run (or visualize) +visualize = False +if not visualize: + mcdc.run() +else: + colors = { + fuel: "red", + air: "blue", + } + mcdc.visualize( + "xz", + y=0.0, + x=[-5.0, 5.0], + z=[-10, 10], + pixels=(100, 100), + colors=colors, + time=np.linspace(0.0, 9.0, 19), + save_as="figure", + ) diff --git a/mcdc/examples/moving_pellet/process-output.py b/mcdc/examples/moving_pellet/process-output.py new file mode 100644 index 000000000..224e80d72 --- /dev/null +++ b/mcdc/examples/moving_pellet/process-output.py @@ -0,0 +1,39 @@ +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.colors import LogNorm +import h5py +import matplotlib.animation as animation + +# ============================================================================= +# Plot results +# 
============================================================================= + +# Results +with h5py.File("output.h5", "r") as f: + x = f["tallies/tracklength_tally_0/grid/x"][:] + x_mid = 0.5 * (x[:-1] + x[1:]) + z = f["tallies/tracklength_tally_0/grid/z"][:] + z_mid = 0.5 * (z[:-1] + z[1:]) + t = f["tallies/tracklength_tally_0/grid/time"][:] + t_mid = 0.5 * (t[:-1] + t[1:]) + X, Y = np.meshgrid(z, x) + + phi = f["tallies/tracklength_tally_0/fission/mean"][:] + phi_sd = f["tallies/tracklength_tally_0/fission/sdev"][:] + +fig, ax = plt.subplots() +cax = ax.pcolormesh(X, Y, phi[0]) +text = ax.text(0.02, 1.02, "", transform=ax.transAxes) +ax.set_aspect("equal", "box") +ax.set_xlabel("$y$ [cm]") +ax.set_ylabel("$x$ [cm]") + + +def animate(i): + cax.set_array(phi[i]) + cax.set_clim(phi[i].min(), phi[i].max()) + text.set_text(r"$t \in [%.1f,%.1f]$ s" % (t[i], t[i + 1])) + + +anim = animation.FuncAnimation(fig, animate, interval=100, frames=len(t) - 1) +plt.show() diff --git a/mcdc/examples/moving_source/input.py b/mcdc/examples/moving_source/input.py new file mode 100644 index 000000000..c11ae3e8e --- /dev/null +++ b/mcdc/examples/moving_source/input.py @@ -0,0 +1,65 @@ +import numpy as np + +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== + +# Set materials +air = mcdc.MaterialMG( + capture=np.array([0.002]), + scatter=np.array([[0.008]]), + speed=np.array([200000.0]), +) + +# Set container cell surfaces +min_x = mcdc.Surface.PlaneX(x=-5.0, boundary_condition="vacuum") +max_x = mcdc.Surface.PlaneX(x=5.0, boundary_condition="vacuum") +min_y = mcdc.Surface.PlaneY(y=-5.0, boundary_condition="vacuum") +max_y = mcdc.Surface.PlaneY(y=5.0, boundary_condition="vacuum") +min_z = mcdc.Surface.PlaneZ(z=-10.0, boundary_condition="vacuum") +max_z = mcdc.Surface.PlaneZ(z=10.0, boundary_condition="vacuum") + +# Make cells 
+mcdc.Cell(region=+min_x & -max_x & +min_y & -max_y & +min_z & -max_z, fill=air) + +# ====================================================================================== +# Set source +# ====================================================================================== + +src = mcdc.Source( + x=[-4.0, -3.0], + y=[-0.5, 0.5], + z=[-0.5, 0.5], + direction=[1.0, 1.0, 0.0], + polar_cosine=[-1.0, -0.9], + energy_group=0, + time=[0.0, 10.0], +) +src.move( + velocities=[ + [1.0, 0.0, 0.0], + [-0.5, 2.0, 0.0], + [0.0, -3.0, 0.0], + ], + durations=[7.0, 2.0, 1.0], +) + +# ====================================================================================== +# Set tallies, settings, and run MC/DC +# ====================================================================================== + +# Tallies +mesh = mcdc.MeshStructured( + x=np.linspace(-5.0, 5.0, 201), + y=np.linspace(-5.0, 5.0, 201), +) +mcdc.Tally(mesh=mesh, scores=["flux"], time=np.linspace(0, 10, 46)) + +# Settings +mcdc.settings.N_particle = 100000 +mcdc.settings.N_batch = 2 + +# Run +mcdc.run() diff --git a/mcdc/examples/moving_source/process-output.py b/mcdc/examples/moving_source/process-output.py new file mode 100644 index 000000000..ec12e4381 --- /dev/null +++ b/mcdc/examples/moving_source/process-output.py @@ -0,0 +1,40 @@ +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.colors import LogNorm +import h5py +import matplotlib.animation as animation + +# ============================================================================= +# Plot results +# ============================================================================= + +# Results +with h5py.File("output.h5", "r") as f: + x = f["tallies/tracklength_tally_0/grid/x"][:] + x_mid = 0.5 * (x[:-1] + x[1:]) + y = f["tallies/tracklength_tally_0/grid/y"][:] + y_mid = 0.5 * (y[:-1] + y[1:]) + t = f["tallies/tracklength_tally_0/grid/time"][:] + t_mid = 0.5 * (t[:-1] + t[1:]) + X, Y = np.meshgrid(y, x) + + phi = 
f["tallies/tracklength_tally_0/flux/mean"][:] + phi_sd = f["tallies/tracklength_tally_0/flux/sdev"][:] + +fig, ax = plt.subplots() +cax = ax.pcolormesh(X, Y, phi[0]) +text = ax.text(0.02, 1.02, "", transform=ax.transAxes) +ax.set_aspect("equal", "box") +ax.set_xlabel("$y$ [cm]") +ax.set_ylabel("$x$ [cm]") + + +def animate(i): + cax.set_array(phi[i]) + cax.set_clim(phi[i].min(), phi[i].max()) + text.set_text(r"$t \in [%.1f,%.1f]$ s" % (t[i], t[i + 1])) + + +anim = animation.FuncAnimation(fig, animate, interval=100, frames=len(t) - 1) +anim.save("moving_source.gif") +plt.show() diff --git a/mcdc/examples/need_update/c5g7/3d/k-eigenvalue/input.py b/mcdc/examples/need_update/c5g7/3d/k-eigenvalue/input.py new file mode 100644 index 000000000..88ce2e27c --- /dev/null +++ b/mcdc/examples/need_update/c5g7/3d/k-eigenvalue/input.py @@ -0,0 +1,305 @@ +import h5py +import numpy as np + +import mcdc + +# ============================================================================= +# Materials +# ============================================================================= + +# Load material data +lib = h5py.File("../../MGXS-C5G7-TD.h5", "r") + + +# Setter +def set_mat(mat): + return mcdc.material( + capture=mat["capture"][:], + scatter=mat["scatter"][:], + fission=mat["fission"][:], + nu_p=mat["nu_p"][:], + nu_d=mat["nu_d"][:], + chi_p=mat["chi_p"][:], + chi_d=mat["chi_d"][:], + speed=mat["speed"], + decay=mat["decay"], + ) + + +# Set the material +mat_uo2 = set_mat(lib["uo2"]) # Fuel: UO2 +mat_mox43 = set_mat(lib["mox43"]) # Fuel: MOX 4.3% +mat_mox7 = set_mat(lib["mox7"]) # Fuel: MOX 7.0% +mat_mox87 = set_mat(lib["mox87"]) # Fuel: MOX 8.7% +mat_gt = set_mat(lib["gt"]) # Guide tube +mat_fc = set_mat(lib["fc"]) # Fission chamber +mat_cr = set_mat(lib["cr"]) # Control rod +mat_mod = set_mat(lib["mod"]) # Moderator + +# ============================================================================= +# Pin cells +# 
============================================================================= + +pitch = 1.26 +radius = 0.54 +core_height = 128.52 +refl_thick = 21.42 + +# Control rod banks fractions +# All out: 0.0 +# All in : 1.0 +cr1 = 0.0 +cr2 = 0.0 +cr3 = 0.0 +cr4 = 0.0 +# Control rod banks interfaces +cr1 = core_height * (0.5 - cr1) +cr2 = core_height * (0.5 - cr2) +cr3 = core_height * (0.5 - cr3) +cr4 = core_height * (0.5 - cr4) + +# Surfaces +cy = mcdc.surface("cylinder-z", center=[0.0, 0.0], radius=radius) +z1 = mcdc.surface("plane-z", z=cr1) # Control rod banks interfaces +z2 = mcdc.surface("plane-z", z=cr2) +z3 = mcdc.surface("plane-z", z=cr3) +z4 = mcdc.surface("plane-z", z=cr4) +zf = mcdc.surface("plane-z", z=core_height / 2) + +# Fission chamber +fc = mcdc.cell(-cy, mat_fc) +mod = mcdc.cell(+cy, mat_mod) +fission_chamber = mcdc.universe([fc, mod]) + +# Fuel rods +uo2 = mcdc.cell(-cy & -zf, mat_uo2) +mox4 = mcdc.cell(-cy & -zf, mat_mox43) +mox7 = mcdc.cell(-cy & -zf, mat_mox7) +mox8 = mcdc.cell(-cy & -zf, mat_mox87) +moda = mcdc.cell(-cy & +zf, mat_mod) # Water above pin +fuel_uo2 = mcdc.universe([uo2, mod, moda]) +fuel_mox43 = mcdc.universe([mox4, mod, moda]) +fuel_mox7 = mcdc.universe([mox7, mod, moda]) +fuel_mox87 = mcdc.universe([mox8, mod, moda]) + +# Control rods and guide tubes +cr1 = mcdc.cell(-cy & +z1, mat_cr) +cr2 = mcdc.cell(-cy & +z2, mat_cr) +cr3 = mcdc.cell(-cy & +z3, mat_cr) +cr4 = mcdc.cell(-cy & +z4, mat_cr) +gt1 = mcdc.cell(-cy & -z1, mat_gt) +gt2 = mcdc.cell(-cy & -z2, mat_gt) +gt3 = mcdc.cell(-cy & -z3, mat_gt) +gt4 = mcdc.cell(-cy & -z4, mat_gt) +control_rod1 = mcdc.universe([cr1, gt1, mod]) +control_rod2 = mcdc.universe([cr2, gt2, mod]) +control_rod3 = mcdc.universe([cr3, gt3, mod]) +control_rod4 = mcdc.universe([cr4, gt4, mod]) + +# ============================================================================= +# Fuel lattices +# ============================================================================= + +# UO2 lattice 1 +u = fuel_uo2 +c = 
control_rod1 +f = fission_chamber +lattice_1 = mcdc.lattice( + x=[-pitch * 17 / 2, pitch, 17], + y=[-pitch * 17 / 2, pitch, 17], + universes=[ + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, c, u, u, c, u, u, c, u, u, u, u, u], + [u, u, u, c, u, u, u, u, u, u, u, u, u, c, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, c, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, f, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, c, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, c, u, u, u, u, u, u, u, u, u, c, u, u, u], + [u, u, u, u, u, c, u, u, c, u, u, c, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + ], +) + +# MOX lattice 2 +l = fuel_mox43 +m = fuel_mox7 +n = fuel_mox87 +c = control_rod2 +f = fission_chamber +lattice_2 = mcdc.lattice( + x=[-pitch * 17 / 2, pitch, 17], + y=[-pitch * 17 / 2, pitch, 17], + universes=[ + [l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l], + [l, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, l], + [l, m, m, m, m, c, m, m, c, m, m, c, m, m, m, m, l], + [l, m, m, c, m, n, n, n, n, n, n, n, m, c, m, m, l], + [l, m, m, m, n, n, n, n, n, n, n, n, n, m, m, m, l], + [l, m, c, n, n, c, n, n, c, n, n, c, n, n, c, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, c, n, n, c, n, n, f, n, n, c, n, n, c, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, c, n, n, c, n, n, c, n, n, c, n, n, c, m, l], + [l, m, m, m, n, n, n, n, n, n, n, n, 
n, m, m, m, l], + [l, m, m, c, m, n, n, n, n, n, n, n, m, c, m, m, l], + [l, m, m, m, m, c, m, m, c, m, m, c, m, m, m, m, l], + [l, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, l], + [l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l], + ], +) + +# MOX lattice 3 +l = fuel_mox43 +m = fuel_mox7 +n = fuel_mox87 +c = control_rod3 +f = fission_chamber +lattice_3 = mcdc.lattice( + x=[-pitch * 17 / 2, pitch, 17], + y=[-pitch * 17 / 2, pitch, 17], + universes=[ + [l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l], + [l, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, l], + [l, m, m, m, m, c, m, m, c, m, m, c, m, m, m, m, l], + [l, m, m, c, m, n, n, n, n, n, n, n, m, c, m, m, l], + [l, m, m, m, n, n, n, n, n, n, n, n, n, m, m, m, l], + [l, m, c, n, n, c, n, n, c, n, n, c, n, n, c, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, c, n, n, c, n, n, f, n, n, c, n, n, c, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, c, n, n, c, n, n, c, n, n, c, n, n, c, m, l], + [l, m, m, m, n, n, n, n, n, n, n, n, n, m, m, m, l], + [l, m, m, c, m, n, n, n, n, n, n, n, m, c, m, m, l], + [l, m, m, m, m, c, m, m, c, m, m, c, m, m, m, m, l], + [l, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, l], + [l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l], + ], +) + +# UO2 lattice 4 +u = fuel_uo2 +c = control_rod4 +f = fission_chamber +lattice_4 = mcdc.lattice( + x=[-pitch * 17 / 2, pitch, 17], + y=[-pitch * 17 / 2, pitch, 17], + universes=[ + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, c, u, u, c, u, u, c, u, u, u, u, u], + [u, u, u, c, u, u, u, u, u, u, u, u, u, c, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, c, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, 
u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, f, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, c, u, u, c, u, u, c, u, u, c, u, u, c, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, c, u, u, u, u, u, u, u, u, u, c, u, u, u], + [u, u, u, u, u, c, u, u, c, u, u, c, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + ], +) + +# ============================================================================= +# Assemblies and core +# ============================================================================= + +# Surfaces +x0 = mcdc.surface("plane-x", x=0.0, bc="reflective") +x1 = mcdc.surface("plane-x", x=pitch * 17) +x2 = mcdc.surface("plane-x", x=pitch * 17 * 2) +x3 = mcdc.surface("plane-x", x=pitch * 17 * 3, bc="vacuum") + +y0 = mcdc.surface("plane-y", y=-pitch * 17 * 3, bc="vacuum") +y1 = mcdc.surface("plane-y", y=-pitch * 17 * 2) +y2 = mcdc.surface("plane-y", y=-pitch * 17) +y3 = mcdc.surface("plane-y", y=0.0, bc="reflective") + +z0 = mcdc.surface("plane-z", z=-(core_height / 2 + refl_thick), bc="vacuum") +z1 = mcdc.surface("plane-z", z=-(core_height / 2)) +z2 = mcdc.surface("plane-z", z=(core_height / 2 + refl_thick), bc="vacuum") + +# Assembly cells +center = np.array([pitch * 17 / 2, -pitch * 17 / 2, 0.0]) +assembly_1 = mcdc.cell(+x0 & -x1 & +y2 & -y3 & +z1 & -z2, lattice_1, translation=center) + +center += np.array([pitch * 17, 0.0, 0.0]) +assembly_2 = mcdc.cell(+x1 & -x2 & +y2 & -y3 & +z1 & -z2, lattice_2, translation=center) + +center += np.array([-pitch * 17, -pitch * 17, 0.0]) +assembly_3 = mcdc.cell(+x0 & -x1 & +y1 & -y2 & +z1 & -z2, lattice_3, translation=center) + +center += np.array([pitch * 17, 0.0, 0.0]) +assembly_4 = mcdc.cell(+x1 & -x2 & +y1 & -y2 & +z1 & -z2, lattice_4, translation=center) + +# Bottom reflector cell +reflector_bottom = 
mcdc.cell(+x0 & -x3 & +y0 & -y3 & +z0 & -z1, mat_mod) + +# Side reflectors +reflector_south = mcdc.cell(+x0 & -x3 & +y0 & -y1 & +z1 & -z2, mat_mod) +reflector_east = mcdc.cell(+x2 & -x3 & +y1 & -y3 & +z1 & -z2, mat_mod) + +# Root universe +mcdc.universe( + [ + assembly_1, + assembly_2, + assembly_3, + assembly_4, + reflector_bottom, + reflector_south, + reflector_east, + ], + root=True, +) + +# ============================================================================= +# Set source +# ============================================================================= +# Uniform in energy + +source = mcdc.source( + x=[0.0, pitch * 17 * 2], + y=[-pitch * 17 * 2, 0.0], + z=[-core_height / 2, core_height / 2], + energy=np.ones(7), +) + +# ============================================================================= +# Set tally and parameter, and then run mcdc +# ============================================================================= + +# Tally +x_grid = np.linspace(0.0, pitch * 17 * 3, 17 * 3 + 1) +y_grid = np.linspace(-pitch * 17 * 3, 0.0, 17 * 3 + 1) +z_grid = np.linspace( + -(core_height / 2 + refl_thick), (core_height / 2 + refl_thick), 102 + 17 * 2 + 1 +) +g_grid = np.array([-0.5, 3.5, 6.5]) # Collapsing to fast (1-4) and slow (5-7) + +mcdc.tally.mesh_tally(scores=["flux"], x=x_grid, y=y_grid, z=z_grid, g=g_grid) + +# Setting +mcdc.setting(N_particle=2e4, census_bank_buff=4) + +mcdc.eigenmode(N_inactive=5, N_active=15, gyration_radius="all") +mcdc.population_control() + +# Run +mcdc.run() diff --git a/mcdc/examples/need_update/fixed_source/azurv1_pl_super/input.py b/mcdc/examples/need_update/fixed_source/azurv1_pl_super/input.py new file mode 100644 index 000000000..1b92ec1f6 --- /dev/null +++ b/mcdc/examples/need_update/fixed_source/azurv1_pl_super/input.py @@ -0,0 +1,49 @@ +import numpy as np + +import mcdc + +# ============================================================================= +# Set model +# 
import numpy as np

import mcdc

# =============================================================================
# Set model
# =============================================================================
# Infinite medium with isotropic plane surface at the center
# Based on Ganapol LA-UR-01-1854 (AZURV1 benchmark)
# Effective scattering ratio c = (scatter + nu_p * fission) / total
#                              = (1/3 + 2.3 * 1/3) / 1 = 1.1 (supercritical)

# Set materials
m = mcdc.material(
    capture=np.array([1.0 / 3.0]),
    scatter=np.array([[1.0 / 3.0]]),
    fission=np.array([1.0 / 3.0]),
    nu_p=np.array([2.3]),
)

# Set surfaces
# Reflective planes pushed to +/- 1e10 emulate an infinite medium
s1 = mcdc.surface("plane-x", x=-1e10, bc="reflective")
s2 = mcdc.surface("plane-x", x=1e10, bc="reflective")

# Set cells
mcdc.cell(+s1 & -s2, m)

# =============================================================================
# Set source
# =============================================================================
# Isotropic pulse at x=t=0 (tiny time window approximates a delta pulse)

mcdc.source(point=[0.0, 0.0, 0.0], isotropic=True, time=[1e-10, 1e-10])

# =============================================================================
# Set tally, setting, and run mcdc
# =============================================================================

# Tally: scalar flux and its time moment on a uniform x-t mesh
mcdc.tally.mesh_tally(
    scores=["flux", "time-moment-flux"],
    x=np.linspace(-20.5, 20.5, 202),
    t=np.linspace(0.0, 20.0, 21),
)

# Setting
mcdc.setting(N_particle=1e5)

# Run
mcdc.run()
import numpy as np
import mcdc

# =============================================================================
# Set model
# =============================================================================
# Based on Kobayashi dog-leg benchmark problem
# (PNE 2001, https://doi.org/10.1016/S0149-1970(01)00007-5)

# Set materials
# The duct "void" is modeled as a near-void material (tiny cross sections)
m = mcdc.material(capture=np.array([0.05]), scatter=np.array([[0.05]]))
m_void = mcdc.material(capture=np.array([5e-5]), scatter=np.array([[5e-5]]))

# Set surfaces
sx1 = mcdc.surface("plane-x", x=0.0, bc="reflective")
sx2 = mcdc.surface("plane-x", x=10.0)
sx3 = mcdc.surface("plane-x", x=30.0)
sx4 = mcdc.surface("plane-x", x=40.0)
sx5 = mcdc.surface("plane-x", x=60.0, bc="vacuum")
sy1 = mcdc.surface("plane-y", y=0.0, bc="reflective")
sy2 = mcdc.surface("plane-y", y=10.0)
sy3 = mcdc.surface("plane-y", y=50.0)
sy4 = mcdc.surface("plane-y", y=60.0)
sy5 = mcdc.surface("plane-y", y=100.0, bc="vacuum")
sz1 = mcdc.surface("plane-z", z=0.0, bc="reflective")
sz2 = mcdc.surface("plane-z", z=10.0)
sz3 = mcdc.surface("plane-z", z=30.0)
sz4 = mcdc.surface("plane-z", z=40.0)
sz5 = mcdc.surface("plane-z", z=60.0, bc="vacuum")

# Set cells
# Source
source_cell = mcdc.cell(+sx1 & -sx2 & +sy1 & -sy2 & +sz1 & -sz2, m)
# Voids: the dog-leg duct is the union of four box segments
channel_1 = +sx1 & -sx2 & +sy2 & -sy3 & +sz1 & -sz2
channel_2 = +sx1 & -sx3 & +sy3 & -sy4 & +sz1 & -sz2
channel_3 = +sx3 & -sx4 & +sy3 & -sy4 & +sz1 & -sz3
channel_4 = +sx3 & -sx4 & +sy3 & -sy5 & +sz3 & -sz4
void_channel = channel_1 | channel_2 | channel_3 | channel_4
void_cell = mcdc.cell(void_channel, m_void)
# Shield: everything inside the bounding box that is not duct
box = +sx1 & -sx5 & +sy1 & -sy5 & +sz1 & -sz5
shield_cell = mcdc.cell(box & ~void_channel, m)

# =============================================================================
# Set source
# =============================================================================
# The source pulses in t=[0,50]

mcdc.source(
    x=[0.0, 10.0], y=[0.0, 10.0], z=[0.0, 10.0], time=[0.0, 50.0], isotropic=True
)

# =============================================================================
# Set tally, setting, and run mcdc
# =============================================================================

# Tally: z-integrated flux (X-Y section view)
mcdc.tally.mesh_tally(
    scores=["flux"],
    x=np.linspace(0.0, 60.0, 31),
    y=np.linspace(0.0, 100.0, 51),
    t=np.linspace(0.0, 200.0, 21),
    # g=np.array([-0.5, 3.5, 6.5]) # fast (0, 1, 2, 3) and thermal (4, 5, 6) groups
)

# Cell tallies over the three material regions
mcdc.tally.cell_tally(source_cell, scores=["flux"])
mcdc.tally.cell_tally(void_cell, scores=["flux"])
mcdc.tally.cell_tally(shield_cell, scores=["flux"])


# Compressed-sensing tally over the same X-Y view
mcdc.tally.cs_tally(
    N_cs_bins=[150],
    cs_bin_size=[8.0, 8.0],
    x=np.linspace(0.0, 60.0, 31),
    y=np.linspace(0.0, 100.0, 51),
    scores=["flux"],
)


# Setting
mcdc.setting(N_particle=1e5)

# Run
mcdc.run()
import numpy as np
import mcdc

# ======================================================================================
# Set model
# ======================================================================================
# Homogeneous pure-fission sphere inside a pure-scattering cube (one-group)

# Set materials
pure_f = mcdc.MaterialMG(fission=np.array([1.0]), nu_p=np.array([1.2]))
pure_s = mcdc.MaterialMG(scatter=np.array([[1.0]]))

# Set surfaces
sx1 = mcdc.Surface.PlaneX(x=0.0, boundary_condition="vacuum")
sx2 = mcdc.Surface.PlaneX(x=4.0, boundary_condition="vacuum")
sy1 = mcdc.Surface.PlaneY(y=0.0, boundary_condition="vacuum")
sy2 = mcdc.Surface.PlaneY(y=4.0, boundary_condition="vacuum")
sz1 = mcdc.Surface.PlaneZ(z=0.0, boundary_condition="vacuum")
sz2 = mcdc.Surface.PlaneZ(z=4.0, boundary_condition="vacuum")
sphere = mcdc.Surface.Sphere(center=[2.0, 2.0, 2.0], radius=1.5)
inside_sphere = -sphere
inside_box = +sx1 & -sx2 & +sy1 & -sy2 & +sz1 & -sz2

# Set cells
mcdc.Cell(name="Box cover", region=inside_box & ~inside_sphere, fill=pure_s)
sphere_cell = mcdc.Cell(name="The sphere", region=inside_sphere, fill=pure_f)

# ======================================================================================
# Set source
# ======================================================================================
# Uniform isotropic source over the whole cube, in t=[0,50]

mcdc.Source(
    x=[0.0, 4.0],
    y=[0.0, 4.0],
    z=[0.0, 4.0],
    isotropic=True,
    energy_group=0,
    time=[0.0, 50.0],
)

# ======================================================================================
# Set tallies, settings, techniques, and run MC/DC
# ======================================================================================

# Tallies
mcdc.Tally(name="Spherical fission detector", cell=sphere_cell, scores=["fission"])

# Settings
mcdc.settings.N_particle = 1000
mcdc.settings.N_batch = 2

# Techniques
mcdc.simulation.implicit_capture()

# Run
mcdc.run()
#!/bin/bash

# Install MC/DC and its dependencies.
#
# Usage:
#   ./install.sh          # mpi4py via conda
#   ./install.sh --hpc    # build mpi4py 3.1.4 from source (HPC systems)

# Check python version: MC/DC requires >= 3.9 and < 3.12
if ! python3 -c 'import sys; sys.exit(0 if (3, 9) <= sys.version_info < (3, 12) else 1)' > /dev/null 2>&1; then
    v=$(python3 --version)
    p=$(which python)
    echo "ERROR: Python version must be < 3.12 and >= 3.9."
    echo "       Found $v at $p."
    echo "ERROR: Installation failed."
    exit 1
fi

# Install or build mpi4py
if [ $# -eq 0 ]; then
    # -y is conda's supported non-interactive confirmation (more robust than
    # piping "y" into the prompt)
    conda install -y mpi4py
fi
while [ $# -gt 0 ]; do
    case $1 in
        --hpc)
            # Rename the legacy linker shipped in conda's compiler_compat so
            # the system linker is used when building mpi4py from source
            s=$(which python)
            s=${s//bin\/python/compiler_compat}

            if [ ! -f "$s/ld.bak" ] && [ -f "$s/ld" ]; then
                mv "$s/ld" "$s/ld.bak"
            fi

            # -p: do not fail if a previous run left the directory behind;
            # abort if cd fails so we never build in the wrong directory
            mkdir -p installs
            cd installs || exit 1
            wget https://github.com/mpi4py/mpi4py/releases/download/3.1.4/mpi4py-3.1.4.tar.gz -q
            tar -zxf mpi4py-3.1.4.tar.gz
            cd mpi4py-3.1.4 || exit 1
            python setup.py install
            cd ../../
            rm -rf installs/
            ;;

    esac
    shift
done

# Install MC/DC module (and the dependencies)
pip install -e .

# Install pre-commit hook
pre-commit install
====================================================================================== + +from mcdc.main import run +from mcdc.visualize import visualize + +# ====================================================================================== +# Misc. +# ====================================================================================== + +import mcdc.config +from mcdc.output import recombine_tallies diff --git a/mcdc/mcdc/code_factory/gpu/program_builder.py b/mcdc/mcdc/code_factory/gpu/program_builder.py new file mode 100644 index 000000000..961704044 --- /dev/null +++ b/mcdc/mcdc/code_factory/gpu/program_builder.py @@ -0,0 +1,386 @@ +import numba as nb +import numba.extending as nbxt +import numpy as np + +from mpi4py import MPI + +#### + +import mcdc.config as config + +# ====================================================================================== +# Transport function adapter +# ====================================================================================== + + +def adapt_transport_functions(): + global access_simulation + + import mcdc.code_factory.gpu.transport as gpu_transport + import mcdc.transport as transport + + transport.util.access_simulation = access_simulation + + # TODO: Make the following automatic + transport.geometry.interface.report_lost_particle = ( + gpu_transport.geometry.interface.report_lost_particle + ) + transport.particle_bank.bank_active_particle = ( + gpu_transport.particle_bank.bank_active_particle + ) + transport.particle_bank.report_full_bank = ( + gpu_transport.particle_bank.report_full_bank + ) + transport.particle_bank.report_empty_bank = ( + gpu_transport.particle_bank.report_empty_bank + ) + transport.util.atomic_add = gpu_transport.util.atomic_add + transport.util.local_array = gpu_transport.util.local_array + + +def adapt_transport_functions_post_setup(): + import mcdc.code_factory.gpu.transport as gpu_transport + import mcdc.transport as transport + + transport.simulation.source_loop = 
gpu_transport.simulation.source_loop + + +# ====================================================================================== +# Forward declaration +# ====================================================================================== + +# Main types +none_type = None +simulation_type = None +data_type = None + +# Access functions +state_spec = None +access_simulation = None +access_data_ptr = None +access_group = None +access_thread = None +particle_gpu = None +particle_record_gpu = None + +# Asynchronous transport kernels +step_async = None +find_cell_async = None + +# Memory allocations +alloc_managed_bytes = None +alloc_device_bytes = None + + +def forward_declare_gpu_program(): + import harmonize + import mcdc.numba_types as type_ + + # Get to set the globals + global none_type, simulation_type, data_type + global state_spec, access_simulation, access_data_ptr, access_group, access_thread, particle_gpu, particle_record_gpu + global step_async, find_cell_async + global alloc_managed_bytes, alloc_device_bytes + + # Compilation check + if MPI.COMM_WORLD.Get_rank() == 0: + if config.caching == False: + harmonize.config.should_compile(harmonize.config.ShouldCompile.ALWAYS) + else: + harmonize.config.should_compile(harmonize.config.ShouldCompile.NEVER) + + # ROCm and CUDA paths + if config.args.gpu_cuda_path != None: + harmonize.config.set_cuda_path(config.args.gpu_cuda_path) + if config.args.gpu_rocm_path != None: + harmonize.config.set_rocm_path(config.args.gpu_rocm_path) + + # Main types: none, simulation structure, and simulation data + none_type = nb.from_dtype(np.dtype([])) + simulation_type = nb.types.Array(nb.from_dtype(type_.simulation), (1,), "C") + data_type = nb.types.Array(nb.float64, 1, "C") + + # Set access functions + state_spec = ( + { + "simulation": simulation_type, + "data": data_type, + }, + none_type, + none_type, + ) + access_fns = harmonize.RuntimeSpec.access_fns(state_spec) + access_simulation = 
access_fns["device"]["simulation"]["indirect"] + access_data_ptr = access_fns["device"]["data"]["direct"] + access_group = access_fns["group"] + access_thread = access_fns["thread"] + particle_gpu = nb.from_dtype(type_.particle) + particle_record_gpu = nb.from_dtype(type_.particle_data) + + # Functions, and their signatures + def step(program: nb.uintp, particle: particle_gpu): + pass + + def find_cell(program: nb.uintp, particle: particle_gpu): + pass + + # Asynchronous versions + step_async, find_cell_async = harmonize.RuntimeSpec.async_dispatch(step, find_cell) + + # Program interfaces + interface = harmonize.RuntimeSpec.program_interface() + halt_early = interface["halt_early"] + + # Byte allocators + alloc_managed_bytes = harmonize.alloc_managed_bytes + alloc_device_bytes = harmonize.alloc_device_bytes + + +# ====================================================================================== +# Program builder +# ====================================================================================== + +alloc_state = None +free_state = None + +alloc_program = None +free_program = None + +load_state_device_simulation = None +store_state_device_simulation = None +store_pointer_state_device_simulation = None + +load_state_device_data = None +store_state_device_data = None +store_pointer_state_device_data = None + +init_program = None +exec_program = None +complete = None +clear_flags = None +set_device = None + +ARENA_SIZE = 0 +BLOCK_COUNT = 0 + + +def build_gpu_program(data_size): + import harmonize + import mcdc.numba_types as type_ + import mcdc.transport.util as util + + from mcdc.transport.simulation import generate_source_particle, step_particle + + global alloc_state, free_state + + global alloc_program, free_program + + global load_state_device_simulation, store_state_device_simulation, store_pointer_state_device_simulation + + global load_state_device_data, store_state_device_data, store_pointer_state_device_data + + global init_program, exec_program, 
complete, clear_flags, set_device + global ARENA_SIZE, BLOCK_COUNT + + shape = eval(f"{(data_size,)}") + + # ============== + # Base functions + # ============== + + def make_work(program: nb.uintp) -> nb.boolean: + simulation = access_simulation(program) + data_ptr = access_data_ptr(program) + data = harmonize.array_from_ptr(data_ptr, shape, nb.float64) + + util.atomic_add(simulation["mpi_work_iter"], 0, 1) + idx_work = simulation["mpi_work_iter"][0] + + if idx_work >= simulation["mpi_work_size"]: + return False + + work_start = simulation["mpi_work_start"] + + generate_source_particle( + simulation["mpi_work_start"], + nb.uint64(idx_work), + simulation["source_seed"], + program, + data, + ) + return True + + def initialize(program: nb.uintp): + pass + + def finalize(program: nb.uintp): + pass + + # ================ + # Async. functions + # ================ + + def step(program: nb.uintp, particle_input: particle_gpu): + simulation = access_simulation(program) + data_ptr = access_data_ptr(program) + data = harmonize.array_from_ptr(data_ptr, shape, nb.float64) + + particle_container = util.local_array(1, type_.particle) + particle_container[0] = particle_input + particle = particle_container[0] + particle["fresh"] = False + step_particle(particle_container, program, data) + if particle["alive"]: + step_async(program, particle) + + # Bind them all + base_fns = (initialize, finalize, make_work) + async_fns = [step] + src_spec = harmonize.RuntimeSpec("mcdc_source", state_spec, base_fns, async_fns) + harmonize.RuntimeSpec.bind_specs() + + # Load the specs + harmonize.RuntimeSpec.load_specs() + + if config.args.gpu_strategy == "async": + config.args.gpu_arena_size = config.args.gpu_arena_size // 32 + src_fns = src_spec.async_functions() + else: + src_fns = src_spec.event_functions() + + ARENA_SIZE = config.args.gpu_arena_size + BLOCK_COUNT = config.args.gpu_block_count + + alloc_state = src_fns["alloc_state"] + free_state = src_fns["free_state"] + + alloc_program = 
src_fns["alloc_program"] + free_program = src_fns["free_program"] + + load_state_device_simulation = src_fns["load_state_device_simulation"] + store_state_device_simulation = src_fns["store_state_device_simulation"] + store_pointer_state_device_simulation = src_fns[ + "store_pointer_state_device_simulation" + ] + + load_state_device_data = src_fns["load_state_device_data"] + store_state_device_data = src_fns["store_state_device_data"] + store_pointer_state_device_data = src_fns["store_pointer_state_device_data"] + + init_program = src_fns["init_program"] + exec_program = src_fns["exec_program"] + complete = src_fns["complete"] + clear_flags = src_fns["clear_flags"] + set_device = src_fns["set_device"] + + +# ====================================================================================== +# Setup GPU +# ====================================================================================== + +from numba import njit + +rank = MPI.COMM_WORLD.Get_rank() +device_id = rank % config.args.gpu_share_stride + + +@njit +def setup_gpu_program(simulation_container, data): + simulation = simulation_container[0] + + set_device(device_id) + simulation["gpu_meta"]["state_pointer"] = cast_voidptr_to_uintp(alloc_state()) + + if config.gpu_state_storage == "separate": + store_pointer_state_device_simulation( + simulation["gpu_meta"]["state_pointer"], + simulation["gpu_meta"]["simulation_pointer"], + ) + store_pointer_state_device_data( + simulation["gpu_meta"]["state_pointer"], + simulation["gpu_meta"]["data_pointer"], + ) + else: + store_pointer_state_device_simulation( + simulation["gpu_meta"]["state_pointer"], simulation_container + ) + store_pointer_state_device_data(simulation["gpu_meta"]["state_pointer"], data) + + simulation["gpu_meta"]["program_pointer"] = cast_voidptr_to_uintp( + alloc_program(simulation["gpu_meta"]["state_pointer"], ARENA_SIZE) + ) + init_program(simulation["gpu_meta"]["program_pointer"], BLOCK_COUNT) + + +@njit +def teardown_gpu_program(simulation): + 
free_program(cast_uintp_to_voidptr(simulation["gpu_meta"]["program_pointer"])) + free_state(cast_uintp_to_voidptr(simulation["gpu_meta"]["state_pointer"])) + + +# ====================================================================================== +# Simulation structure and data creators +# ====================================================================================== + + +def create_data_array(size, dtype): + if config.gpu_state_storage == "managed": + data_tally_ptr = harmonize.alloc_managed_bytes(size) + else: + data_tally_ptr = harmonize.alloc_device_bytes(size) + data_tally_uint = cast_voidptr_to_uintp(data_tally_ptr) + data_tally = nb.carray(data_tally_ptr, (size,), dtype) + return data_tally, data_tally_uint + + +def create_mcdc_container(dtype): + if config.gpu_state_storage == "managed": + mcdc_ptr = harmonize.alloc_managed_bytes(dtype.itemsize) + else: + mcdc_ptr = harmonize.alloc_device_bytes(dtype.itemsize) + mcdc_uint = cast_voidptr_to_uintp(mcdc_ptr) + mcdc_container = nb.carray(mcdc_ptr, (1,), dtype) + return mcdc_container, mcdc_uint + + +# ====================================================================================== +# Type casters +# ====================================================================================== + + +@nbxt.intrinsic +def cast_uintp_to_voidptr(typingctx, src): + # check for accepted types + if isinstance(src, nb.types.Integer): + # create the expected type signature + result_type = nb.types.voidptr + sig = result_type(nb.types.uintp) + + # defines the custom code generation + def codegen(context, builder, signature, args): + # llvm IRBuilder code here + [src] = args + rtype = signature.return_type + llrtype = context.get_value_type(rtype) + return builder.inttoptr(src, llrtype) + + return sig, codegen + + +@nbxt.intrinsic +def cast_voidptr_to_uintp(typingctx, src): + # check for accepted types + if isinstance(src, nb.types.RawPointer): + # create the expected type signature + result_type = nb.types.uintp 
+ sig = result_type(nb.types.voidptr) + + # defines the custom code generation + def codegen(context, builder, signature, args): + # llvm IRBuilder code here + [src] = args + rtype = signature.return_type + llrtype = context.get_value_type(rtype) + return builder.ptrtoint(src, llrtype) + + return sig, codegen diff --git a/mcdc/mcdc/code_factory/gpu/transport/__init__.py b/mcdc/mcdc/code_factory/gpu/transport/__init__.py new file mode 100644 index 000000000..6dfd8b41b --- /dev/null +++ b/mcdc/mcdc/code_factory/gpu/transport/__init__.py @@ -0,0 +1,4 @@ +import mcdc.code_factory.gpu.transport.geometry as geometry +import mcdc.code_factory.gpu.transport.particle_bank as particle_bank +import mcdc.code_factory.gpu.transport.simulation as simulation +import mcdc.code_factory.gpu.transport.util as util diff --git a/mcdc/mcdc/code_factory/gpu/transport/geometry/__init__.py b/mcdc/mcdc/code_factory/gpu/transport/geometry/__init__.py new file mode 100644 index 000000000..e10e84a54 --- /dev/null +++ b/mcdc/mcdc/code_factory/gpu/transport/geometry/__init__.py @@ -0,0 +1 @@ +import mcdc.code_factory.gpu.transport.geometry.interface as interface diff --git a/mcdc/mcdc/code_factory/gpu/transport/geometry/interface.py b/mcdc/mcdc/code_factory/gpu/transport/geometry/interface.py new file mode 100644 index 000000000..e460f80e1 --- /dev/null +++ b/mcdc/mcdc/code_factory/gpu/transport/geometry/interface.py @@ -0,0 +1,11 @@ +from numba import njit + +# ====================================================================================== +# Geometry inspection +# ====================================================================================== + + +@njit +def report_lost_particle(particle_container, simulation): + particle = particle_container[0] + particle["alive"] = False diff --git a/mcdc/mcdc/code_factory/gpu/transport/particle_bank.py b/mcdc/mcdc/code_factory/gpu/transport/particle_bank.py new file mode 100644 index 000000000..aee6c20bc --- /dev/null +++ 
from numba import njit

###

import mcdc.numba_types as type_
import mcdc.transport.particle as particle_module
import mcdc.transport.util as util
import mcdc.code_factory.gpu.program_builder as gpu_program

from mcdc.constant import GPU_ASYNC_SIMPLE

# =============================================================================
# Bank and pop particle
# =============================================================================


@njit
def bank_active_particle(particle_container, program):
    """Dispatch a copy of the particle as asynchronous GPU work.

    The caller's container is left untouched; a staged copy is handed to the
    harmonize scheduler.
    """
    simulation = util.access_simulation(program)

    # Stage a copy so the scheduler owns an independent particle record
    staged_container = util.local_array(1, type_.particle)
    particle_module.copy(staged_container, particle_container)
    if simulation["settings"]["gpu_async_type"] == GPU_ASYNC_SIMPLE:
        gpu_program.step_async(program, staged_container[0])
    # NOTE(review): the non-simple dispatch path is currently disabled:
    # else:
    #     gpu_program.find_cell_async(program, staged_container[0])


@njit
def report_full_bank(bank):
    # Intentionally a no-op in the GPU code path
    pass


@njit
def report_empty_bank(bank):
    # Intentionally a no-op in the GPU code path
    pass
import harmonize

from numba import njit

###

import mcdc.code_factory.gpu.program_builder as gpu_module
import mcdc.config as config
import mcdc.transport.particle_bank as particle_bank_module

from mcdc.constant import GPU_STORAGE_SEPARATE, GPU_STRATEGY_ASYNC
from mcdc.transport.simulation import source_closeout

caching = config.caching


@njit(cache=caching)
def source_loop(seed, simulation, data):
    """Run the GPU source loop: generate and transport all source particles.

    Work is processed in phases. For each phase the host state is (for
    "separate" storage) copied to the device, the harmonize program is
    executed until complete, and the state is copied back.

    Parameters
    ----------
    seed : RNG seed for source-particle generation.
    simulation : simulation state record; device handles live in "gpu_meta".
    data : flat float64 array holding the simulation/tally data.
    """
    # Iteration budget per exec_program call for async execution
    iter_count = 655360000
    # Work batch size per exec_program call for event-based execution
    batch_size = 64

    settings = simulation["settings"]

    full_work_size = simulation["mpi_work_size"]

    # Phase size bounds how much work one program execution covers
    if settings["gpu_strategy"] == GPU_STRATEGY_ASYNC:
        phase_size = 1000000000
    else:
        phase_size = 1000000
    phase_count = (full_work_size + phase_size - 1) // phase_size

    for phase in range(phase_count):

        # Window of source-particle indices covered by this phase
        simulation["mpi_work_iter"][0] = phase_size * phase
        simulation["mpi_work_size"] = min(phase_size * (phase + 1), full_work_size)
        simulation["source_seed"] = seed

        # Store the global state to the GPU
        # NOTE(review): both the simulation record and the data array are
        # copied against the same state_pointer — presumably harmonize
        # resolves the destination by argument type; confirm.
        if settings["gpu_storage"] == GPU_STORAGE_SEPARATE:
            harmonize.memcpy_host_to_device(
                simulation["gpu_meta"]["state_pointer"], simulation
            )
            harmonize.memcpy_host_to_device(
                simulation["gpu_meta"]["state_pointer"], data
            )

        # Execute the program, and continue to do so until it is done
        block_count = gpu_module.BLOCK_COUNT

        if settings["gpu_strategy"] == GPU_STRATEGY_ASYNC:
            gpu_module.exec_program(
                simulation["gpu_meta"]["program_pointer"], block_count, iter_count
            )
            while not gpu_module.complete(simulation["gpu_meta"]["program_pointer"]):
                gpu_module.exec_program(
                    simulation["gpu_meta"]["program_pointer"], block_count, iter_count
                )
        else:
            gpu_module.exec_program(
                simulation["gpu_meta"]["program_pointer"], block_count, batch_size
            )
            while not gpu_module.complete(simulation["gpu_meta"]["program_pointer"]):
                gpu_module.exec_program(
                    simulation["gpu_meta"]["program_pointer"], block_count, batch_size
                )
            gpu_module.clear_flags(simulation["gpu_meta"]["program_pointer"])

        # Recover the original program state.
        # FIX: this branch previously tested config.gpu_state_storage while the
        # store above tested settings["gpu_storage"]; if the two ever diverged,
        # state was copied one way only. Both now use the runtime setting so
        # store and recover always pair up.
        if settings["gpu_storage"] == GPU_STORAGE_SEPARATE:
            harmonize.memcpy_device_to_host(
                simulation, simulation["gpu_meta"]["state_pointer"]
            )
            harmonize.memcpy_device_to_host(
                data, simulation["gpu_meta"]["state_pointer"]
            )

        gpu_module.clear_flags(simulation["gpu_meta"]["program_pointer"])

    # Restore the full work size clobbered by the per-phase windows
    simulation["mpi_work_size"] = full_work_size

    particle_bank_module.set_bank_size(simulation["bank_active"], 0)

    source_closeout(simulation, 1, 1, data)
b/mcdc/mcdc/code_factory/gpu/transport/util.py new file mode 100644 index 000000000..2acb013c8 --- /dev/null +++ b/mcdc/mcdc/code_factory/gpu/transport/util.py @@ -0,0 +1,165 @@ +import harmonize +import numba as nb +import numpy as np + +from numba import njit, types + + +@njit +def atomic_add(array, idx, value): + harmonize.array_atomic_add(array, idx, value) + + +# ============================================================================= +# Generic GPU/CPU local array variable constructors +# ============================================================================= + + +def local_array(shape, dtype): + return np.zeros(shape, dtype=dtype) + + +@nb.extending.type_callable(local_array) +def type_local_array(context): + + from numba.core.typing.npydecl import parse_dtype, parse_shape + + if isinstance(context, nb.core.typing.context.Context): + + # Function repurposed from Numba's ol_np_empty. + def typer(shape, dtype): + nb.np.arrayobj._check_const_str_dtype("empty", dtype) + + # Only integer literals and tuples of integer literals are valid + # shapes + if isinstance(shape, types.Integer): + if not isinstance(shape, types.IntegerLiteral): + raise nb.core.errors.UnsupportedError( + f"Integer shape type {shape} is not literal." + ) + elif isinstance(shape, (types.Tuple, types.UniTuple)): + if any([not isinstance(s, types.IntegerLiteral) for s in shape]): + raise nb.core.errors.UnsupportedError( + f"At least one element of shape tuple type{shape} is not an integer literal." + ) + else: + raise nb.core.errors.UnsupportedError( + f"Shape is of unsupported type {shape}." + ) + + # No default arguments. 
+ nb_dtype = parse_dtype(dtype) + nb_shape = parse_shape(shape) + + if nb_dtype is not None and nb_shape is not None: + retty = types.Array(dtype=nb_dtype, ndim=nb_shape, layout="C") + # Inlining the signature construction from numpy_empty_nd + sig = retty(shape, dtype) + return sig + else: + msg = f"Cannot parse input types to function np.empty({shape}, {dtype})" + raise nb.errors.TypingError(msg) + + return typer + + elif isinstance(context, nb.cuda.target.CUDATypingContext): + + # Function repurposed from Numba's Cuda_array_decl. + def typer(shape, dtype): + + # Only integer literals and tuples of integer literals are valid + # shapes + if isinstance(shape, types.Integer): + if not isinstance(shape, types.IntegerLiteral): + return None + elif isinstance(shape, (types.Tuple, types.UniTuple)): + if any([not isinstance(s, types.IntegerLiteral) for s in shape]): + return None + else: + return None + + ndim = parse_shape(shape) + nb_dtype = parse_dtype(dtype) + if nb_dtype is not None and ndim is not None: + return types.Array(dtype=nb_dtype, ndim=ndim, layout="C") + + return typer + + elif isinstance(context, nb.hip.target.HIPTypingContext): + + def typer(shape, dtype): + # Only integer literals and tuples of integer literals are valid + # shapes + if isinstance(shape, types.Integer): + if not isinstance(shape, types.IntegerLiteral): + return None + elif isinstance(shape, (types.Tuple, types.UniTuple)): + if any([not isinstance(s, types.IntegerLiteral) for s in shape]): + return None + else: + return None + + ndim = parse_shape(shape) + nb_dtype = parse_dtype(dtype) + if nb_dtype is not None and ndim is not None: + result = types.Array(dtype=nb_dtype, ndim=ndim, layout="C") + return result + + return typer + + else: + raise nb.core.errors.UnsupportedError(f"Unsupported target context {context}.") + + +@nb.extending.lower_builtin(local_array, types.IntegerLiteral, types.Any) +def builtin_local_array(context, builder, sig, args): + + shape, dtype = sig.args + + from 
numba.core.typing.npydecl import parse_dtype, parse_shape + import numba.np.arrayobj as arrayobj + + if isinstance(context, nb.core.cpu.CPUContext): + + # No default arguments. + nb_dtype = parse_dtype(dtype) + nb_shape = parse_shape(shape) + + retty = types.Array(dtype=nb_dtype, ndim=nb_shape, layout="C") + + # In ol_np_empty, the reference type of the array is fed into the + # signatrue as a third argument. This third argument is not used by + # _parse_empty_args. + sig = retty(shape, dtype) + + arrtype, shapes = arrayobj._parse_empty_args(context, builder, sig, args) + ary = arrayobj._empty_nd_impl(context, builder, arrtype, shapes) + + return ary._getvalue() + elif isinstance(context, nb.cuda.target.CUDATargetContext): + length = sig.args[0].literal_value + dtype = parse_dtype(sig.args[1]) + return nb.cuda.cudaimpl._generic_array( + context, + builder, + shape=(length,), + dtype=dtype, + symbol_name="_cudapy_harm_lmem", + addrspace=nb.cuda.cudadrv.nvvm.ADDRSPACE_LOCAL, + can_dynsized=False, + ) + elif isinstance(context, nb.hip.target.HIPTargetContext): + length = sig.args[0].literal_value + dtype = parse_dtype(sig.args[1]) + result = nb.hip.typing_lowering.hip.lowering._generic_array( + context, + builder, + shape=(length,), + dtype=dtype, + symbol_name="_HIPpy_lmem", + addrspace=nb.hip.amdgcn.ADDRSPACE_LOCAL, + can_dynsized=False, + ) + return result + else: + raise nb.core.errors.UnsupportedError(f"Unsupported target context {context}.") diff --git a/mcdc/mcdc/code_factory/literals_generator.py b/mcdc/mcdc/code_factory/literals_generator.py new file mode 100644 index 000000000..3e5329a50 --- /dev/null +++ b/mcdc/mcdc/code_factory/literals_generator.py @@ -0,0 +1,25 @@ +import numpy as np + +from numba import njit + + +def _literalize(value): + namespace = {} + jit_str = f"@njit\ndef impl():\n return {value}\n" + exec(jit_str, globals(), namespace) + return namespace["impl"] + + +def make_literals(simulation): + import mcdc.literals as literals + + # RPN 
evaluation buffer size + if len(simulation.cells) == 0: + rpn_evaluation_buffer_size = 1 + else: + rpn_evaluation_buffer_size = int( + max( + [np.sum(np.array(x.region_RPN_tokens) >= 0.0) for x in simulation.cells] + ) + ) + literals.rpn_evaluation_buffer_size = _literalize(rpn_evaluation_buffer_size) diff --git a/mcdc/mcdc/code_factory/numba_objects_generator.py b/mcdc/mcdc/code_factory/numba_objects_generator.py new file mode 100644 index 000000000..d07da87ad --- /dev/null +++ b/mcdc/mcdc/code_factory/numba_objects_generator.py @@ -0,0 +1,1265 @@ +from __future__ import annotations + +#### + +import importlib +import numba as nb +import numpy as np + +from mpi4py import MPI +from numba import njit +from numba.extending import intrinsic +from pathlib import Path + +#### + +import mcdc +import mcdc.code_factory.gpu.program_builder as gpu_builder +import mcdc.config as config +import mcdc.object_ as object_module +import mcdc.object_.base as base + +from mcdc.object_.base import ( + ObjectBase, + ObjectNonSingleton, + ObjectPolymorphic, + ObjectSingleton, +) +from mcdc.object_.particle import Particle, ParticleBank, ParticleData +from mcdc.object_.tally import Tally +from mcdc.print_ import print_error +from mcdc.util import flatten + +type_map = { + bool: np.bool_, + float: np.float64, + int: np.int64, + str: "U32", + np.bool_: np.bool_, + np.float64: np.float64, + np.int64: np.int64, + np.uint64: np.uint64, + np.str_: "U32", + np.uintp: np.uintp, +} + +bank_names = ["bank_active", "bank_census", "bank_source", "bank_future"] + +# ====================================================================================== +# Gather and group the classes +# ====================================================================================== + +base_classes = [ + getattr(base, x) + for x in dir(base) + if isinstance(getattr(base, x), type) and issubclass(getattr(base, x), ObjectBase) +] + +all_classes = [ParticleData, Particle] +mcdc_classes = [ParticleData, Particle] 
+polymorphic_bases = [] + +file_names = [x for x in dir(object_module) if x[:2] != "__" and x != "base"] +for file_name in file_names: + file = getattr(object_module, file_name) + item_names = dir(file) + for item_name in item_names: + item = getattr(file, item_name) + if ( + isinstance(item, type) + and issubclass(item, ObjectBase) + and item not in all_classes + ): + all_classes.append(item) + + if ( + item not in base_classes + and "label" in dir(item) + and item not in mcdc_classes + ): + mcdc_classes.append(item) + +polymorphic_bases = [ + x + for x in all_classes + if (x.__name__[-4:] == "Base" or x.__name__ == "Tally") and "label" in dir(x) +] + +# ====================================================================================== +# Numba object creation +# ====================================================================================== + + +def generate_numba_objects(simulation): + # ================================================================================== + # Allocate key items for the Numba object: + # - Python annotations + # - Numba structures + # - Records + # - Data: flattened vector to store arbitrary-size arrays + # - Accessor targets: to generate getter/setter helpers to easily access data + # ================================================================================== + + annotations = {} + structures = {} + records = {} + data = {"size": 0} + accessor_targets = {} + + for mcdc_class in mcdc_classes: + annotations[mcdc_class.label] = {} + structures[mcdc_class.label] = [] + accessor_targets[mcdc_class.label] = [] + if issubclass(mcdc_class, ObjectNonSingleton): + records[mcdc_class.label] = [] + else: + records[mcdc_class.label] = {} + + # Particle banks + for name in bank_names: + annotations[name] = {} + structures[name] = [] + accessor_targets[name] = [] + + # Move simulation to last + annotations["simulation"] = annotations.pop("simulation") + structures["simulation"] = structures.pop("simulation") + 
records["simulation"] = records.pop("simulation") + accessor_targets["simulation"] = accessor_targets.pop("simulation") + + # ================================================================================== + # Gather the annotations from the classes + # ================================================================================== + + for mcdc_class in mcdc_classes: + # Include all ancestors, but stop at the MC/DC base classes + classes = [] + for item in mcdc_class.__mro__: + if item in base_classes: + break + classes.append(item) + + # If polymorphic, don't include the polymorphic base + if issubclass(mcdc_class, ObjectPolymorphic): + classes = [mcdc_class] + + # Get the annotations + for class_ in classes: + new_annotations = { + k: v + for k, v in class_.__annotations__.items() + if k not in ["label", "non_numba"] + and ( + "non_numba" not in dir(class_) + or ("non_numba" in dir(class_) and k not in class_.non_numba) + ) + } + # Evaluate stringified annotation + if ( + len(new_annotations) > 0 + and type(next(iter(new_annotations.values()))) == str + ): + new_annotations = parse_annotations_dict(new_annotations) + + annotations[mcdc_class.label].update(new_annotations) + + # Particle banks + for name in bank_names: + annotations[name] = { + k: v + for k, v in ParticleBank.__annotations__.items() + if k not in ["label", "non_numba"] + and ( + "non_numba" not in dir(ParticleBank) + or ( + "non_numba" in dir(ParticleBank) and k not in ParticleBank.non_numba + ) + ) + } + + # ================================================================================== + # Set the structures and accessor targets based on the annotations + # ================================================================================== + + # Temporary simulation object structure + simulation_object_structure = [] + for field in annotations["simulation"]: + hint = annotations["simulation"][field] + hint_origin = get_origin(hint) + hint_args = get_args(hint) + + if hint in all_classes: 
+ simulation_object_structure.append((field, hint)) + continue + if hint_origin == list and hint_args[0] in all_classes: + simulation_object_structure.append((field, list, hint_args[0])) + continue + + # Set the structures and accessor targets + for label in annotations.keys(): + set_structure(label, structures, accessor_targets, annotations) + + # Generate the accessor helper + if MPI.COMM_WORLD.Get_rank() == 0: + generate_mcdc_access(accessor_targets) + + # Add ID for non-singleton + for class_ in mcdc_classes: + if issubclass(class_, ObjectNonSingleton): + structures[class_.label].append(("ID", type_map[int])) + # Set parent and child ID and type if polymorphic + if issubclass(class_, ObjectPolymorphic): + if class_.__name__[-4:] == "Base" or class_.__name__ == "Tally": + structures[class_.label].append(("child_type", type_map[int])) + structures[class_.label].append(("child_ID", type_map[int])) + else: + structures[class_.label].append(("parent_ID", type_map[int])) + + # Add particle data to particle banks and add particle banks to the simulation + for name in bank_names: + bank = getattr(simulation, name) + size = int(bank.size[0]) + structures[name] += [ + ("particle_data", into_dtype(structures["particle_data"]), (size,)) + ] + # + structures["simulation"] = [(name, into_dtype(structures[name]))] + structures[ + "simulation" + ] + + # ================================================================================== + # Set records and data size based on the simulation structures and objects + # ================================================================================== + + # Allocate object containers + objects = [] + + # Gather the objects from the simulation + attribute_names = [ + x + for x in dir(simulation) + if ( + not x.startswith("__") + and ( + isinstance(getattr(simulation, x), ObjectBase) + or not callable(getattr(simulation, x)) + ) + and x not in simulation.non_numba + ) + ] + for attribute_name in attribute_names: + attribute = 
getattr(simulation, attribute_name) + if type(attribute) in mcdc_classes: + objects.append(attribute) + if type(attribute) == list: + for item in attribute: + if type(item) in mcdc_classes: + objects.append(item) + + # Set the records and the data size + for object_ in objects: + set_object(object_, annotations, structures, records, data) + set_object(simulation, annotations, structures, records, data) + + # ================================================================================== + # Finalize the simulation object structure and set record + # ================================================================================== + + new_structure = [] + record = records["simulation"] + for item in simulation_object_structure: + field = item[0] + type_1 = item[1] + + # List of objects + if type_1 == list: + type_2 = item[2] + + # List of non-polymorphics + if item[2] not in polymorphic_bases: + N = len(records[item[2].label]) + new_structure.append( + (field, into_dtype(structures[item[2].label]), (N,)) + ) + new_structure.append((f"N_{plural_to_singular(field)}", type_map[int])) + record[f"N_{plural_to_singular(field)}"] = N + + # List of polymorphics + else: + for class_ in mcdc_classes: + if issubclass(class_, type_2): + N = len(records[class_.label]) + new_structure.append( + ( + singular_to_plural(class_.label), + into_dtype(structures[class_.label]), + (N,), + ) + ) + new_structure.append((f"N_{class_.label}", type_map[int])) + record[f"N_{class_.label}"] = N + + # Singleton + elif item[1] in mcdc_classes and issubclass(item[1], ObjectSingleton): + new_structure.append((field, into_dtype(structures[item[1].label]))) + + else: + print_error(f"Unknown type: {item}") + + structures["simulation"] = new_structure + structures["simulation"] + + # Print the fields + if MPI.COMM_WORLD.Get_rank() == 0: + with open(f"{Path(mcdc.__file__).parent}/numba_types.py", "w") as f: + text = "# The following is automatically generated by code_factory.py\n\n" + text += "from 
numpy import bool_\n" + text += "from numpy import float64\n" + text += "from numpy import int64\n" + text += "from numpy import uint64\n" + text += "from numpy import uintp\n" + text += "\n###\n\n" + text += ( + "from mcdc.code_factory.numba_objects_generator import into_dtype\n\n" + ) + + for label in structures.keys(): + # Skip special types + if label in ["gpu_meta"] + bank_names + ["simulation"]: + continue + + text += f"{label} = into_dtype([\n" + structure = structures[label] + + for item in structure: + text += decode_structure_item(item) + text += "])\n\n" + + # GPU meta + text += "gpu_meta = into_dtype([\n" + for item in structures["gpu_meta"]: + if item[0].endswith("pointer"): + text += f" ('{item[0]}', uintp),\n" + else: + text += decode_structure_item(item) + text += "])\n\n" + + # Particle banks + for label in bank_names: + structure = structures[label] + + text += f"{label} = None\n" + text += f"def set_{label}(N: dict):\n" + text += f" global {label}\n" + text += f" {label} = into_dtype([\n" + for item in structure: + if item[0] == "particle_data": + text += ( + f" ('{item[0]}', {item[0]}, (N['{item[0]}'],)),\n" + ) + else: + text += decode_structure_item(item, " ") + text += " ])\n\n" + + # Simulation + text += f"simulation = None\n" + text += f"def set_simulation(N: dict):\n" + text += f" global simulation\n" + text += f" simulation = into_dtype([\n" + for item in structures["simulation"]: + if type(item[1]) == np.dtypes.VoidDType and len(item) == 3: + singular_field = plural_to_singular(item[0]) + text += f" ('{item[0]}', {singular_field}, (N['{singular_field}'])),\n" + else: + text += decode_structure_item(item, " ") + text += " ])\n\n" + + f.write(text) + + # ================================================================================== + # Set numba_types.py + # ================================================================================== + + import mcdc.numba_types as type_ + + # Particle banks + 
type_.set_bank_active({"particle_data": simulation.bank_active.size[0]}) + type_.set_bank_census({"particle_data": simulation.bank_census.size[0]}) + type_.set_bank_source({"particle_data": simulation.bank_source.size[0]}) + type_.set_bank_future({"particle_data": simulation.bank_future.size[0]}) + + # Simulation + N = {} + for item in structures["simulation"]: + if type(item[1]) == np.dtypes.VoidDType and len(item) == 3: + singular_field = plural_to_singular(item[0]) + N[singular_field] = item[2] + type_.set_simulation(N) + + # ================================================================================== + # GPU preparation: Adapt transport functions, forward declare, and build program + # ================================================================================== + + if config.target == "gpu": + gpu_builder.forward_declare_gpu_program() + gpu_builder.adapt_transport_functions() + gpu_builder.build_gpu_program(data["size"]) + + # ================================================================================== + # Allocate the flattened data and re-set the objects + # ================================================================================== + + data["array"], data["pointer"] = create_data_array(data["size"]) + + data["size"] = 0 + for object_ in objects: + set_object(object_, annotations, structures, records, data, set_data=True) + set_object(simulation, annotations, structures, records, data, set_data=True) + + # ================================================================================== + # Set with records + # ================================================================================== + + # The global structure/variable container + mcdc_simulation_container, mcdc_simulation_pointer = create_simulation_container( + into_dtype(structures["simulation"]) + ) + mcdc_simulation = mcdc_simulation_container[0] + + record = records["simulation"] + structure = structures["simulation"] + for item in structure: + field = item[0] + 
field_type = item[1] + size = -1 + if len(item) == 3: + size = item[2][0] + + # Skip particular attributes + if field in bank_names: + continue + + # Simple attribute + if type(field_type) != np.dtypes.VoidDType: + mcdc_simulation[field] = record[field] + + # MC/DC objects + else: + # Singleton + if size == -1: + for sub_item in structures[field]: + mcdc_simulation[field][sub_item[0]] = records[field][sub_item[0]] + # Non-singleton + else: + singular_field = plural_to_singular(field) + for i in range(size): + for sub_item in structures[singular_field]: + mcdc_simulation[field][i][sub_item[0]] = records[ + singular_field + ][i][sub_item[0]] + + # Manually set particle bank attributes + for name in bank_names: + mcdc_simulation[name]["tag"] = getattr(simulation, name).tag + + # GPU program setup + if config.target == "gpu": + gpu_builder.setup_gpu_program(mcdc_simulation_container, data["array"]) + gpu_builder.adapt_transport_functions_post_setup() + + return mcdc_simulation_container, data["array"] + + +def set_structure(label, structures, accessor_targets, annotations): + structure = structures[label] + annotation = annotations[label] + accessor_target = accessor_targets[label] + + for field in annotation: + hint = annotation[field] + hint_origin = get_origin(hint) + hint_args = get_args(hint) + hint_origin_shape = None + hint_inner_dtype = None + fixed_size_array = False + + # Process annotation + if hint_origin is Annotated: + hint_decoded = decode_annotated_ndarray(hint) + hint_origin = hint_decoded["origin"] + hint_origin_shape = hint_decoded["shape"] + hint_inner_dtype = get_args(hint_decoded["dtype"])[0] + fixed_size_array = True + + # Mark as arbitrary size if string is used in shape + for dim_size in hint_origin_shape: + if type(dim_size) == str: + fixed_size_array = False + break + + # Skip simulation object structure + if label == "simulation": + if hint in all_classes: + continue + if hint_origin == list and hint_args[0] in all_classes: + hint_origin = 
np.ndarray + continue + + # ========================================================================== + # Get the type + # ========================================================================== + + # Basics + simple_scalar = hint in type_map.keys() + simple_list = hint_origin == list and hint_args[0] in type_map.keys() + numpy_array = hint_origin == np.ndarray + + # MC/DC class + def non_polymorphic(x): + # Only treat real classes that inherit from ObjectNonSingleton + return ( + isinstance(x, type) + and issubclass(x, ObjectNonSingleton) + and x not in polymorphic_bases + ) + + def polymorphic_base(x): + # Only treat real classes that are registered polymorphic bases + return isinstance(x, type) and x in polymorphic_bases + + # List of MC/DC classes + list_of_non_polymorphics = hint_origin == list and non_polymorphic(hint_args[0]) + list_of_polymorphic_bases = hint_origin == list and polymorphic_base( + hint_args[0] + ) + + # ========================================================================== + # Set the structure + # ========================================================================== + + # Basics + if fixed_size_array: + structure.append((field, type_map[hint_inner_dtype], hint_origin_shape)) + elif simple_scalar: + structure.append((field, type_map[hint])) + elif simple_list or numpy_array: + structure.append((f"{field}_offset", type_map[int])) + structure.append((f"{field}_length", type_map[int])) + if hint_origin_shape is not None: + accessor_target.append((f"{field}", hint_origin_shape)) + else: + accessor_target.append((f"{field}", (f"{field}_length",))) + + # MC/DC classes + elif non_polymorphic(hint) or polymorphic_base(hint): + structure.append((f"{field}_ID", type_map[int])) + + # List of MC/DC classes + elif list_of_non_polymorphics or list_of_polymorphic_bases: + singular = plural_to_singular(field) + structure.append((f"N_{singular}", type_map[int])) + structure.append((f"{singular}_IDs_offset", type_map[int])) + if 
hint_origin_shape is not None: + accessor_target.append((f"{singular}_IDs", hint_origin_shape)) + else: + accessor_target.append((f"{singular}_IDs", (f"N_{singular}",))) + + # Unknown type + else: + print_error(f"Unknown type hint for {label}/{field}: {hint}") + + +def set_object( + object_, annotations, structures, records, data, class_=None, set_data=False +): + if class_ == None: + class_ = object_.__class__ + + # Set the parent first if polymorphics + if isinstance(object_, ObjectPolymorphic) and class_ not in polymorphic_bases: + for parent_class in polymorphic_bases: + if issubclass(class_, parent_class): + set_object( + object_, + annotations, + structures, + records, + data, + parent_class, + set_data, + ) + + annotation = annotations[class_.label] + structure = structures[class_.label] + record = {} + + if class_.label == "simulation": + record = records["simulation"] + + # Straightforwardly set up attributes + for key in [x[0] for x in structure]: + if key in dir(object_): + # Skip if set already + if key in record.keys(): + continue + record[key] = getattr(object_, key) + + # Loop over the supported attributes + attribute_names = [ + x for x in dir(object_) if (x[:2] != "__" and not callable(getattr(object_, x))) + ] + if "non_numba" in dir(object_): + attribute_names = list(set(attribute_names) - set(object_.non_numba)) + for attribute_name in attribute_names: + # Skip if set already + if attribute_name in record.keys(): + continue + + # Skip if not in annotation + if attribute_name not in annotation.keys(): + continue + attribute = getattr(object_, attribute_name) + + # Convert list of supported types into Numpy array + if type(attribute) == list: + if get_args(annotation[attribute_name])[0] in type_map.keys(): + attribute = np.array(attribute) + + # Numpy array + if type(attribute) == np.ndarray: + attribute_flatten = attribute.flatten() + record[f"{attribute_name}_offset"] = data["size"] + record[f"{attribute_name}_length"] = len(attribute_flatten) + 
if set_data: + data["array"][data["size"] : data["size"] + len(attribute_flatten)] = ( + attribute_flatten[:] + ) + data["size"] += len(attribute_flatten) + + # Non-singleton object + elif isinstance(attribute, ObjectNonSingleton): + if ( + not isinstance(attribute, ObjectPolymorphic) + or annotation[attribute_name] in polymorphic_bases + ): + record[f"{attribute_name}_ID"] = attribute.ID + else: + record[f"{attribute_name}_ID"] = attribute.child_ID + + # List of Non-singleton objects + elif type(attribute) == list: + inner_type = get_args(annotation[attribute_name])[0] + + # Flatten the list + attribute_flatten = list(flatten(attribute)) + singular_name = plural_to_singular(attribute_name) + + if not issubclass(inner_type, ObjectNonSingleton): + print_error( + f"[ERROR] Get a list of non-object for {attribute_name}: {attribute}" + ) + + record[f"N_{singular_name}"] = len(attribute_flatten) + record[f"{singular_name}_IDs_offset"] = data["size"] + if set_data: + if ( + not issubclass(inner_type, ObjectPolymorphic) + or inner_type in polymorphic_bases + ): + data["array"][ + data["size"] : data["size"] + len(attribute_flatten) + ] = [x.ID for x in attribute_flatten] + else: + data["array"][ + data["size"] : data["size"] + len(attribute_flatten) + ] = [x.child_ID for x in attribute_flatten] + data["size"] += len(attribute_flatten) + + # Complete for simulation object + if class_.label == "simulation": + return + + # Set ID of non-singleton + if isinstance(object_, ObjectNonSingleton): + if not isinstance(object_, ObjectPolymorphic): + record["ID"] = object_.ID + + # Set parent and child ID and type if polymorphic + else: + # Parent + if class_ in polymorphic_bases: + record["ID"] = object_.ID + record["child_ID"] = object_.child_ID + record["child_type"] = object_.type + # Child + else: + record["ID"] = object_.child_ID + record["parent_ID"] = object_.ID + + # Set tally bins + if class_ == Tally: + tally_size = np.prod(object_.bin_shape) + record[f"bin_offset"] = 
data["size"] + record[f"bin_sum_offset"] = data["size"] + tally_size + record[f"bin_sum_square_offset"] = data["size"] + tally_size * 2 + record[f"bin_length"] = tally_size + record[f"bin_sum_length"] = tally_size + record[f"bin_sum_square_length"] = tally_size + data["size"] += 3 * tally_size + + # Check structure-record compatibility + missing = set([x[0] for x in structure]) - set(record.keys()) + if len(missing) > 0: + print_error(f"Missing structure keys in record for {class_.label}: {missing}") + + # Register the record + if isinstance(object_, ObjectSingleton): + records[class_.label] = record + elif isinstance(object_, ObjectNonSingleton): + records[class_.label].append(record) + + +# ============================================================================= +# Global GPU/CPU variable array constructors +# ============================================================================= + + +def create_data_array(size): + if not config.target == "gpu": + data = np.zeros(size, dtype=np.float64) + return data, 0 + else: + return create_data_array_on_gpu(size * 8) + + +@njit +def create_data_array_on_gpu(size): + if config.gpu_state_storage == "managed": + data_ptr = gpu_builder.alloc_managed_bytes(size) + else: + data_ptr = gpu_builder.alloc_device_bytes(size) + data_uint = voidptr_to_uintp(data_ptr) + data = nb.carray(data_ptr, (size,), dtype=np.float64) + return data, data_uint + + +def create_simulation_container(dtype): + if not config.target == "gpu": + simulation_container = np.zeros((1,), dtype=dtype) + return simulation_container, 0 + else: + return create_simulation_container_on_gpu(dtype, dtype.itemsize) + + +@njit +def create_simulation_container_on_gpu(dtype, size): + if config.gpu_state_storage == "managed": + simulation_ptr = gpu_builder.alloc_managed_bytes(size) + else: + simulation_ptr = gpu_builder.alloc_device_bytes(size) + simulation_uint = voidptr_to_uintp(simulation_ptr) + simulation = nb.carray(simulation_ptr, (1,), dtype) + return 
simulation, simulation_uint + + +# ============================================================================= +# Type casters +# ============================================================================= + + +@intrinsic +def cast_voidptr_to_uintp(typingctx, src): + # check for accepted types + if isinstance(src, nb.types.RawPointer): + # create the expected type signature + result_type = nb.types.uintp + sig = result_type(nb.types.voidptr) + + # defines the custom code generation + def codegen(context, builder, signature, args): + # llvm IRBuilder code here + [src] = args + rtype = signature.return_type + llrtype = context.get_value_type(rtype) + return builder.ptrtoint(src, llrtype) + + return sig, codegen + + +@njit() +def voidptr_to_uintp(value): + return cast_voidptr_to_uintp(value) + + +# ====================================================================================== +# Alignment Logic +# ====================================================================================== +# While CPU execution can robustly handle all sorts of Numba types, GPU +# execution requires structs to follow some of the basic properties expected of +# C-style structs with standard layout: +# +# - Every primitive field is aligned by its size, and padding is inserted +# between fields to ensure alignment in arrays and nested data structures +# +# - Every field has a unique address +# +# If these rules are violated, memory accesses made in GPUs may encounter +# problems. For example, in cases where an access is not at an address aligned +# by their size, a segfault or similar fault will occur, or information will be +# lost. These issues were fixed by providing a function, align, which ensures the +# field lists fed to np.dtype fulfill these requirements. +# +# The align function does the following: +# +# - Tracks the cumulative offset of fields as they appear in the input list. 
# - Inserts additional padding fields to ensure that primitive fields are
#   aligned by their size
#
# - Re-sizes arrays to have at least one element in their array (this ensures
#   they have a non-zero size, and hence cannot overlap base addresses with
#   other fields).
#


def fixup_dims(dim_tuple):
    """Return *dim_tuple* with every dimension clamped to at least 1.

    A zero-size dimension would give the field a zero-byte footprint, letting
    it share a base address with its neighbor (see the notes above).
    """
    return tuple(max(d, 1) for d in dim_tuple)


def align(field_list):
    """Insert padding so that *field_list* follows C standard-layout rules.

    Walks the fields in order, tracking the running byte offset. Before any
    field whose offset is not a multiple of its alignment, a ``uint8`` padding
    field is inserted; a final pad rounds the total size up to a multiple of 8
    so the layout also holds inside arrays and nested structs.

    Parameters
    ----------
    field_list : list of tuple
        NumPy dtype field specs: ``(name, dtype)`` or ``(name, dtype, shape)``.

    Returns
    -------
    list of tuple
        The field specs with padding fields interleaved.
    """
    result = []
    offset = 0
    pad_id = 0
    for field in field_list:
        if len(field) > 3:
            # NOTE: fixed a backslash line-continuation inside the string
            # literal that used to inject a run of indentation spaces into
            # the error message.
            print_error(
                "Unexpected struct field specification. Specifications "
                "usually only consist of 3 or fewer members"
            )
        multiplier = 1
        if len(field) == 3:
            field = (field[0], field[1], fixup_dims(field[2]))
            for d in field[2]:
                multiplier *= d
        kind = np.dtype(field[1])
        size = kind.itemsize

        # Nested record dtypes are aligned to 8 bytes; primitives are
        # aligned by their own size.
        if kind.isbuiltin == 0:
            alignment = 8
        elif kind.isbuiltin == 1:
            alignment = size
        else:
            print_error("Unexpected field item type")

        size *= multiplier

        if offset % alignment != 0:
            pad_size = alignment - (offset % alignment)
            result.append((f"padding_{pad_id}", np.uint8, (pad_size,)))
            pad_id += 1
            offset += pad_size

        result.append(field)
        offset += size

    # Round the struct size up to a multiple of 8 for use in arrays.
    if offset % 8 != 0:
        pad_size = 8 - (offset % 8)
        result.append((f"padding_{pad_id}", np.uint8, (pad_size,)))
        pad_id += 1

    return result


def into_dtype(field_list):
    """Build an aligned NumPy record dtype from *field_list* (see ``align``)."""
    result = np.dtype(align(field_list), align=True)
    return result


# ======================================================================================
# Type parser
# ======================================================================================

from typing import Annotated, Any, ForwardRef, Optional, Union, get_args, get_origin
import numpy as np
from numpy.typing import NDArray


# --- Safe locals for eval + ForwardRef fallback ---
class _FwdRefDict(dict):
    """eval() locals that degrade gracefully on unknown identifiers.

    A missing key resolves to the matching MC/DC class when one exists,
    otherwise to ``ForwardRef('Name')`` so unknown names are neither
    imported nor resolved.
    """

    def __missing__(self, key):
        for class_ in all_classes:
            if key == class_.__name__:
                return class_
        return ForwardRef(key)


_SAFE_GLOBALS = {}  # no builtins
_SAFE_LOCALS = _FwdRefDict(
    {
        # builtins
        "str": str,
        "int": int,
        "float": float,
        "bool": bool,
        "bytes": bytes,
        "object": object,
        "list": list,
        "dict": dict,
        "tuple": tuple,
        "set": set,
        # typing
        "Any": Any,
        "Annotated": Annotated,
        "Union": Union,
        "Optional": Optional,
        # numpy typing
        "NDArray": NDArray,
        # numpy dtypes (extend if you need more)
        "float64": np.float64,
        "float32": np.float32,
        "int64": np.int64,
        "int32": np.int32,
    }
)


def parse_type_hint_str(s: str):
    """
    Parse a stringified type hint into a runtime type/typing object.
    Unknown identifiers become ForwardRef('Name') so we don't import/resolve.

    NOTE(review): relies on eval(); acceptable only because annotation
    strings come from trusted MC/DC class definitions, never external input.
    """
    s = s.strip()
    # Special-case empty or 'None' if you ever pass those
    if s in {"None", "NoneType"}:
        return type(None)
    return eval(s, _SAFE_GLOBALS, _SAFE_LOCALS)


def parse_annotations_dict(ann: dict[str, str]) -> dict[str, object]:
    """Parse every stringified annotation in *ann* via parse_type_hint_str."""
    return {k: parse_type_hint_str(v) for k, v in ann.items()}


def decode_annotated_ndarray(hint):
    """Split ``Annotated[NDArray[...], shape]`` into its components.

    Returns a dict with the array origin (``np.ndarray``), the shape metadata
    attached via ``Annotated``, and the shape/dtype type arguments of the
    inner ``NDArray``.
    """
    inner, metadata = get_args(hint)
    inner_origin = get_origin(inner)
    inner_args = get_args(inner)
    shape_type, dtype_type = inner_args
    return {
        "origin": inner_origin,
        "shape": metadata,
        "shape_type": shape_type,
        "dtype": dtype_type,
    }


# ======================================================================================
# Helpers for mcdc_get generators
# ======================================================================================


def generate_mcdc_access(targets):
    """Write the mcdc_get/mcdc_set accessor modules for every object label.

    For each (attribute, shape) pair in *targets*, emits jitted getter and
    setter helpers whose form matches the attribute's dimensionality, plus a
    chunk accessor, then writes package ``__init__`` files importing every
    generated module.
    """
    header = "# The following is automatically generated by code_factory.py\n\n"
    path = f"{Path(mcdc.__file__).parent}"

    for object_name in targets.keys():
        text_getter = header + "from numba import njit\n\n\n"
        text_setter = header + "from numba import njit\n\n\n"

        for attribute in targets[object_name]:
            attribute_name = attribute[0]
            shape = attribute[1]

            if len(shape) == 1:
                text_getter += _accessor_1d_element(object_name, attribute_name)
                text_getter += _accessor_1d_all(object_name, attribute_name, shape[0])
                text_getter += _accessor_1d_last(object_name, attribute_name, shape[0])

                text_setter += _accessor_1d_element(object_name, attribute_name, True)
                text_setter += _accessor_1d_all(
                    object_name, attribute_name, shape[0], True
                )
                text_setter += _accessor_1d_last(
                    object_name, attribute_name, shape[0], True
                )

            elif len(shape) == 2:
                text_getter += _accessor_2d_vector(
                    object_name, attribute_name, shape[1]
                )
                text_getter += _accessor_2d_element(
                    object_name, attribute_name, shape[1]
                )

                text_setter += _accessor_2d_vector(
                    object_name, attribute_name, shape[1], True
                )
                text_setter += _accessor_2d_element(
                    object_name, attribute_name, shape[1], True
                )

            elif len(shape) == 3:
                text_getter += _accessor_3d_element(
                    object_name, attribute_name, shape[1], shape[2]
                )

                text_setter += _accessor_3d_element(
                    object_name, attribute_name, shape[1], shape[2], True
                )

            text_getter += _accessor_chunk(object_name, attribute_name)
            text_setter += _accessor_chunk(object_name, attribute_name, True)

        # Context managers guarantee the files are closed even if a write
        # fails midway (the originals were opened/closed by hand).
        with open(f"{path}/mcdc_get/{object_name}.py", "w") as file_getter:
            file_getter.write(text_getter[:-2])
        with open(f"{path}/mcdc_set/{object_name}.py", "w") as file_setter:
            file_setter.write(text_setter[:-2])

    for key in ["get", "set"]:
        with open(f"{Path(mcdc.__file__).parent}/mcdc_{key}/__init__.py", "w") as f:
            text = "# The following is automatically generated by code_factory.py\n\n"
            for i, object_name in enumerate(targets.keys()):
                text += f"import mcdc.mcdc_{key}.{object_name} as {object_name}\n"
                if i < len(targets.keys()) - 1:
                    text += "\n"
            f.write(text)
if setter: + text += f"def {attribute_name}(index, {object_name}, data, value):\n" + else: + text += f"def {attribute_name}(index, {object_name}, data):\n" + text += f' offset = {object_name}["{attribute_name}_offset"]\n' + if setter: + text += f" data[offset + index] = value\n\n\n" + else: + text += f" return data[offset + index]\n\n\n" + return text + + +def _accessor_1d_all(object_name, attribute_name, size, setter=False): + text = f"@njit\n" + if setter: + text += f"def {attribute_name}_all({object_name}, data, value):\n" + else: + text += f"def {attribute_name}_all({object_name}, data):\n" + text += f' start = {object_name}["{attribute_name}_offset"]\n' + if type(size) == str: + text += f' size = {object_name}["{size}"]\n' + else: + text += f" size = {size}\n" + text += f" end = start + size\n" + if setter: + text += f" data[start:end] = value\n\n\n" + else: + text += f" return data[start:end]\n\n\n" + return text + + +def _accessor_1d_last(object_name, attribute_name, size, setter=False): + text = f"@njit\n" + if setter: + text += f"def {attribute_name}_last({object_name}, data, value):\n" + else: + text += f"def {attribute_name}_last({object_name}, data):\n" + text += f' start = {object_name}["{attribute_name}_offset"]\n' + if type(size) == str: + text += f' size = {object_name}["{size}"]\n' + else: + text += f" size = {size}\n" + text += f" end = start + size\n" + if setter: + text += f" data[end - 1] = value\n\n\n" + else: + text += f" return data[end - 1]\n\n\n" + return text + + +def _accessor_chunk(object_name, attribute_name, setter=False): + text = f"@njit\n" + if setter: + text += ( + f"def {attribute_name}_chunk(start, length, {object_name}, data, value):\n" + ) + else: + text += f"def {attribute_name}_chunk(start, length, {object_name}, data):\n" + text += f' start += {object_name}["{attribute_name}_offset"]\n' + text += f" end = start + length\n" + if setter: + text += f" data[start:end] = value\n\n\n" + else: + text += f" return 
data[start:end]\n\n\n" + return text + + +def _accessor_2d_element(object_name, attribute_name, stride, setter=False): + text = f"@njit\n" + if setter: + text += f"def {attribute_name}(index_1, index_2, {object_name}, data, value):\n" + else: + text += f"def {attribute_name}(index_1, index_2, {object_name}, data):\n" + text += f' offset = {object_name}["{attribute_name}_offset"]\n' + if isinstance(stride, str): + text += f' stride = {object_name}["{stride}"]\n' + else: + text += f" stride = {stride}\n" + if setter: + text += f" data[offset + index_1 * stride + index_2] = value\n\n\n" + else: + text += f" return data[offset + index_1 * stride + index_2]\n\n\n" + return text + + +def _accessor_2d_vector(object_name, attribute_name, stride, setter=False): + text = f"@njit\n" + if setter: + text += f"def {attribute_name}_vector(index_1, {object_name}, data, value):\n" + else: + text += f"def {attribute_name}_vector(index_1, {object_name}, data):\n" + text += f' offset = {object_name}["{attribute_name}_offset"]\n' + if isinstance(stride, str): + text += f' stride = {object_name}["{stride}"]\n' + else: + text += f" stride = {stride}\n" + text += f" start = offset + index_1 * stride\n" + text += f" end = start + stride\n" + if setter: + text += f" data[start:end] - value\n\n\n" + else: + text += f" return data[start:end]\n\n\n" + return text + + +def _accessor_3d_element(object_name, attribute_name, stride_2, stride_3, setter=False): + text = f"@njit\n" + if setter: + text += f"def {attribute_name}(index_1, index_2, index_3, {object_name}, data, value):\n" + else: + text += ( + f"def {attribute_name}(index_1, index_2, index_3, {object_name}, data):\n" + ) + text += f' offset = {object_name}["{attribute_name}_offset"]\n' + text += f' stride_2 = {object_name}["{stride_2}"]\n' + text += f' stride_3 = {object_name}["{stride_3}"]\n' + if setter: + text += f" data[offset + index_1 * stride_2 * stride_3 + index_2 * stride_3 + index_3] = value\n\n\n" + else: + text += f" return 
data[offset + index_1 * stride_2 * stride_3 + index_2 * stride_3 + index_3]\n\n\n" + return text + + +# ====================================================================================== +# Misc. +# ====================================================================================== + + +def plural_to_singular(word: str) -> str: + """ + Convert a plural English noun (possibly underscore-separated) to singular. + Applies only to the last word and handles common irregulars. + """ + irregulars = { + "universes": "universe", + "children": "child", + "men": "man", + "women": "woman", + "people": "person", + "mice": "mouse", + "geese": "goose", + "teeth": "tooth", + "feet": "foot", + "indices": "index", + "matrices": "matrix", + "criteria": "criterion", + "data": "data", # invariant + "spectra": "spectrum", + } + + parts = word.lower().split("_") + w = parts[-1] + + if w in irregulars: + parts[-1] = irregulars[w] + elif w.endswith("ies") and len(w) > 3: + parts[-1] = w[:-3] + "y" + elif w.endswith("ves") and len(w) > 3: + parts[-1] = w[:-3] + "f" + elif w.endswith("oes"): + parts[-1] = w[:-2] + elif any(w.endswith(suffix) for suffix in ("ses", "xes", "zes", "ches", "shes")): + parts[-1] = w[:-2] + elif w.endswith("s") and not w.endswith("ss"): + parts[-1] = w[:-1] + + return "_".join(parts) + + +def singular_to_plural(word: str) -> str: + """ + Convert a singular English noun (possibly underscore-separated) to plural. + Applies only to the last word and handles common irregulars. 
+ """ + irregulars = { + "universe": "universes", + "child": "children", + "man": "men", + "woman": "women", + "person": "people", + "mouse": "mice", + "goose": "geese", + "tooth": "teeth", + "foot": "feet", + "index": "indices", + "matrix": "matrices", + "criterion": "criteria", + "data": "data", # invariant + "spectrum": "spectra", + } + + parts = word.lower().split("_") + w = parts[-1] + + if w in irregulars: + parts[-1] = irregulars[w] + elif w.endswith("y") and w[-2:] not in ("ay", "ey", "iy", "oy", "uy"): + parts[-1] = w[:-1] + "ies" + elif w.endswith("f"): + parts[-1] = w[:-1] + "ves" + elif w.endswith("fe"): + parts[-1] = w[:-2] + "ves" + elif w.endswith(("s", "x", "z", "ch", "sh")): + parts[-1] = w + "es" + else: + parts[-1] = w + "s" + + return "_".join(parts) + + +def decode_structure_item(item, prefix=""): + """ + A structure item is a list describing a member field in the structure. + The list contains [field name, field type, size]. + """ + if type(item[1]) != np.dtypes.VoidDType: + if isinstance(item[1], str): + dtype = f"'{item[1]}'" + else: + dtype = item[1].__name__ + if len(item) == 3: + return f"{prefix} ('{item[0]}', {dtype}, {item[2]}),\n" + else: + return f"{prefix} ('{item[0]}', {dtype}),\n" + else: + if len(item) == 3: + return f"{prefix} ('{item[0]}', {plural_to_singular(item[0])}, {item[2]}),\n" + else: + return f"{prefix} ('{item[0]}', {item[0]}),\n" diff --git a/mcdc/mcdc/config.py b/mcdc/mcdc/config.py new file mode 100644 index 000000000..4979c5fcd --- /dev/null +++ b/mcdc/mcdc/config.py @@ -0,0 +1,158 @@ +import argparse, os + +parser = argparse.ArgumentParser(description="MC/DC: Monte Carlo Dynamic Code") + +# ====================================================================================== +# Run mode +# ====================================================================================== + +parser.add_argument( + "--mode", + type=str, + help="Run mode", + choices=["python", "numba", "numba_debug"], + default="python", +) + 
parser.add_argument(
    "--target", type=str, help="Target", choices=["cpu", "gpu"], default="cpu"
)

# ======================================================================================
# Settings
# ======================================================================================

parser.add_argument("--N_particle", type=int, help="Number of particles")
parser.add_argument("--N_batch", type=int, help="Number of batches")
parser.add_argument("--output", type=str, help="Output file name")
# Default None (not True) so downstream overrides (main.override_settings
# checks `is not None`) can distinguish "flag not given" from an explicit
# --progress_bar / --no-progress_bar. argparse applies this action's default
# to the shared dest, so flag behavior is unchanged.
parser.add_argument("--progress_bar", default=None, action="store_true")
parser.add_argument("--no-progress_bar", dest="progress_bar", action="store_false")
parser.add_argument("--runtime_output", default=False, action="store_true")


# ======================================================================================
# Numba
# ======================================================================================

parser.add_argument("--clear_cache", action="store_true")
parser.add_argument("--caching", action="store_true", default=False)
parser.add_argument("--no_caching", dest="caching", action="store_false")

# ======================================================================================
# GPU mode
# ======================================================================================

parser.add_argument(
    "--gpu_state_storage",
    type=str,
    # BUG FIX: help text was copy-pasted from --gpu_strategy ("event or async")
    # and contradicted the actual choices.
    help="Storage scheme used for GPU program state (separate, managed, or united).",
    choices=["separate", "managed", "united"],
    default="separate",
)

parser.add_argument(
    "--gpu_strategy",
    type=str,
    help="Strategy used in GPU execution (event or async).",
    choices=["async", "event"],
    default="event",
)

parser.add_argument(
    "--gpu_block_count",
    type=int,
    help="Number of blocks used in GPU execution.",
    default=240,
)

parser.add_argument(
    "--gpu_arena_size",
    type=int,
    help="Capacity of each intermediate data buffer used, as a particle count.",
    default=0x100000,
)
parser.add_argument(
    "--gpu_rocm_path",
    type=str,
    help="Path to ROCm installation for use in GPU execution.",
    default=None,
)

parser.add_argument(
    "--gpu_cuda_path",
    type=str,
    help="Path to CUDA installation for use in GPU execution.",
    default=None,
)

parser.add_argument(
    "--gpu_share_stride",
    type=int,
    help="Number of gpus that are shared across adjacent ranks.",
    default=1,
)


# ======================================================================================
# Config processor
# ======================================================================================

# Unknown arguments are tolerated (parse_known_args) so user input scripts can
# define their own flags alongside MC/DC's.
args, unargs = parser.parse_known_args()

mode = args.mode
target = args.target
gpu_state_storage = args.gpu_state_storage
caching = args.caching
clear_cache = args.clear_cache

# NOTE: cache clearing must happen BEFORE numba is imported below, and only on
# rank 0 to avoid concurrent directory removal.
from mpi4py import MPI
import shutil

src_path = os.path.dirname(os.path.abspath(__file__))
cache_path = f"{src_path}/__pycache__"

if ((caching == False) or (clear_cache == True)) and (MPI.COMM_WORLD.Get_rank() == 0):
    if os.path.exists(cache_path):
        shutil.rmtree(cache_path)
    if os.path.exists("__harmonize_cache__"):
        shutil.rmtree("__harmonize_cache__")

# Make the other ranks wait until rank 0 is done removing caches.
if MPI.COMM_WORLD.Get_size() > 1:
    MPI.COMM_WORLD.Barrier()

from mcdc.print_ import (
    print_warning,
)
import numba as nb

# Translate the run mode into Numba configuration.
if mode == "python":
    nb.config.DISABLE_JIT = True
elif mode == "numba":
    nb.config.DISABLE_JIT = False
    nb.config.NUMBA_DEBUG_CACHE = 1
    nb.config.THREADING_LAYER = "workqueue"
elif mode == "numba_debug":
    msg = "\n >> Entering numba debug mode\n >> will result in slower code and longer compile times\n >> to configure debug options see main.py"
    print_warning(msg)

    nb.config.DISABLE_JIT = False  # turns on the jitter
    nb.config.DEBUG = False  # core Numba debug dump left off; flip to True for more
    nb.config.NUMBA_FULL_TRACEBACKS = (
        1  # enables errors from sub-packages to be printed
    )
    nb.config.NUMBA_BOUNDSCHECK = 1  # checks bounds errors of vectors
    nb.config.NUMBA_COLOR_SCHEME = (
        "dark_bg"  # prints error messages for dark background terminals
    )
    nb.config.NUMBA_DEBUG_NRT = 1  # Numba run time (NRT) statistics counter
    nb.config.NUMBA_DEBUG_TYPEINFER = (
        1  # print out debugging information about type inference.
    )
    nb.config.NUMBA_ENABLE_PROFILING = 1  # enables profiler use
    nb.config.NUMBA_DUMP_CFG = 1  # prints out a control flow diagram
    nb.config.NUMBA_OPT = 0  # forces unoptimized code from the compiler
    nb.config.NUMBA_DEBUGINFO = 1  # emit debug info in compiled code
    nb.config.NUMBA_EXTEND_VARIABLE_LIFETIMES = (
        1  # allows for inspection of numba variables after end of compilation
    )
diff --git a/mcdc/mcdc/constant.py b/mcdc/mcdc/constant.py new file mode 100644 index 000000000..0b7130f16 --- /dev/null +++ b/mcdc/mcdc/constant.py @@ -0,0 +1,214 @@
import math

# Data index
TALLY = 0

# Tally types
TALLY_SURFACE = 0
TALLY_COLLISION = 1
TALLY_TRACKLENGTH = 2

# Tally spatial filter types
SPATIAL_FILTER_NONE = 0
SPATIAL_FILTER_SURFACE = 1
SPATIAL_FILTER_CELL = 2
SPATIAL_FILTER_MESH = 3

# Meshes
MESH_UNIFORM = 0
MESH_STRUCTURED = 1

# Tally scores
SCORE_FLUX = 0
SCORE_DENSITY = 1
SCORE_COLLISION = 2
SCORE_CAPTURE = 3
SCORE_FISSION = 4
SCORE_NET_CURRENT = 5
SCORE_MU_SQ = 6
SCORE_TIME_MOMENT_FLUX = 7
SCORE_SPACE_MOMENT_FLUX = 8
SCORE_TIME_MOMENT_CURRENT = 9
SCORE_SPACE_MOMENT_CURRENT = 10
SCORE_TIME_MOMENT_MU_SQ = 11
SCORE_SPACE_MOMENT_MU_SQ = 12
SCORE_ENERGY_DEPOSITION = 13

# Boundary condition
BC_NONE = 0
BC_VACUUM = 1
BC_REFLECTIVE = 2

# Cell fill
FILL_MATERIAL = 0
FILL_UNIVERSE = 1
FILL_LATTICE = 2
FILL_NONE = 3

# Region
REGION_HALFSPACE = 0
REGION_INTERSECTION = 1
REGION_UNION = 2
REGION_COMPLEMENT = 3
REGION_ALL = 4

# Surface type
SURFACE_PLANE_X = 1
SURFACE_PLANE_Y = 2
SURFACE_PLANE_Z = 3
SURFACE_PLANE = 4
SURFACE_CYLINDER_X = 5
SURFACE_CYLINDER_Y = 6
SURFACE_CYLINDER_Z = 7
SURFACE_CYLINDER = 8
SURFACE_SPHERE = 9
SURFACE_QUADRIC = 10
SURFACE_CONE_X = 11
SURFACE_CONE_Y = 12
SURFACE_CONE_Z = 13
SURFACE_TORUS_Z = 14

# Boolean operator
BOOL_AND = -1
BOOL_OR = -2
BOOL_NOT = -3

# Universe
UNIVERSE_ROOT = 0

# Events
# The << operator represents a bitshift.
# Each event is assigned 1 << X, which is equal to 2 to the power of X.
EVENT_NONE = 1 << 0
# Geometry events
EVENT_SURFACE_CROSSING = 1 << 1
EVENT_LATTICE_CROSSING = 1 << 2
EVENT_LOST = 1 << 3
# Collision/reaction events
EVENT_COLLISION = 1 << 4
# Miscellaneous
EVENT_TIME_CENSUS = 1 << 5
EVENT_TIME_BOUNDARY = 1 << 6

# Materials
MATERIAL = 0
MATERIAL_MG = 1
MATERIAL_ELEMENTAL = 2

# Reactions
NEUTRON_REACTION_TOTAL = 0
NEUTRON_REACTION_ELASTIC_SCATTERING = 1
NEUTRON_REACTION_CAPTURE = 2
NEUTRON_REACTION_INELASTIC_SCATTERING = 3
NEUTRON_REACTION_FISSION = 4
NEUTRON_REACTION_FISSION_PROMPT = 5
NEUTRON_REACTION_FISSION_DELAYED = 6
# Electron reactions are offset by 100 to keep them disjoint from neutron ones.
ELECTRON_REACTION_TOTAL = 100
ELECTRON_REACTION_ELASTIC_SCATTERING = 101
ELECTRON_REACTION_ELASTIC_SMALL_ANGLE = 102
ELECTRON_REACTION_ELASTIC_LARGE_ANGLE = 103
ELECTRON_REACTION_IONIZATION = 104
ELECTRON_REACTION_BREMSSTRAHLUNG = 105
ELECTRON_REACTION_EXCITATION = 106

# Particle types
PARTICLE_NEUTRON = 0
PARTICLE_ELECTRON = 1
PARTICLE_PROTON = 2

# Data
DATA_NONE = 0
DATA_TABLE = 1
DATA_POLYNOMIAL = 2

# Distribution
DISTRIBUTION_NONE = 0
DISTRIBUTION_PMF = 1
DISTRIBUTION_TABULATED = 2
DISTRIBUTION_MULTITABLE = 3
DISTRIBUTION_LEVEL_SCATTERING = 4
DISTRIBUTION_EVAPORATION = 5
DISTRIBUTION_MAXWELLIAN = 6
DISTRIBUTION_KALBACH_MANN = 7
DISTRIBUTION_TABULATED_ENERGY_ANGLE = 8
DISTRIBUTION_N_BODY = 9

# Angular distribution type
ANGLE_ISOTROPIC = 0
ANGLE_DISTRIBUTED = 1
ANGLE_ENERGY_CORRELATED = 2

# Reference frame
REFERENCE_FRAME_LAB = 0
REFERENCE_FRAME_COM = 1

# Interpolation law
INTERPOLATION_LINEAR = 2
INTERPOLATION_LOG = 5

# Gyration radius type
GYRATION_RADIUS_ALL = 0
GYRATION_RADIUS_INFINITE_X = 1
GYRATION_RADIUS_INFINITE_Y = 2
GYRATION_RADIUS_INFINITE_Z = 3
GYRATION_RADIUS_ONLY_X = 4
GYRATION_RADIUS_ONLY_Y = 5
GYRATION_RADIUS_ONLY_Z = 6

# Population control
PCT_NONE = 0
PCT_COMBING = 1
PCT_COMBING_WEIGHT = 2
PCT_SPLITTING_ROULETTE = 3
PCT_SPLITTING_ROULETTE_WEIGHT = 4

# Misc.
TINY = 1e-10
COINCIDENCE_TOLERANCE = TINY
COINCIDENCE_TOLERANCE_DIRECTION = 1e-5
COINCIDENCE_TOLERANCE_ENERGY = 1e-5
COINCIDENCE_TOLERANCE_TIME = TINY * 1e-2
INF = 1e10
PI = math.pi
PI_SQRT = math.sqrt(PI)
PI_HALF = PI / 2.0
BANKMAX = 100  # Default maximum active bank

# Axes
AXIS_X = 0
AXIS_Y = 1
AXIS_Z = 2
AXIS_T = 3

# Physics
LIGHT_SPEED = 2.99792458e10  # cm/s
NEUTRON_MASS = 939.565413e6  # eV/c^2
ELECTRON_MASS = 510.99895069e3  # eV/c^2
BOLTZMANN_K = 8.61733326e-5  # eV/K
ELECTRON_CUTOFF_ENERGY = 100  # eV
MU_CUTOFF = 0.999999
THERMAL_THRESHOLD_FACTOR = 400

# Weight Windows Methods
WW_USER = 0
WW_PREVIOUS = 1
# Weight Windows Modifications
WW_MIN = 0
WW_WOLLABER = 1

# ======================================================================================
# GPU settings
# ======================================================================================

# GPU strategies
# NOTE(review): SIMPLE_ASYNC and ASYNC share the value 0 — presumably
# intentional aliases; confirm before relying on them being distinct.
GPU_STRATEGY_SIMPLE_ASYNC = 0
GPU_STRATEGY_ASYNC = 0
GPU_STRATEGY_EVENT = 1

# GPU async.
# GPU async. types
GPU_ASYNC_SIMPLE = 0

# GPU storage types
GPU_STORAGE_SEPARATE = 0
GPU_STORAGE_MANAGED = 1
GPU_STORAGE_UNITED = 2
diff --git a/mcdc/mcdc/literals.py b/mcdc/mcdc/literals.py new file mode 100644 index 000000000..3cec7f37a --- /dev/null +++ b/mcdc/mcdc/literals.py @@ -0,0 +1,6 @@
# The following will be replaced with their respective literals by
# code_factory/literals_generator.py


def rpn_evaluation_buffer_size():
    # Placeholder; the literal value is injected by the code factory.
    pass
diff --git a/mcdc/mcdc/main.py b/mcdc/mcdc/main.py new file mode 100644 index 000000000..1e9ff34a0 --- /dev/null +++ b/mcdc/mcdc/main.py @@ -0,0 +1,372 @@
# ======================================================================================
# Run
# ======================================================================================


def run():
    """
    Execute the MC/DC simulation.

    Runs the transport simulation defined by the current problem
    (materials, geometry, sources, tallies, and settings).
    Results are written to an HDF5 output file.

    Command-line arguments (``--N_particle``, ``--output``, etc.) override
    the corresponding settings when provided.
    """
    import mcdc.print_ as print_module
    from mpi4py import MPI

    # TIMER: total
    time_total_start = MPI.Wtime()

    # Get settings and MPI master status
    from mcdc.object_.simulation import simulation as simulationPy

    settings = simulationPy.settings
    master = MPI.COMM_WORLD.Get_rank() == 0

    # ==================================================================================
    # Preparation
    # ==================================================================================

    # TIMER: preparation
    time_prep_start = MPI.Wtime()

    # Override settings with command-line arguments
    override_settings()

    # Generate the program state:
    # - `simulation`: the simulation, storing fixed side data and meta data that
    #   describes arbitrarily-sized data
    # - `data`: a long 1D array storing arbitrarily-sized data of the simulation
    # NOTE: The simulation structure to be generated in a container, which is a
    #   one-sized array that stores the structure. The container is needed to
    #   ensure proper mutability and tracking of the structure when running in
    #   different kinds of machines supported by the Numba compilation framework.
    simulation_container, data = preparation()
    simulation = simulation_container[0]

    # Print headers
    if master:
        print_module.print_banner()
        print_module.print_configuration()
        print(" Now running the particle transport...")
        if settings.neutron_eigenvalue_mode:
            print_module.print_eigenvalue_header(simulation)

    # TIMER: preparation
    time_prep_end = MPI.Wtime()

    # ==================================================================================
    # Running the simulation
    # ==================================================================================

    # TIMER: simulation
    time_simulation_start = MPI.Wtime()

    # Run simulation (eigenvalue or fixed-source, per the settings)
    import mcdc.transport.simulation as simulation_module

    if settings.neutron_eigenvalue_mode:
        simulation_module.eigenvalue_simulation(simulation_container, data)
    else:
        simulation_module.fixed_source_simulation(simulation_container, data)

    # TIMER: simulation
    time_simulation_end = MPI.Wtime()

    # ==================================================================================
    # Working on the output
    # ==================================================================================

    import mcdc.output as output_module

    # TIMER: output
    time_output_start = MPI.Wtime()

    # Generate hdf5 output file
    output_module.generate_output(simulation, data)

    # TIMER: output
    time_output_end = MPI.Wtime()

    # Final barrier
    MPI.COMM_WORLD.Barrier()

    # TIMER: total
    time_total_end = MPI.Wtime()

    # Manage timers
    simulation["runtime_total"] = time_total_end - time_total_start
    simulation["runtime_preparation"] = time_prep_end - time_prep_start
    simulation["runtime_simulation"] = time_simulation_end - time_simulation_start
    simulation["runtime_output"] = time_output_end - time_output_start
    output_module.create_runtime_datasets(simulation)
    if master:
        print_module.print_runtime(simulation)

    # ==================================================================================
    # Finalizing
    # ==================================================================================

    finalize(simulation)


# ======================================================================================
# Preparation
# ======================================================================================


def preparation():
    """
    Build the runtime program state from the Python-side problem definition.

    Adjusts settings and material data, sizes the particle banks, generates
    the Numba-supported objects, and loads source particles from file when
    requested.

    Returns
    -------
    (simulation_container, data): a one-sized container holding the simulation
    structure, and the flat 1D array backing its arbitrarily-sized data.
    """
    import math

    from mpi4py import MPI

    from mcdc.object_.simulation import simulation as simulationPy
    from mcdc.object_.material import (
        Material,
        MaterialMG,
        set_elements_from_nuclides,
        set_nuclides_from_elements,
        update_fissionable_from_nuclides,
    )

    # ==================================================================================
    # Adjust simulation settings as needed
    # ==================================================================================

    # Get settings
    settings = simulationPy.settings

    # Set appropriate time boundary (no later than the last tally time bin)
    settings.time_boundary = min(
        [settings.time_boundary] + [tally.time[-1] for tally in simulationPy.tallies]
    )

    # ==================================================================================
    # Set material data as needed
    # ==================================================================================

    # Set material compositions based on transported particles
    for material in simulationPy.materials:
        if not isinstance(material, Material):
            continue

        if settings.neutron_transport and len(material.nuclides) == 0:
            set_nuclides_from_elements(material)

        if settings.electron_transport and len(material.elements) == 0:
            set_elements_from_nuclides(material)

    # Set nuclear and atomic data for transported particles
    if settings.neutron_transport:
        for nuclide in simulationPy.nuclides:
            nuclide.set_neutron_data()

        for material in simulationPy.materials:
            if isinstance(material, Material):
                update_fissionable_from_nuclides(material)

    if settings.electron_transport:
        for element in simulationPy.elements:
            element.set_electron_data()

    # Set physics mode (multigroup is inferred from the first material's type)
    if len(simulationPy.materials) == 0:
        # Default physics in dummy mode
        settings.neutron_multigroup_mode = True
    else:
        settings.neutron_multigroup_mode = isinstance(
            simulationPy.materials[0], MaterialMG
        )

    # ==================================================================================
    # Adjust simulation parameters as needed
    # ==================================================================================

    # Reset time grid size of all tallies if census-based tally is desired
    if settings.use_census_based_tally:
        N_bin = settings.census_tally_frequency
        for tally in simulationPy.tallies:
            tally._use_census_based_tally(N_bin)

    # Normalize source probability (so the per-source probabilities sum to one)
    norm = 0.0
    for source in simulationPy.sources:
        norm += source.probability
    for source in simulationPy.sources:
        source.probability /= norm

    # Create root universe if not defined
    if len(simulationPy.universes[0].cells) == 0:
        simulationPy.universes[0].cells = simulationPy.cells

    # Initial guess
    simulationPy.k_eff = settings.k_init

    # Activate tally scoring for fixed-source
    if not settings.neutron_eigenvalue_mode:
        simulationPy.cycle_active = True
    # All active eigenvalue cycle?
    elif settings.N_inactive == 0:
        simulationPy.cycle_active = True

    # ==================================================================================
    # Set particle bank sizes
    # ==================================================================================

    # Some sizes
    N_particle = settings.N_particle
    N_work = math.ceil(N_particle / MPI.COMM_WORLD.Get_size())
    N_census = settings.N_census

    # Determine bank size (banks not used in the current mode get zero ratio)
    if settings.neutron_eigenvalue_mode or N_census == 1:
        settings.future_bank_buffer_ratio = 0.0
    if not settings.neutron_eigenvalue_mode and N_census == 1:
        settings.census_bank_buffer_ratio = 0.0
        settings.source_bank_buffer_ratio = 0.0
    size_active = settings.active_bank_buffer
    size_census = int((settings.census_bank_buffer_ratio) * N_work)
    size_source = int((settings.source_bank_buffer_ratio) * N_work)
    size_future = int((settings.future_bank_buffer_ratio) * N_work)

    # Set bank size
    simulationPy.bank_active.size[0] = size_active
    simulationPy.bank_census.size[0] = size_census
    simulationPy.bank_source.size[0] = size_source
    simulationPy.bank_future.size[0] = size_future

    # ==================================================================================
    # Generate Numba-supported "Objects"
    # ==================================================================================

    from mcdc.code_factory.numba_objects_generator import generate_numba_objects
    from mcdc.code_factory.literals_generator import make_literals

    make_literals(simulationPy)

    simulation_container, data = generate_numba_objects(simulationPy)
    simulation = simulation_container[0]

    # Reload mcdc getters and setters (they were regenerated just above)
    import importlib
    import mcdc.mcdc_get as mcdc_get
    import mcdc.mcdc_set as mcdc_set

    importlib.reload(mcdc_get)
    importlib.reload(mcdc_set)

    # ==================================================================================
    # Adapt functions as needed
    # ==================================================================================

    # Pick physics model
    import mcdc.transport.physics as physics

    if settings.neutron_multigroup_mode:
        physics.neutron.particle_speed = physics.neutron.multigroup.particle_speed
        physics.neutron.macro_xs = physics.neutron.multigroup.macro_xs
        physics.neutron.neutron_production_xs = (
            physics.neutron.multigroup.neutron_production_xs
        )
        physics.neutron.collision = physics.neutron.multigroup.collision

    # Pick Python-version RNG if needed
    import mcdc.config as config
    import mcdc.transport.rng as rng

    if config.mode == "python":
        rng.wrapping_add = rng.wrapping_add_python
        rng.wrapping_mul = rng.wrapping_mul_python

    # TODO: Find out why the following is needed to avoid circular import
    import mcdc.transport.particle_bank as particle_bank_module

    # ==================================================================================
    # Source particles from file
    # ==================================================================================
    # TODO: Use parallel h5py, may need to compile for speed

    import h5py

    # All ranks, take turn
    for i in range(simulation["mpi_size"]):
        if simulation["mpi_rank"] == i:
            if settings.use_source_file:
                with h5py.File(settings.source_file_name, "r") as f:
                    # Get source particle size
                    N_particle = f["particles_size"][()]

                    # Redistribute work
                    # NOTE(review): `mpi` is not imported anywhere in this
                    # module — this would raise NameError when use_source_file
                    # is set. Presumably `mcdc.transport.mpi`; confirm and
                    # import it.
                    mpi.distribute_work(N_particle, simulation)
                    N_local = simulation["mpi_work_size"]
                    start = simulation["mpi_work_start"]
                    end = start + N_local

                    # Add particles to source bank
                    simulation["bank_source"]["particles"][:N_local] = f["particles"][
                        start:end
                    ]
                    simulation["bank_source"]["size"] = N_local
        MPI.COMM_WORLD.Barrier()

    # ==================================================================================
    # Finalize
    # ==================================================================================

    return simulation_container, data
+# ====================================================================================== +# Misc. +# ====================================================================================== + + +def override_settings(): + import mcdc.config as config + from mcdc.object_.simulation import simulation as simulationPy + + settings = simulationPy.settings + + if config.args.N_particle is not None: + settings.N_particle = config.args.N_particle + if config.args.N_batch is not None: + settings.N_batch = config.args.N_batch + if config.args.output is not None: + settings.output_name = config.args.output + if config.args.progress_bar is not None: + settings.use_progress_bar = config.args.progress_bar + + # GPU settings + if config.target == "gpu": + from mcdc.constant import ( + GPU_STRATEGY_ASYNC, + GPU_STRATEGY_EVENT, + GPU_STORAGE_SEPARATE, + GPU_STORAGE_MANAGED, + GPU_STORAGE_UNITED, + ) + + if config.args.gpu_strategy == "async": + settings.gpu_strategy = GPU_STRATEGY_ASYNC + elif config.args.gpu_strategy == "event": + settings.gpu_strategy = GPU_STRATEGY_EVENT + + if config.args.gpu_state_storage == "separate": + settings.gpu_storage = GPU_STORAGE_SEPARATE + elif config.args.gpu_state_storage == "managed": + settings.gpu_storage = GPU_STORAGE_MANAGED + elif config.args.gpu_state_storage == "united": + settings.gpu_storage = GPU_STORAGE_UNITED + + +def finalize(simulation): + import mcdc.config as config + + # GPU teardowns if needed + if config.target == "gpu": + from mcdc.code_factory.gpu.program_builder import teardown_gpu_program + + teardown_gpu_program(simulation) diff --git a/mcdc/mcdc/mcdc_get/__init__.py b/mcdc/mcdc/mcdc_get/__init__.py new file mode 100644 index 000000000..d3b533388 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/__init__.py @@ -0,0 +1,115 @@ +# The following is automatically generated by code_factory.py + +import mcdc.mcdc_get.particle_data as particle_data + +import mcdc.mcdc_get.particle as particle + +import mcdc.mcdc_get.cell as cell + +import 
mcdc.mcdc_get.lattice as lattice + +import mcdc.mcdc_get.material as material + +import mcdc.mcdc_get.tally as tally + +import mcdc.mcdc_get.universe as universe + +import mcdc.mcdc_get.data as data + +import mcdc.mcdc_get.none_data as none_data + +import mcdc.mcdc_get.polynomial_data as polynomial_data + +import mcdc.mcdc_get.table_data as table_data + +import mcdc.mcdc_get.distribution as distribution + +import mcdc.mcdc_get.evaporation_distribution as evaporation_distribution + +import mcdc.mcdc_get.kalbach_mann_distribution as kalbach_mann_distribution + +import mcdc.mcdc_get.level_scattering_distribution as level_scattering_distribution + +import mcdc.mcdc_get.maxwellian_distribution as maxwellian_distribution + +import mcdc.mcdc_get.multi_table_distribution as multi_table_distribution + +import mcdc.mcdc_get.nbody_distribution as nbody_distribution + +import mcdc.mcdc_get.none_distribution as none_distribution + +import mcdc.mcdc_get.pmf_distribution as pmf_distribution + +import mcdc.mcdc_get.tabulated_distribution as tabulated_distribution + +import mcdc.mcdc_get.tabulated_energy_angle_distribution as tabulated_energy_angle_distribution + +import mcdc.mcdc_get.electron_reaction as electron_reaction + +import mcdc.mcdc_get.electron_bremsstrahlung_reaction as electron_bremsstrahlung_reaction + +import mcdc.mcdc_get.electron_elastic_scattering_reaction as electron_elastic_scattering_reaction + +import mcdc.mcdc_get.electron_excitation_reaction as electron_excitation_reaction + +import mcdc.mcdc_get.electron_ionization_reaction as electron_ionization_reaction + +import mcdc.mcdc_get.element as element + +import mcdc.mcdc_get.gpu_meta as gpu_meta + +import mcdc.mcdc_get.native_material as native_material + +import mcdc.mcdc_get.multigroup_material as multigroup_material + +import mcdc.mcdc_get.nuclide as nuclide + +import mcdc.mcdc_get.mesh as mesh + +import mcdc.mcdc_get.structured_mesh as structured_mesh + +import mcdc.mcdc_get.uniform_mesh as uniform_mesh + 
+import mcdc.mcdc_get.neutron_reaction as neutron_reaction + +import mcdc.mcdc_get.neutron_capture_reaction as neutron_capture_reaction + +import mcdc.mcdc_get.neutron_elastic_scattering_reaction as neutron_elastic_scattering_reaction + +import mcdc.mcdc_get.neutron_fission_reaction as neutron_fission_reaction + +import mcdc.mcdc_get.neutron_inelastic_scattering_reaction as neutron_inelastic_scattering_reaction + +import mcdc.mcdc_get.collision_data as collision_data + +import mcdc.mcdc_get.particle_bank as particle_bank + +import mcdc.mcdc_get.settings as settings + +import mcdc.mcdc_get.implicit_capture as implicit_capture + +import mcdc.mcdc_get.population_control as population_control + +import mcdc.mcdc_get.weight_roulette as weight_roulette + +import mcdc.mcdc_get.weighted_emission as weighted_emission + +import mcdc.mcdc_get.source as source + +import mcdc.mcdc_get.surface as surface + +import mcdc.mcdc_get.surface_tally as surface_tally + +import mcdc.mcdc_get.collision_tally as collision_tally + +import mcdc.mcdc_get.tracklength_tally as tracklength_tally + +import mcdc.mcdc_get.bank_active as bank_active + +import mcdc.mcdc_get.bank_census as bank_census + +import mcdc.mcdc_get.bank_source as bank_source + +import mcdc.mcdc_get.bank_future as bank_future + +import mcdc.mcdc_get.simulation as simulation diff --git a/mcdc/mcdc/mcdc_get/bank_active.py b/mcdc/mcdc/mcdc_get/bank_active.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/bank_active.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/bank_census.py b/mcdc/mcdc/mcdc_get/bank_census.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/bank_census.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/bank_future.py b/mcdc/mcdc/mcdc_get/bank_future.py 
new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/bank_future.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/bank_source.py b/mcdc/mcdc/mcdc_get/bank_source.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/bank_source.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/cell.py b/mcdc/mcdc/mcdc_get/cell.py new file mode 100644 index 000000000..1aaa320fd --- /dev/null +++ b/mcdc/mcdc/mcdc_get/cell.py @@ -0,0 +1,90 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def region_RPN_tokens(index, cell, data): + offset = cell["region_RPN_tokens_offset"] + return data[offset + index] + + +@njit +def region_RPN_tokens_all(cell, data): + start = cell["region_RPN_tokens_offset"] + size = cell["region_RPN_tokens_length"] + end = start + size + return data[start:end] + + +@njit +def region_RPN_tokens_last(cell, data): + start = cell["region_RPN_tokens_offset"] + size = cell["region_RPN_tokens_length"] + end = start + size + return data[end - 1] + + +@njit +def region_RPN_tokens_chunk(start, length, cell, data): + start += cell["region_RPN_tokens_offset"] + end = start + length + return data[start:end] + + +@njit +def surface_IDs(index, cell, data): + offset = cell["surface_IDs_offset"] + return data[offset + index] + + +@njit +def surface_IDs_all(cell, data): + start = cell["surface_IDs_offset"] + size = cell["N_surface"] + end = start + size + return data[start:end] + + +@njit +def surface_IDs_last(cell, data): + start = cell["surface_IDs_offset"] + size = cell["N_surface"] + end = start + size + return data[end - 1] + + +@njit +def surface_IDs_chunk(start, length, cell, data): + start += cell["surface_IDs_offset"] + end = start + length + return data[start:end] + + 
+@njit +def tally_IDs(index, cell, data): + offset = cell["tally_IDs_offset"] + return data[offset + index] + + +@njit +def tally_IDs_all(cell, data): + start = cell["tally_IDs_offset"] + size = cell["N_tally"] + end = start + size + return data[start:end] + + +@njit +def tally_IDs_last(cell, data): + start = cell["tally_IDs_offset"] + size = cell["N_tally"] + end = start + size + return data[end - 1] + + +@njit +def tally_IDs_chunk(start, length, cell, data): + start += cell["tally_IDs_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/cell_tally.py b/mcdc/mcdc/mcdc_get/cell_tally.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/cell_tally.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/collision_data.py b/mcdc/mcdc/mcdc_get/collision_data.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/collision_data.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/collision_tally.py b/mcdc/mcdc/mcdc_get/collision_tally.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/collision_tally.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/data.py b/mcdc/mcdc/mcdc_get/data.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/data.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/distribution.py b/mcdc/mcdc/mcdc_get/distribution.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/distribution.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit 
diff --git a/mcdc/mcdc/mcdc_get/electron_bremsstrahlung_reaction.py b/mcdc/mcdc/mcdc_get/electron_bremsstrahlung_reaction.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/electron_bremsstrahlung_reaction.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/electron_elastic_scattering_reaction.py b/mcdc/mcdc/mcdc_get/electron_elastic_scattering_reaction.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/electron_elastic_scattering_reaction.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/electron_excitation_reaction.py b/mcdc/mcdc/mcdc_get/electron_excitation_reaction.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/electron_excitation_reaction.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/electron_ionization_reaction.py b/mcdc/mcdc/mcdc_get/electron_ionization_reaction.py new file mode 100644 index 000000000..22775182f --- /dev/null +++ b/mcdc/mcdc/mcdc_get/electron_ionization_reaction.py @@ -0,0 +1,61 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def subshell_x_IDs(index, electron_ionization_reaction, data): + offset = electron_ionization_reaction["subshell_x_IDs_offset"] + return data[offset + index] + + +@njit +def subshell_x_IDs_all(electron_ionization_reaction, data): + start = electron_ionization_reaction["subshell_x_IDs_offset"] + size = electron_ionization_reaction["N_subshell_x"] + end = start + size + return data[start:end] + + +@njit +def subshell_x_IDs_last(electron_ionization_reaction, data): + start = electron_ionization_reaction["subshell_x_IDs_offset"] + size = electron_ionization_reaction["N_subshell_x"] + 
end = start + size + return data[end - 1] + + +@njit +def subshell_x_IDs_chunk(start, length, electron_ionization_reaction, data): + start += electron_ionization_reaction["subshell_x_IDs_offset"] + end = start + length + return data[start:end] + + +@njit +def subshell_product_IDs(index, electron_ionization_reaction, data): + offset = electron_ionization_reaction["subshell_product_IDs_offset"] + return data[offset + index] + + +@njit +def subshell_product_IDs_all(electron_ionization_reaction, data): + start = electron_ionization_reaction["subshell_product_IDs_offset"] + size = electron_ionization_reaction["N_subshell_product"] + end = start + size + return data[start:end] + + +@njit +def subshell_product_IDs_last(electron_ionization_reaction, data): + start = electron_ionization_reaction["subshell_product_IDs_offset"] + size = electron_ionization_reaction["N_subshell_product"] + end = start + size + return data[end - 1] + + +@njit +def subshell_product_IDs_chunk(start, length, electron_ionization_reaction, data): + start += electron_ionization_reaction["subshell_product_IDs_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/electron_reaction.py b/mcdc/mcdc/mcdc_get/electron_reaction.py new file mode 100644 index 000000000..30ab62a98 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/electron_reaction.py @@ -0,0 +1,32 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def xs(index, electron_reaction, data): + offset = electron_reaction["xs_offset"] + return data[offset + index] + + +@njit +def xs_all(electron_reaction, data): + start = electron_reaction["xs_offset"] + size = electron_reaction["xs_length"] + end = start + size + return data[start:end] + + +@njit +def xs_last(electron_reaction, data): + start = electron_reaction["xs_offset"] + size = electron_reaction["xs_length"] + end = start + size + return data[end - 1] + + +@njit +def xs_chunk(start, length, electron_reaction, data): + 
start += electron_reaction["xs_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/element.py b/mcdc/mcdc/mcdc_get/element.py new file mode 100644 index 000000000..6d5873a2c --- /dev/null +++ b/mcdc/mcdc/mcdc_get/element.py @@ -0,0 +1,322 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def electron_xs_energy_grid(index, element, data): + offset = element["electron_xs_energy_grid_offset"] + return data[offset + index] + + +@njit +def electron_xs_energy_grid_all(element, data): + start = element["electron_xs_energy_grid_offset"] + size = element["electron_xs_energy_grid_length"] + end = start + size + return data[start:end] + + +@njit +def electron_xs_energy_grid_last(element, data): + start = element["electron_xs_energy_grid_offset"] + size = element["electron_xs_energy_grid_length"] + end = start + size + return data[end - 1] + + +@njit +def electron_xs_energy_grid_chunk(start, length, element, data): + start += element["electron_xs_energy_grid_offset"] + end = start + length + return data[start:end] + + +@njit +def electron_total_xs(index, element, data): + offset = element["electron_total_xs_offset"] + return data[offset + index] + + +@njit +def electron_total_xs_all(element, data): + start = element["electron_total_xs_offset"] + size = element["electron_total_xs_length"] + end = start + size + return data[start:end] + + +@njit +def electron_total_xs_last(element, data): + start = element["electron_total_xs_offset"] + size = element["electron_total_xs_length"] + end = start + size + return data[end - 1] + + +@njit +def electron_total_xs_chunk(start, length, element, data): + start += element["electron_total_xs_offset"] + end = start + length + return data[start:end] + + +@njit +def electron_ionization_xs(index, element, data): + offset = element["electron_ionization_xs_offset"] + return data[offset + index] + + +@njit +def electron_ionization_xs_all(element, data): + 
start = element["electron_ionization_xs_offset"] + size = element["electron_ionization_xs_length"] + end = start + size + return data[start:end] + + +@njit +def electron_ionization_xs_last(element, data): + start = element["electron_ionization_xs_offset"] + size = element["electron_ionization_xs_length"] + end = start + size + return data[end - 1] + + +@njit +def electron_ionization_xs_chunk(start, length, element, data): + start += element["electron_ionization_xs_offset"] + end = start + length + return data[start:end] + + +@njit +def electron_elastic_xs(index, element, data): + offset = element["electron_elastic_xs_offset"] + return data[offset + index] + + +@njit +def electron_elastic_xs_all(element, data): + start = element["electron_elastic_xs_offset"] + size = element["electron_elastic_xs_length"] + end = start + size + return data[start:end] + + +@njit +def electron_elastic_xs_last(element, data): + start = element["electron_elastic_xs_offset"] + size = element["electron_elastic_xs_length"] + end = start + size + return data[end - 1] + + +@njit +def electron_elastic_xs_chunk(start, length, element, data): + start += element["electron_elastic_xs_offset"] + end = start + length + return data[start:end] + + +@njit +def electron_excitation_xs(index, element, data): + offset = element["electron_excitation_xs_offset"] + return data[offset + index] + + +@njit +def electron_excitation_xs_all(element, data): + start = element["electron_excitation_xs_offset"] + size = element["electron_excitation_xs_length"] + end = start + size + return data[start:end] + + +@njit +def electron_excitation_xs_last(element, data): + start = element["electron_excitation_xs_offset"] + size = element["electron_excitation_xs_length"] + end = start + size + return data[end - 1] + + +@njit +def electron_excitation_xs_chunk(start, length, element, data): + start += element["electron_excitation_xs_offset"] + end = start + length + return data[start:end] + + +@njit +def 
electron_bremsstrahlung_xs(index, element, data): + offset = element["electron_bremsstrahlung_xs_offset"] + return data[offset + index] + + +@njit +def electron_bremsstrahlung_xs_all(element, data): + start = element["electron_bremsstrahlung_xs_offset"] + size = element["electron_bremsstrahlung_xs_length"] + end = start + size + return data[start:end] + + +@njit +def electron_bremsstrahlung_xs_last(element, data): + start = element["electron_bremsstrahlung_xs_offset"] + size = element["electron_bremsstrahlung_xs_length"] + end = start + size + return data[end - 1] + + +@njit +def electron_bremsstrahlung_xs_chunk(start, length, element, data): + start += element["electron_bremsstrahlung_xs_offset"] + end = start + length + return data[start:end] + + +@njit +def electron_ionization_reaction_IDs(index, element, data): + offset = element["electron_ionization_reaction_IDs_offset"] + return data[offset + index] + + +@njit +def electron_ionization_reaction_IDs_all(element, data): + start = element["electron_ionization_reaction_IDs_offset"] + size = element["N_electron_ionization_reaction"] + end = start + size + return data[start:end] + + +@njit +def electron_ionization_reaction_IDs_last(element, data): + start = element["electron_ionization_reaction_IDs_offset"] + size = element["N_electron_ionization_reaction"] + end = start + size + return data[end - 1] + + +@njit +def electron_ionization_reaction_IDs_chunk(start, length, element, data): + start += element["electron_ionization_reaction_IDs_offset"] + end = start + length + return data[start:end] + + +@njit +def electron_elastic_scattering_reaction_IDs(index, element, data): + offset = element["electron_elastic_scattering_reaction_IDs_offset"] + return data[offset + index] + + +@njit +def electron_elastic_scattering_reaction_IDs_all(element, data): + start = element["electron_elastic_scattering_reaction_IDs_offset"] + size = element["N_electron_elastic_scattering_reaction"] + end = start + size + return data[start:end] 
+ + +@njit +def electron_elastic_scattering_reaction_IDs_last(element, data): + start = element["electron_elastic_scattering_reaction_IDs_offset"] + size = element["N_electron_elastic_scattering_reaction"] + end = start + size + return data[end - 1] + + +@njit +def electron_elastic_scattering_reaction_IDs_chunk(start, length, element, data): + start += element["electron_elastic_scattering_reaction_IDs_offset"] + end = start + length + return data[start:end] + + +@njit +def electron_excitation_reaction_IDs(index, element, data): + offset = element["electron_excitation_reaction_IDs_offset"] + return data[offset + index] + + +@njit +def electron_excitation_reaction_IDs_all(element, data): + start = element["electron_excitation_reaction_IDs_offset"] + size = element["N_electron_excitation_reaction"] + end = start + size + return data[start:end] + + +@njit +def electron_excitation_reaction_IDs_last(element, data): + start = element["electron_excitation_reaction_IDs_offset"] + size = element["N_electron_excitation_reaction"] + end = start + size + return data[end - 1] + + +@njit +def electron_excitation_reaction_IDs_chunk(start, length, element, data): + start += element["electron_excitation_reaction_IDs_offset"] + end = start + length + return data[start:end] + + +@njit +def electron_bremsstrahlung_reaction_IDs(index, element, data): + offset = element["electron_bremsstrahlung_reaction_IDs_offset"] + return data[offset + index] + + +@njit +def electron_bremsstrahlung_reaction_IDs_all(element, data): + start = element["electron_bremsstrahlung_reaction_IDs_offset"] + size = element["N_electron_bremsstrahlung_reaction"] + end = start + size + return data[start:end] + + +@njit +def electron_bremsstrahlung_reaction_IDs_last(element, data): + start = element["electron_bremsstrahlung_reaction_IDs_offset"] + size = element["N_electron_bremsstrahlung_reaction"] + end = start + size + return data[end - 1] + + +@njit +def electron_bremsstrahlung_reaction_IDs_chunk(start, length, 
element, data): + start += element["electron_bremsstrahlung_reaction_IDs_offset"] + end = start + length + return data[start:end] + + +@njit +def electron_ionization_subshell_binding_energy(index, element, data): + offset = element["electron_ionization_subshell_binding_energy_offset"] + return data[offset + index] + + +@njit +def electron_ionization_subshell_binding_energy_all(element, data): + start = element["electron_ionization_subshell_binding_energy_offset"] + size = element["electron_ionization_subshell_binding_energy_length"] + end = start + size + return data[start:end] + + +@njit +def electron_ionization_subshell_binding_energy_last(element, data): + start = element["electron_ionization_subshell_binding_energy_offset"] + size = element["electron_ionization_subshell_binding_energy_length"] + end = start + size + return data[end - 1] + + +@njit +def electron_ionization_subshell_binding_energy_chunk(start, length, element, data): + start += element["electron_ionization_subshell_binding_energy_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/evaporation_distribution.py b/mcdc/mcdc/mcdc_get/evaporation_distribution.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/evaporation_distribution.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/global_tally.py b/mcdc/mcdc/mcdc_get/global_tally.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/global_tally.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/gpu_meta.py b/mcdc/mcdc/mcdc_get/gpu_meta.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/gpu_meta.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git 
a/mcdc/mcdc/mcdc_get/implicit_capture.py b/mcdc/mcdc/mcdc_get/implicit_capture.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/implicit_capture.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/kalbach_mann_distribution.py b/mcdc/mcdc/mcdc_get/kalbach_mann_distribution.py new file mode 100644 index 000000000..456942a01 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/kalbach_mann_distribution.py @@ -0,0 +1,206 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def energy(index, kalbach_mann_distribution, data): + offset = kalbach_mann_distribution["energy_offset"] + return data[offset + index] + + +@njit +def energy_all(kalbach_mann_distribution, data): + start = kalbach_mann_distribution["energy_offset"] + size = kalbach_mann_distribution["energy_length"] + end = start + size + return data[start:end] + + +@njit +def energy_last(kalbach_mann_distribution, data): + start = kalbach_mann_distribution["energy_offset"] + size = kalbach_mann_distribution["energy_length"] + end = start + size + return data[end - 1] + + +@njit +def energy_chunk(start, length, kalbach_mann_distribution, data): + start += kalbach_mann_distribution["energy_offset"] + end = start + length + return data[start:end] + + +@njit +def offset(index, kalbach_mann_distribution, data): + offset = kalbach_mann_distribution["offset_offset"] + return data[offset + index] + + +@njit +def offset_all(kalbach_mann_distribution, data): + start = kalbach_mann_distribution["offset_offset"] + size = kalbach_mann_distribution["offset_length"] + end = start + size + return data[start:end] + + +@njit +def offset_last(kalbach_mann_distribution, data): + start = kalbach_mann_distribution["offset_offset"] + size = kalbach_mann_distribution["offset_length"] + end = start + size + return data[end - 1] + + +@njit +def offset_chunk(start, length, 
kalbach_mann_distribution, data): + start += kalbach_mann_distribution["offset_offset"] + end = start + length + return data[start:end] + + +@njit +def energy_out(index, kalbach_mann_distribution, data): + offset = kalbach_mann_distribution["energy_out_offset"] + return data[offset + index] + + +@njit +def energy_out_all(kalbach_mann_distribution, data): + start = kalbach_mann_distribution["energy_out_offset"] + size = kalbach_mann_distribution["energy_out_length"] + end = start + size + return data[start:end] + + +@njit +def energy_out_last(kalbach_mann_distribution, data): + start = kalbach_mann_distribution["energy_out_offset"] + size = kalbach_mann_distribution["energy_out_length"] + end = start + size + return data[end - 1] + + +@njit +def energy_out_chunk(start, length, kalbach_mann_distribution, data): + start += kalbach_mann_distribution["energy_out_offset"] + end = start + length + return data[start:end] + + +@njit +def pdf(index, kalbach_mann_distribution, data): + offset = kalbach_mann_distribution["pdf_offset"] + return data[offset + index] + + +@njit +def pdf_all(kalbach_mann_distribution, data): + start = kalbach_mann_distribution["pdf_offset"] + size = kalbach_mann_distribution["pdf_length"] + end = start + size + return data[start:end] + + +@njit +def pdf_last(kalbach_mann_distribution, data): + start = kalbach_mann_distribution["pdf_offset"] + size = kalbach_mann_distribution["pdf_length"] + end = start + size + return data[end - 1] + + +@njit +def pdf_chunk(start, length, kalbach_mann_distribution, data): + start += kalbach_mann_distribution["pdf_offset"] + end = start + length + return data[start:end] + + +@njit +def cdf(index, kalbach_mann_distribution, data): + offset = kalbach_mann_distribution["cdf_offset"] + return data[offset + index] + + +@njit +def cdf_all(kalbach_mann_distribution, data): + start = kalbach_mann_distribution["cdf_offset"] + size = kalbach_mann_distribution["cdf_length"] + end = start + size + return data[start:end] + + 
+@njit +def cdf_last(kalbach_mann_distribution, data): + start = kalbach_mann_distribution["cdf_offset"] + size = kalbach_mann_distribution["cdf_length"] + end = start + size + return data[end - 1] + + +@njit +def cdf_chunk(start, length, kalbach_mann_distribution, data): + start += kalbach_mann_distribution["cdf_offset"] + end = start + length + return data[start:end] + + +@njit +def precompound_factor(index, kalbach_mann_distribution, data): + offset = kalbach_mann_distribution["precompound_factor_offset"] + return data[offset + index] + + +@njit +def precompound_factor_all(kalbach_mann_distribution, data): + start = kalbach_mann_distribution["precompound_factor_offset"] + size = kalbach_mann_distribution["precompound_factor_length"] + end = start + size + return data[start:end] + + +@njit +def precompound_factor_last(kalbach_mann_distribution, data): + start = kalbach_mann_distribution["precompound_factor_offset"] + size = kalbach_mann_distribution["precompound_factor_length"] + end = start + size + return data[end - 1] + + +@njit +def precompound_factor_chunk(start, length, kalbach_mann_distribution, data): + start += kalbach_mann_distribution["precompound_factor_offset"] + end = start + length + return data[start:end] + + +@njit +def angular_slope(index, kalbach_mann_distribution, data): + offset = kalbach_mann_distribution["angular_slope_offset"] + return data[offset + index] + + +@njit +def angular_slope_all(kalbach_mann_distribution, data): + start = kalbach_mann_distribution["angular_slope_offset"] + size = kalbach_mann_distribution["angular_slope_length"] + end = start + size + return data[start:end] + + +@njit +def angular_slope_last(kalbach_mann_distribution, data): + start = kalbach_mann_distribution["angular_slope_offset"] + size = kalbach_mann_distribution["angular_slope_length"] + end = start + size + return data[end - 1] + + +@njit +def angular_slope_chunk(start, length, kalbach_mann_distribution, data): + start += 
kalbach_mann_distribution["angular_slope_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/lattice.py b/mcdc/mcdc/mcdc_get/lattice.py new file mode 100644 index 000000000..437f5be40 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/lattice.py @@ -0,0 +1,18 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def universe_IDs(index_1, index_2, index_3, lattice, data): + offset = lattice["universe_IDs_offset"] + stride_2 = lattice["Ny"] + stride_3 = lattice["Nz"] + return data[offset + index_1 * stride_2 * stride_3 + index_2 * stride_3 + index_3] + + +@njit +def universe_IDs_chunk(start, length, lattice, data): + start += lattice["universe_IDs_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/level_scattering_distribution.py b/mcdc/mcdc/mcdc_get/level_scattering_distribution.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/level_scattering_distribution.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/material.py b/mcdc/mcdc/mcdc_get/material.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/material.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/maxwellian_distribution.py b/mcdc/mcdc/mcdc_get/maxwellian_distribution.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/maxwellian_distribution.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/mesh.py b/mcdc/mcdc/mcdc_get/mesh.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/mesh.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba 
import njit diff --git a/mcdc/mcdc/mcdc_get/mesh_tally.py b/mcdc/mcdc/mcdc_get/mesh_tally.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/mesh_tally.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/multi_table_distribution.py b/mcdc/mcdc/mcdc_get/multi_table_distribution.py new file mode 100644 index 000000000..adc740bdc --- /dev/null +++ b/mcdc/mcdc/mcdc_get/multi_table_distribution.py @@ -0,0 +1,148 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def grid(index, multi_table_distribution, data): + offset = multi_table_distribution["grid_offset"] + return data[offset + index] + + +@njit +def grid_all(multi_table_distribution, data): + start = multi_table_distribution["grid_offset"] + size = multi_table_distribution["grid_length"] + end = start + size + return data[start:end] + + +@njit +def grid_last(multi_table_distribution, data): + start = multi_table_distribution["grid_offset"] + size = multi_table_distribution["grid_length"] + end = start + size + return data[end - 1] + + +@njit +def grid_chunk(start, length, multi_table_distribution, data): + start += multi_table_distribution["grid_offset"] + end = start + length + return data[start:end] + + +@njit +def offset(index, multi_table_distribution, data): + offset = multi_table_distribution["offset_offset"] + return data[offset + index] + + +@njit +def offset_all(multi_table_distribution, data): + start = multi_table_distribution["offset_offset"] + size = multi_table_distribution["offset_length"] + end = start + size + return data[start:end] + + +@njit +def offset_last(multi_table_distribution, data): + start = multi_table_distribution["offset_offset"] + size = multi_table_distribution["offset_length"] + end = start + size + return data[end - 1] + + +@njit +def offset_chunk(start, length, multi_table_distribution, data): + start 
+= multi_table_distribution["offset_offset"] + end = start + length + return data[start:end] + + +@njit +def value(index, multi_table_distribution, data): + offset = multi_table_distribution["value_offset"] + return data[offset + index] + + +@njit +def value_all(multi_table_distribution, data): + start = multi_table_distribution["value_offset"] + size = multi_table_distribution["value_length"] + end = start + size + return data[start:end] + + +@njit +def value_last(multi_table_distribution, data): + start = multi_table_distribution["value_offset"] + size = multi_table_distribution["value_length"] + end = start + size + return data[end - 1] + + +@njit +def value_chunk(start, length, multi_table_distribution, data): + start += multi_table_distribution["value_offset"] + end = start + length + return data[start:end] + + +@njit +def pdf(index, multi_table_distribution, data): + offset = multi_table_distribution["pdf_offset"] + return data[offset + index] + + +@njit +def pdf_all(multi_table_distribution, data): + start = multi_table_distribution["pdf_offset"] + size = multi_table_distribution["pdf_length"] + end = start + size + return data[start:end] + + +@njit +def pdf_last(multi_table_distribution, data): + start = multi_table_distribution["pdf_offset"] + size = multi_table_distribution["pdf_length"] + end = start + size + return data[end - 1] + + +@njit +def pdf_chunk(start, length, multi_table_distribution, data): + start += multi_table_distribution["pdf_offset"] + end = start + length + return data[start:end] + + +@njit +def cdf(index, multi_table_distribution, data): + offset = multi_table_distribution["cdf_offset"] + return data[offset + index] + + +@njit +def cdf_all(multi_table_distribution, data): + start = multi_table_distribution["cdf_offset"] + size = multi_table_distribution["cdf_length"] + end = start + size + return data[start:end] + + +@njit +def cdf_last(multi_table_distribution, data): + start = multi_table_distribution["cdf_offset"] + size = 
multi_table_distribution["cdf_length"] + end = start + size + return data[end - 1] + + +@njit +def cdf_chunk(start, length, multi_table_distribution, data): + start += multi_table_distribution["cdf_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/multigroup_material.py b/mcdc/mcdc/mcdc_get/multigroup_material.py new file mode 100644 index 000000000..12f63912c --- /dev/null +++ b/mcdc/mcdc/mcdc_get/multigroup_material.py @@ -0,0 +1,385 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def mgxs_speed(index, multigroup_material, data): + offset = multigroup_material["mgxs_speed_offset"] + return data[offset + index] + + +@njit +def mgxs_speed_all(multigroup_material, data): + start = multigroup_material["mgxs_speed_offset"] + size = multigroup_material["G"] + end = start + size + return data[start:end] + + +@njit +def mgxs_speed_last(multigroup_material, data): + start = multigroup_material["mgxs_speed_offset"] + size = multigroup_material["G"] + end = start + size + return data[end - 1] + + +@njit +def mgxs_speed_chunk(start, length, multigroup_material, data): + start += multigroup_material["mgxs_speed_offset"] + end = start + length + return data[start:end] + + +@njit +def mgxs_decay_rate(index, multigroup_material, data): + offset = multigroup_material["mgxs_decay_rate_offset"] + return data[offset + index] + + +@njit +def mgxs_decay_rate_all(multigroup_material, data): + start = multigroup_material["mgxs_decay_rate_offset"] + size = multigroup_material["J"] + end = start + size + return data[start:end] + + +@njit +def mgxs_decay_rate_last(multigroup_material, data): + start = multigroup_material["mgxs_decay_rate_offset"] + size = multigroup_material["J"] + end = start + size + return data[end - 1] + + +@njit +def mgxs_decay_rate_chunk(start, length, multigroup_material, data): + start += multigroup_material["mgxs_decay_rate_offset"] + end = start + length + return 
data[start:end] + + +@njit +def mgxs_capture(index, multigroup_material, data): + offset = multigroup_material["mgxs_capture_offset"] + return data[offset + index] + + +@njit +def mgxs_capture_all(multigroup_material, data): + start = multigroup_material["mgxs_capture_offset"] + size = multigroup_material["G"] + end = start + size + return data[start:end] + + +@njit +def mgxs_capture_last(multigroup_material, data): + start = multigroup_material["mgxs_capture_offset"] + size = multigroup_material["G"] + end = start + size + return data[end - 1] + + +@njit +def mgxs_capture_chunk(start, length, multigroup_material, data): + start += multigroup_material["mgxs_capture_offset"] + end = start + length + return data[start:end] + + +@njit +def mgxs_scatter(index, multigroup_material, data): + offset = multigroup_material["mgxs_scatter_offset"] + return data[offset + index] + + +@njit +def mgxs_scatter_all(multigroup_material, data): + start = multigroup_material["mgxs_scatter_offset"] + size = multigroup_material["G"] + end = start + size + return data[start:end] + + +@njit +def mgxs_scatter_last(multigroup_material, data): + start = multigroup_material["mgxs_scatter_offset"] + size = multigroup_material["G"] + end = start + size + return data[end - 1] + + +@njit +def mgxs_scatter_chunk(start, length, multigroup_material, data): + start += multigroup_material["mgxs_scatter_offset"] + end = start + length + return data[start:end] + + +@njit +def mgxs_fission(index, multigroup_material, data): + offset = multigroup_material["mgxs_fission_offset"] + return data[offset + index] + + +@njit +def mgxs_fission_all(multigroup_material, data): + start = multigroup_material["mgxs_fission_offset"] + size = multigroup_material["G"] + end = start + size + return data[start:end] + + +@njit +def mgxs_fission_last(multigroup_material, data): + start = multigroup_material["mgxs_fission_offset"] + size = multigroup_material["G"] + end = start + size + return data[end - 1] + + +@njit +def 
mgxs_fission_chunk(start, length, multigroup_material, data): + start += multigroup_material["mgxs_fission_offset"] + end = start + length + return data[start:end] + + +@njit +def mgxs_total(index, multigroup_material, data): + offset = multigroup_material["mgxs_total_offset"] + return data[offset + index] + + +@njit +def mgxs_total_all(multigroup_material, data): + start = multigroup_material["mgxs_total_offset"] + size = multigroup_material["G"] + end = start + size + return data[start:end] + + +@njit +def mgxs_total_last(multigroup_material, data): + start = multigroup_material["mgxs_total_offset"] + size = multigroup_material["G"] + end = start + size + return data[end - 1] + + +@njit +def mgxs_total_chunk(start, length, multigroup_material, data): + start += multigroup_material["mgxs_total_offset"] + end = start + length + return data[start:end] + + +@njit +def mgxs_nu_s(index, multigroup_material, data): + offset = multigroup_material["mgxs_nu_s_offset"] + return data[offset + index] + + +@njit +def mgxs_nu_s_all(multigroup_material, data): + start = multigroup_material["mgxs_nu_s_offset"] + size = multigroup_material["G"] + end = start + size + return data[start:end] + + +@njit +def mgxs_nu_s_last(multigroup_material, data): + start = multigroup_material["mgxs_nu_s_offset"] + size = multigroup_material["G"] + end = start + size + return data[end - 1] + + +@njit +def mgxs_nu_s_chunk(start, length, multigroup_material, data): + start += multigroup_material["mgxs_nu_s_offset"] + end = start + length + return data[start:end] + + +@njit +def mgxs_nu_p(index, multigroup_material, data): + offset = multigroup_material["mgxs_nu_p_offset"] + return data[offset + index] + + +@njit +def mgxs_nu_p_all(multigroup_material, data): + start = multigroup_material["mgxs_nu_p_offset"] + size = multigroup_material["G"] + end = start + size + return data[start:end] + + +@njit +def mgxs_nu_p_last(multigroup_material, data): + start = multigroup_material["mgxs_nu_p_offset"] + size 
= multigroup_material["G"] + end = start + size + return data[end - 1] + + +@njit +def mgxs_nu_p_chunk(start, length, multigroup_material, data): + start += multigroup_material["mgxs_nu_p_offset"] + end = start + length + return data[start:end] + + +@njit +def mgxs_nu_d_vector(index_1, multigroup_material, data): + offset = multigroup_material["mgxs_nu_d_offset"] + stride = multigroup_material["J"] + start = offset + index_1 * stride + end = start + stride + return data[start:end] + + +@njit +def mgxs_nu_d(index_1, index_2, multigroup_material, data): + offset = multigroup_material["mgxs_nu_d_offset"] + stride = multigroup_material["J"] + return data[offset + index_1 * stride + index_2] + + +@njit +def mgxs_nu_d_chunk(start, length, multigroup_material, data): + start += multigroup_material["mgxs_nu_d_offset"] + end = start + length + return data[start:end] + + +@njit +def mgxs_nu_d_total(index, multigroup_material, data): + offset = multigroup_material["mgxs_nu_d_total_offset"] + return data[offset + index] + + +@njit +def mgxs_nu_d_total_all(multigroup_material, data): + start = multigroup_material["mgxs_nu_d_total_offset"] + size = multigroup_material["G"] + end = start + size + return data[start:end] + + +@njit +def mgxs_nu_d_total_last(multigroup_material, data): + start = multigroup_material["mgxs_nu_d_total_offset"] + size = multigroup_material["G"] + end = start + size + return data[end - 1] + + +@njit +def mgxs_nu_d_total_chunk(start, length, multigroup_material, data): + start += multigroup_material["mgxs_nu_d_total_offset"] + end = start + length + return data[start:end] + + +@njit +def mgxs_nu_f(index, multigroup_material, data): + offset = multigroup_material["mgxs_nu_f_offset"] + return data[offset + index] + + +@njit +def mgxs_nu_f_all(multigroup_material, data): + start = multigroup_material["mgxs_nu_f_offset"] + size = multigroup_material["G"] + end = start + size + return data[start:end] + + +@njit +def mgxs_nu_f_last(multigroup_material, data): + 
start = multigroup_material["mgxs_nu_f_offset"] + size = multigroup_material["G"] + end = start + size + return data[end - 1] + + +@njit +def mgxs_nu_f_chunk(start, length, multigroup_material, data): + start += multigroup_material["mgxs_nu_f_offset"] + end = start + length + return data[start:end] + + +@njit +def mgxs_chi_s_vector(index_1, multigroup_material, data): + offset = multigroup_material["mgxs_chi_s_offset"] + stride = multigroup_material["G"] + start = offset + index_1 * stride + end = start + stride + return data[start:end] + + +@njit +def mgxs_chi_s(index_1, index_2, multigroup_material, data): + offset = multigroup_material["mgxs_chi_s_offset"] + stride = multigroup_material["G"] + return data[offset + index_1 * stride + index_2] + + +@njit +def mgxs_chi_s_chunk(start, length, multigroup_material, data): + start += multigroup_material["mgxs_chi_s_offset"] + end = start + length + return data[start:end] + + +@njit +def mgxs_chi_p_vector(index_1, multigroup_material, data): + offset = multigroup_material["mgxs_chi_p_offset"] + stride = multigroup_material["G"] + start = offset + index_1 * stride + end = start + stride + return data[start:end] + + +@njit +def mgxs_chi_p(index_1, index_2, multigroup_material, data): + offset = multigroup_material["mgxs_chi_p_offset"] + stride = multigroup_material["G"] + return data[offset + index_1 * stride + index_2] + + +@njit +def mgxs_chi_p_chunk(start, length, multigroup_material, data): + start += multigroup_material["mgxs_chi_p_offset"] + end = start + length + return data[start:end] + + +@njit +def mgxs_chi_d_vector(index_1, multigroup_material, data): + offset = multigroup_material["mgxs_chi_d_offset"] + stride = multigroup_material["G"] + start = offset + index_1 * stride + end = start + stride + return data[start:end] + + +@njit +def mgxs_chi_d(index_1, index_2, multigroup_material, data): + offset = multigroup_material["mgxs_chi_d_offset"] + stride = multigroup_material["G"] + return data[offset + index_1 * 
stride + index_2] + + +@njit +def mgxs_chi_d_chunk(start, length, multigroup_material, data): + start += multigroup_material["mgxs_chi_d_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/native_material.py b/mcdc/mcdc/mcdc_get/native_material.py new file mode 100644 index 000000000..361294585 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/native_material.py @@ -0,0 +1,119 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def nuclide_IDs(index, native_material, data): + offset = native_material["nuclide_IDs_offset"] + return data[offset + index] + + +@njit +def nuclide_IDs_all(native_material, data): + start = native_material["nuclide_IDs_offset"] + size = native_material["N_nuclide"] + end = start + size + return data[start:end] + + +@njit +def nuclide_IDs_last(native_material, data): + start = native_material["nuclide_IDs_offset"] + size = native_material["N_nuclide"] + end = start + size + return data[end - 1] + + +@njit +def nuclide_IDs_chunk(start, length, native_material, data): + start += native_material["nuclide_IDs_offset"] + end = start + length + return data[start:end] + + +@njit +def element_IDs(index, native_material, data): + offset = native_material["element_IDs_offset"] + return data[offset + index] + + +@njit +def element_IDs_all(native_material, data): + start = native_material["element_IDs_offset"] + size = native_material["N_element"] + end = start + size + return data[start:end] + + +@njit +def element_IDs_last(native_material, data): + start = native_material["element_IDs_offset"] + size = native_material["N_element"] + end = start + size + return data[end - 1] + + +@njit +def element_IDs_chunk(start, length, native_material, data): + start += native_material["element_IDs_offset"] + end = start + length + return data[start:end] + + +@njit +def nuclide_densities(index, native_material, data): + offset = native_material["nuclide_densities_offset"] + return 
data[offset + index] + + +@njit +def nuclide_densities_all(native_material, data): + start = native_material["nuclide_densities_offset"] + size = native_material["nuclide_densities_length"] + end = start + size + return data[start:end] + + +@njit +def nuclide_densities_last(native_material, data): + start = native_material["nuclide_densities_offset"] + size = native_material["nuclide_densities_length"] + end = start + size + return data[end - 1] + + +@njit +def nuclide_densities_chunk(start, length, native_material, data): + start += native_material["nuclide_densities_offset"] + end = start + length + return data[start:end] + + +@njit +def element_densities(index, native_material, data): + offset = native_material["element_densities_offset"] + return data[offset + index] + + +@njit +def element_densities_all(native_material, data): + start = native_material["element_densities_offset"] + size = native_material["element_densities_length"] + end = start + size + return data[start:end] + + +@njit +def element_densities_last(native_material, data): + start = native_material["element_densities_offset"] + size = native_material["element_densities_length"] + end = start + size + return data[end - 1] + + +@njit +def element_densities_chunk(start, length, native_material, data): + start += native_material["element_densities_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/nbody_distribution.py b/mcdc/mcdc/mcdc_get/nbody_distribution.py new file mode 100644 index 000000000..08d5197b1 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/nbody_distribution.py @@ -0,0 +1,90 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def value(index, nbody_distribution, data): + offset = nbody_distribution["value_offset"] + return data[offset + index] + + +@njit +def value_all(nbody_distribution, data): + start = nbody_distribution["value_offset"] + size = nbody_distribution["value_length"] + end = start + size 
+ return data[start:end] + + +@njit +def value_last(nbody_distribution, data): + start = nbody_distribution["value_offset"] + size = nbody_distribution["value_length"] + end = start + size + return data[end - 1] + + +@njit +def value_chunk(start, length, nbody_distribution, data): + start += nbody_distribution["value_offset"] + end = start + length + return data[start:end] + + +@njit +def pdf(index, nbody_distribution, data): + offset = nbody_distribution["pdf_offset"] + return data[offset + index] + + +@njit +def pdf_all(nbody_distribution, data): + start = nbody_distribution["pdf_offset"] + size = nbody_distribution["pdf_length"] + end = start + size + return data[start:end] + + +@njit +def pdf_last(nbody_distribution, data): + start = nbody_distribution["pdf_offset"] + size = nbody_distribution["pdf_length"] + end = start + size + return data[end - 1] + + +@njit +def pdf_chunk(start, length, nbody_distribution, data): + start += nbody_distribution["pdf_offset"] + end = start + length + return data[start:end] + + +@njit +def cdf(index, nbody_distribution, data): + offset = nbody_distribution["cdf_offset"] + return data[offset + index] + + +@njit +def cdf_all(nbody_distribution, data): + start = nbody_distribution["cdf_offset"] + size = nbody_distribution["cdf_length"] + end = start + size + return data[start:end] + + +@njit +def cdf_last(nbody_distribution, data): + start = nbody_distribution["cdf_offset"] + size = nbody_distribution["cdf_length"] + end = start + size + return data[end - 1] + + +@njit +def cdf_chunk(start, length, nbody_distribution, data): + start += nbody_distribution["cdf_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/neutron_capture_reaction.py b/mcdc/mcdc/mcdc_get/neutron_capture_reaction.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/neutron_capture_reaction.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba 
import njit diff --git a/mcdc/mcdc/mcdc_get/neutron_elastic_scattering_reaction.py b/mcdc/mcdc/mcdc_get/neutron_elastic_scattering_reaction.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/neutron_elastic_scattering_reaction.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/neutron_fission_reaction.py b/mcdc/mcdc/mcdc_get/neutron_fission_reaction.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/neutron_fission_reaction.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/neutron_inelastic_scattering_reaction.py b/mcdc/mcdc/mcdc_get/neutron_inelastic_scattering_reaction.py new file mode 100644 index 000000000..5cf2f0dd4 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/neutron_inelastic_scattering_reaction.py @@ -0,0 +1,84 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def spectrum_probability_grid(index, neutron_inelastic_scattering_reaction, data): + offset = neutron_inelastic_scattering_reaction["spectrum_probability_grid_offset"] + return data[offset + index] + + +@njit +def spectrum_probability_grid_all(neutron_inelastic_scattering_reaction, data): + start = neutron_inelastic_scattering_reaction["spectrum_probability_grid_offset"] + size = neutron_inelastic_scattering_reaction["spectrum_probability_grid_length"] + end = start + size + return data[start:end] + + +@njit +def spectrum_probability_grid_last(neutron_inelastic_scattering_reaction, data): + start = neutron_inelastic_scattering_reaction["spectrum_probability_grid_offset"] + size = neutron_inelastic_scattering_reaction["spectrum_probability_grid_length"] + end = start + size + return data[end - 1] + + +@njit +def spectrum_probability_grid_chunk(start, length, neutron_inelastic_scattering_reaction, data): 
+ start += neutron_inelastic_scattering_reaction["spectrum_probability_grid_offset"] + end = start + length + return data[start:end] + + +@njit +def spectrum_probability_vector(index_1, neutron_inelastic_scattering_reaction, data): + offset = neutron_inelastic_scattering_reaction["spectrum_probability_offset"] + stride = neutron_inelastic_scattering_reaction["N_spectrum"] + start = offset + index_1 * stride + end = start + stride + return data[start:end] + + +@njit +def spectrum_probability(index_1, index_2, neutron_inelastic_scattering_reaction, data): + offset = neutron_inelastic_scattering_reaction["spectrum_probability_offset"] + stride = neutron_inelastic_scattering_reaction["N_spectrum"] + return data[offset + index_1 * stride + index_2] + + +@njit +def spectrum_probability_chunk(start, length, neutron_inelastic_scattering_reaction, data): + start += neutron_inelastic_scattering_reaction["spectrum_probability_offset"] + end = start + length + return data[start:end] + + +@njit +def energy_spectrum_IDs(index, neutron_inelastic_scattering_reaction, data): + offset = neutron_inelastic_scattering_reaction["energy_spectrum_IDs_offset"] + return data[offset + index] + + +@njit +def energy_spectrum_IDs_all(neutron_inelastic_scattering_reaction, data): + start = neutron_inelastic_scattering_reaction["energy_spectrum_IDs_offset"] + size = neutron_inelastic_scattering_reaction["N_energy_spectrum"] + end = start + size + return data[start:end] + + +@njit +def energy_spectrum_IDs_last(neutron_inelastic_scattering_reaction, data): + start = neutron_inelastic_scattering_reaction["energy_spectrum_IDs_offset"] + size = neutron_inelastic_scattering_reaction["N_energy_spectrum"] + end = start + size + return data[end - 1] + + +@njit +def energy_spectrum_IDs_chunk(start, length, neutron_inelastic_scattering_reaction, data): + start += neutron_inelastic_scattering_reaction["energy_spectrum_IDs_offset"] + end = start + length + return data[start:end] diff --git 
a/mcdc/mcdc/mcdc_get/neutron_reaction.py b/mcdc/mcdc/mcdc_get/neutron_reaction.py new file mode 100644 index 000000000..d01f98cf8 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/neutron_reaction.py @@ -0,0 +1,32 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def xs(index, neutron_reaction, data): + offset = neutron_reaction["xs_offset"] + return data[offset + index] + + +@njit +def xs_all(neutron_reaction, data): + start = neutron_reaction["xs_offset"] + size = neutron_reaction["xs_length"] + end = start + size + return data[start:end] + + +@njit +def xs_last(neutron_reaction, data): + start = neutron_reaction["xs_offset"] + size = neutron_reaction["xs_length"] + end = start + size + return data[end - 1] + + +@njit +def xs_chunk(start, length, neutron_reaction, data): + start += neutron_reaction["xs_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/none_data.py b/mcdc/mcdc/mcdc_get/none_data.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/none_data.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/none_distribution.py b/mcdc/mcdc/mcdc_get/none_distribution.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/none_distribution.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/nuclide.py b/mcdc/mcdc/mcdc_get/nuclide.py new file mode 100644 index 000000000..af9593f10 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/nuclide.py @@ -0,0 +1,380 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def neutron_xs_energy_grid(index, nuclide, data): + offset = nuclide["neutron_xs_energy_grid_offset"] + return data[offset + index] + + +@njit +def neutron_xs_energy_grid_all(nuclide, data): + start 
= nuclide["neutron_xs_energy_grid_offset"] + size = nuclide["neutron_xs_energy_grid_length"] + end = start + size + return data[start:end] + + +@njit +def neutron_xs_energy_grid_last(nuclide, data): + start = nuclide["neutron_xs_energy_grid_offset"] + size = nuclide["neutron_xs_energy_grid_length"] + end = start + size + return data[end - 1] + + +@njit +def neutron_xs_energy_grid_chunk(start, length, nuclide, data): + start += nuclide["neutron_xs_energy_grid_offset"] + end = start + length + return data[start:end] + + +@njit +def neutron_total_xs(index, nuclide, data): + offset = nuclide["neutron_total_xs_offset"] + return data[offset + index] + + +@njit +def neutron_total_xs_all(nuclide, data): + start = nuclide["neutron_total_xs_offset"] + size = nuclide["neutron_total_xs_length"] + end = start + size + return data[start:end] + + +@njit +def neutron_total_xs_last(nuclide, data): + start = nuclide["neutron_total_xs_offset"] + size = nuclide["neutron_total_xs_length"] + end = start + size + return data[end - 1] + + +@njit +def neutron_total_xs_chunk(start, length, nuclide, data): + start += nuclide["neutron_total_xs_offset"] + end = start + length + return data[start:end] + + +@njit +def neutron_elastic_xs(index, nuclide, data): + offset = nuclide["neutron_elastic_xs_offset"] + return data[offset + index] + + +@njit +def neutron_elastic_xs_all(nuclide, data): + start = nuclide["neutron_elastic_xs_offset"] + size = nuclide["neutron_elastic_xs_length"] + end = start + size + return data[start:end] + + +@njit +def neutron_elastic_xs_last(nuclide, data): + start = nuclide["neutron_elastic_xs_offset"] + size = nuclide["neutron_elastic_xs_length"] + end = start + size + return data[end - 1] + + +@njit +def neutron_elastic_xs_chunk(start, length, nuclide, data): + start += nuclide["neutron_elastic_xs_offset"] + end = start + length + return data[start:end] + + +@njit +def neutron_capture_xs(index, nuclide, data): + offset = nuclide["neutron_capture_xs_offset"] + return 
data[offset + index] + + +@njit +def neutron_capture_xs_all(nuclide, data): + start = nuclide["neutron_capture_xs_offset"] + size = nuclide["neutron_capture_xs_length"] + end = start + size + return data[start:end] + + +@njit +def neutron_capture_xs_last(nuclide, data): + start = nuclide["neutron_capture_xs_offset"] + size = nuclide["neutron_capture_xs_length"] + end = start + size + return data[end - 1] + + +@njit +def neutron_capture_xs_chunk(start, length, nuclide, data): + start += nuclide["neutron_capture_xs_offset"] + end = start + length + return data[start:end] + + +@njit +def neutron_inelastic_xs(index, nuclide, data): + offset = nuclide["neutron_inelastic_xs_offset"] + return data[offset + index] + + +@njit +def neutron_inelastic_xs_all(nuclide, data): + start = nuclide["neutron_inelastic_xs_offset"] + size = nuclide["neutron_inelastic_xs_length"] + end = start + size + return data[start:end] + + +@njit +def neutron_inelastic_xs_last(nuclide, data): + start = nuclide["neutron_inelastic_xs_offset"] + size = nuclide["neutron_inelastic_xs_length"] + end = start + size + return data[end - 1] + + +@njit +def neutron_inelastic_xs_chunk(start, length, nuclide, data): + start += nuclide["neutron_inelastic_xs_offset"] + end = start + length + return data[start:end] + + +@njit +def neutron_fission_xs(index, nuclide, data): + offset = nuclide["neutron_fission_xs_offset"] + return data[offset + index] + + +@njit +def neutron_fission_xs_all(nuclide, data): + start = nuclide["neutron_fission_xs_offset"] + size = nuclide["neutron_fission_xs_length"] + end = start + size + return data[start:end] + + +@njit +def neutron_fission_xs_last(nuclide, data): + start = nuclide["neutron_fission_xs_offset"] + size = nuclide["neutron_fission_xs_length"] + end = start + size + return data[end - 1] + + +@njit +def neutron_fission_xs_chunk(start, length, nuclide, data): + start += nuclide["neutron_fission_xs_offset"] + end = start + length + return data[start:end] + + +@njit +def 
neutron_elastic_scattering_reaction_IDs(index, nuclide, data): + offset = nuclide["neutron_elastic_scattering_reaction_IDs_offset"] + return data[offset + index] + + +@njit +def neutron_elastic_scattering_reaction_IDs_all(nuclide, data): + start = nuclide["neutron_elastic_scattering_reaction_IDs_offset"] + size = nuclide["N_neutron_elastic_scattering_reaction"] + end = start + size + return data[start:end] + + +@njit +def neutron_elastic_scattering_reaction_IDs_last(nuclide, data): + start = nuclide["neutron_elastic_scattering_reaction_IDs_offset"] + size = nuclide["N_neutron_elastic_scattering_reaction"] + end = start + size + return data[end - 1] + + +@njit +def neutron_elastic_scattering_reaction_IDs_chunk(start, length, nuclide, data): + start += nuclide["neutron_elastic_scattering_reaction_IDs_offset"] + end = start + length + return data[start:end] + + +@njit +def neutron_capture_reaction_IDs(index, nuclide, data): + offset = nuclide["neutron_capture_reaction_IDs_offset"] + return data[offset + index] + + +@njit +def neutron_capture_reaction_IDs_all(nuclide, data): + start = nuclide["neutron_capture_reaction_IDs_offset"] + size = nuclide["N_neutron_capture_reaction"] + end = start + size + return data[start:end] + + +@njit +def neutron_capture_reaction_IDs_last(nuclide, data): + start = nuclide["neutron_capture_reaction_IDs_offset"] + size = nuclide["N_neutron_capture_reaction"] + end = start + size + return data[end - 1] + + +@njit +def neutron_capture_reaction_IDs_chunk(start, length, nuclide, data): + start += nuclide["neutron_capture_reaction_IDs_offset"] + end = start + length + return data[start:end] + + +@njit +def neutron_inelastic_scattering_reaction_IDs(index, nuclide, data): + offset = nuclide["neutron_inelastic_scattering_reaction_IDs_offset"] + return data[offset + index] + + +@njit +def neutron_inelastic_scattering_reaction_IDs_all(nuclide, data): + start = nuclide["neutron_inelastic_scattering_reaction_IDs_offset"] + size = 
nuclide["N_neutron_inelastic_scattering_reaction"] + end = start + size + return data[start:end] + + +@njit +def neutron_inelastic_scattering_reaction_IDs_last(nuclide, data): + start = nuclide["neutron_inelastic_scattering_reaction_IDs_offset"] + size = nuclide["N_neutron_inelastic_scattering_reaction"] + end = start + size + return data[end - 1] + + +@njit +def neutron_inelastic_scattering_reaction_IDs_chunk(start, length, nuclide, data): + start += nuclide["neutron_inelastic_scattering_reaction_IDs_offset"] + end = start + length + return data[start:end] + + +@njit +def neutron_fission_reaction_IDs(index, nuclide, data): + offset = nuclide["neutron_fission_reaction_IDs_offset"] + return data[offset + index] + + +@njit +def neutron_fission_reaction_IDs_all(nuclide, data): + start = nuclide["neutron_fission_reaction_IDs_offset"] + size = nuclide["N_neutron_fission_reaction"] + end = start + size + return data[start:end] + + +@njit +def neutron_fission_reaction_IDs_last(nuclide, data): + start = nuclide["neutron_fission_reaction_IDs_offset"] + size = nuclide["N_neutron_fission_reaction"] + end = start + size + return data[end - 1] + + +@njit +def neutron_fission_reaction_IDs_chunk(start, length, nuclide, data): + start += nuclide["neutron_fission_reaction_IDs_offset"] + end = start + length + return data[start:end] + + +@njit +def neutron_fission_delayed_fractions(index, nuclide, data): + offset = nuclide["neutron_fission_delayed_fractions_offset"] + return data[offset + index] + + +@njit +def neutron_fission_delayed_fractions_all(nuclide, data): + start = nuclide["neutron_fission_delayed_fractions_offset"] + size = nuclide["neutron_fission_delayed_fractions_length"] + end = start + size + return data[start:end] + + +@njit +def neutron_fission_delayed_fractions_last(nuclide, data): + start = nuclide["neutron_fission_delayed_fractions_offset"] + size = nuclide["neutron_fission_delayed_fractions_length"] + end = start + size + return data[end - 1] + + +@njit +def 
neutron_fission_delayed_fractions_chunk(start, length, nuclide, data): + start += nuclide["neutron_fission_delayed_fractions_offset"] + end = start + length + return data[start:end] + + +@njit +def neutron_fission_delayed_decay_rates(index, nuclide, data): + offset = nuclide["neutron_fission_delayed_decay_rates_offset"] + return data[offset + index] + + +@njit +def neutron_fission_delayed_decay_rates_all(nuclide, data): + start = nuclide["neutron_fission_delayed_decay_rates_offset"] + size = nuclide["neutron_fission_delayed_decay_rates_length"] + end = start + size + return data[start:end] + + +@njit +def neutron_fission_delayed_decay_rates_last(nuclide, data): + start = nuclide["neutron_fission_delayed_decay_rates_offset"] + size = nuclide["neutron_fission_delayed_decay_rates_length"] + end = start + size + return data[end - 1] + + +@njit +def neutron_fission_delayed_decay_rates_chunk(start, length, nuclide, data): + start += nuclide["neutron_fission_delayed_decay_rates_offset"] + end = start + length + return data[start:end] + + +@njit +def neutron_fission_delayed_spectrum_IDs(index, nuclide, data): + offset = nuclide["neutron_fission_delayed_spectrum_IDs_offset"] + return data[offset + index] + + +@njit +def neutron_fission_delayed_spectrum_IDs_all(nuclide, data): + start = nuclide["neutron_fission_delayed_spectrum_IDs_offset"] + size = nuclide["N_neutron_fission_delayed_spectrum"] + end = start + size + return data[start:end] + + +@njit +def neutron_fission_delayed_spectrum_IDs_last(nuclide, data): + start = nuclide["neutron_fission_delayed_spectrum_IDs_offset"] + size = nuclide["N_neutron_fission_delayed_spectrum"] + end = start + size + return data[end - 1] + + +@njit +def neutron_fission_delayed_spectrum_IDs_chunk(start, length, nuclide, data): + start += nuclide["neutron_fission_delayed_spectrum_IDs_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/particle.py b/mcdc/mcdc/mcdc_get/particle.py new file mode 100644 index 
000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/particle.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/particle_bank.py b/mcdc/mcdc/mcdc_get/particle_bank.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/particle_bank.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/particle_data.py b/mcdc/mcdc/mcdc_get/particle_data.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/particle_data.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/pmf_distribution.py b/mcdc/mcdc/mcdc_get/pmf_distribution.py new file mode 100644 index 000000000..e5f4a259d --- /dev/null +++ b/mcdc/mcdc/mcdc_get/pmf_distribution.py @@ -0,0 +1,90 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def value(index, pmf_distribution, data): + offset = pmf_distribution["value_offset"] + return data[offset + index] + + +@njit +def value_all(pmf_distribution, data): + start = pmf_distribution["value_offset"] + size = pmf_distribution["value_length"] + end = start + size + return data[start:end] + + +@njit +def value_last(pmf_distribution, data): + start = pmf_distribution["value_offset"] + size = pmf_distribution["value_length"] + end = start + size + return data[end - 1] + + +@njit +def value_chunk(start, length, pmf_distribution, data): + start += pmf_distribution["value_offset"] + end = start + length + return data[start:end] + + +@njit +def pmf(index, pmf_distribution, data): + offset = pmf_distribution["pmf_offset"] + return data[offset + index] + + +@njit +def pmf_all(pmf_distribution, data): + start = pmf_distribution["pmf_offset"] + size = pmf_distribution["pmf_length"] + end = start + 
size + return data[start:end] + + +@njit +def pmf_last(pmf_distribution, data): + start = pmf_distribution["pmf_offset"] + size = pmf_distribution["pmf_length"] + end = start + size + return data[end - 1] + + +@njit +def pmf_chunk(start, length, pmf_distribution, data): + start += pmf_distribution["pmf_offset"] + end = start + length + return data[start:end] + + +@njit +def cmf(index, pmf_distribution, data): + offset = pmf_distribution["cmf_offset"] + return data[offset + index] + + +@njit +def cmf_all(pmf_distribution, data): + start = pmf_distribution["cmf_offset"] + size = pmf_distribution["cmf_length"] + end = start + size + return data[start:end] + + +@njit +def cmf_last(pmf_distribution, data): + start = pmf_distribution["cmf_offset"] + size = pmf_distribution["cmf_length"] + end = start + size + return data[end - 1] + + +@njit +def cmf_chunk(start, length, pmf_distribution, data): + start += pmf_distribution["cmf_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/polynomial_data.py b/mcdc/mcdc/mcdc_get/polynomial_data.py new file mode 100644 index 000000000..05e4d1a9f --- /dev/null +++ b/mcdc/mcdc/mcdc_get/polynomial_data.py @@ -0,0 +1,32 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def coefficients(index, polynomial_data, data): + offset = polynomial_data["coefficients_offset"] + return data[offset + index] + + +@njit +def coefficients_all(polynomial_data, data): + start = polynomial_data["coefficients_offset"] + size = polynomial_data["coefficients_length"] + end = start + size + return data[start:end] + + +@njit +def coefficients_last(polynomial_data, data): + start = polynomial_data["coefficients_offset"] + size = polynomial_data["coefficients_length"] + end = start + size + return data[end - 1] + + +@njit +def coefficients_chunk(start, length, polynomial_data, data): + start += polynomial_data["coefficients_offset"] + end = start + length + return 
data[start:end] diff --git a/mcdc/mcdc/mcdc_get/population_control.py b/mcdc/mcdc/mcdc_get/population_control.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/population_control.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/reaction.py b/mcdc/mcdc/mcdc_get/reaction.py new file mode 100644 index 000000000..b231d122f --- /dev/null +++ b/mcdc/mcdc/mcdc_get/reaction.py @@ -0,0 +1,32 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def xs(index, reaction, data): + offset = reaction["xs_offset"] + return data[offset + index] + + +@njit +def xs_all(reaction, data): + start = reaction["xs_offset"] + size = reaction["xs_length"] + end = start + size + return data[start:end] + + +@njit +def xs_last(reaction, data): + start = reaction["xs_offset"] + size = reaction["xs_length"] + end = start + size + return data[end - 1] + + +@njit +def xs_chunk(start, length, reaction, data): + start += reaction["xs_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/settings.py b/mcdc/mcdc/mcdc_get/settings.py new file mode 100644 index 000000000..cb06d70e9 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/settings.py @@ -0,0 +1,32 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def census_time(index, settings, data): + offset = settings["census_time_offset"] + return data[offset + index] + + +@njit +def census_time_all(settings, data): + start = settings["census_time_offset"] + size = settings["census_time_length"] + end = start + size + return data[start:end] + + +@njit +def census_time_last(settings, data): + start = settings["census_time_offset"] + size = settings["census_time_length"] + end = start + size + return data[end - 1] + + +@njit +def census_time_chunk(start, length, settings, data): + start += 
# Accessors for simulation, source, and structured_mesh structs over the
# flattened global data array (offset/length layout).

# --- simulation: k_cycle field ---


@njit
def k_cycle(index, simulation, data):
    return data[simulation["k_cycle_offset"] + index]


@njit
def k_cycle_all(simulation, data):
    lo = simulation["k_cycle_offset"]
    return data[lo : lo + simulation["k_cycle_length"]]


@njit
def k_cycle_last(simulation, data):
    return data[simulation["k_cycle_offset"] + simulation["k_cycle_length"] - 1]


@njit
def k_cycle_chunk(start, length, simulation, data):
    lo = simulation["k_cycle_offset"] + start
    return data[lo : lo + length]


# --- simulation: gyration_radius field ---


@njit
def gyration_radius(index, simulation, data):
    return data[simulation["gyration_radius_offset"] + index]


@njit
def gyration_radius_all(simulation, data):
    lo = simulation["gyration_radius_offset"]
    return data[lo : lo + simulation["gyration_radius_length"]]


@njit
def gyration_radius_last(simulation, data):
    return data[
        simulation["gyration_radius_offset"] + simulation["gyration_radius_length"] - 1
    ]


@njit
def gyration_radius_chunk(start, length, simulation, data):
    lo = simulation["gyration_radius_offset"] + start
    return data[lo : lo + length]


# --- source: move_velocities (N x 3 row-major) ---


@njit
def move_velocities_vector(index_1, source, data):
    # One 3-component velocity row.
    lo = source["move_velocities_offset"] + index_1 * 3
    return data[lo : lo + 3]


@njit
def move_velocities(index_1, index_2, source, data):
    return data[source["move_velocities_offset"] + index_1 * 3 + index_2]


@njit
def move_velocities_chunk(start, length, source, data):
    lo = source["move_velocities_offset"] + start
    return data[lo : lo + length]


# --- source: move_durations (length N_move) ---


@njit
def move_durations(index, source, data):
    return data[source["move_durations_offset"] + index]


@njit
def move_durations_all(source, data):
    lo = source["move_durations_offset"]
    return data[lo : lo + source["N_move"]]


@njit
def move_durations_last(source, data):
    return data[source["move_durations_offset"] + source["N_move"] - 1]


@njit
def move_durations_chunk(start, length, source, data):
    lo = source["move_durations_offset"] + start
    return data[lo : lo + length]


# --- source: move_time_grid (length N_move_grid) ---


@njit
def move_time_grid(index, source, data):
    return data[source["move_time_grid_offset"] + index]


@njit
def move_time_grid_all(source, data):
    lo = source["move_time_grid_offset"]
    return data[lo : lo + source["N_move_grid"]]


@njit
def move_time_grid_last(source, data):
    return data[source["move_time_grid_offset"] + source["N_move_grid"] - 1]


@njit
def move_time_grid_chunk(start, length, source, data):
    lo = source["move_time_grid_offset"] + start
    return data[lo : lo + length]


# --- source: move_translations (N x 3 row-major) ---


@njit
def move_translations_vector(index_1, source, data):
    lo = source["move_translations_offset"] + index_1 * 3
    return data[lo : lo + 3]


@njit
def move_translations(index_1, index_2, source, data):
    return data[source["move_translations_offset"] + index_1 * 3 + index_2]


@njit
def move_translations_chunk(start, length, source, data):
    lo = source["move_translations_offset"] + start
    return data[lo : lo + length]


# --- structured_mesh: x / y / z grids ---


@njit
def x(index, structured_mesh, data):
    return data[structured_mesh["x_offset"] + index]


@njit
def x_all(structured_mesh, data):
    lo = structured_mesh["x_offset"]
    return data[lo : lo + structured_mesh["x_length"]]


@njit
def x_last(structured_mesh, data):
    return data[structured_mesh["x_offset"] + structured_mesh["x_length"] - 1]


@njit
def x_chunk(start, length, structured_mesh, data):
    lo = structured_mesh["x_offset"] + start
    return data[lo : lo + length]


@njit
def y(index, structured_mesh, data):
    return data[structured_mesh["y_offset"] + index]


@njit
def y_all(structured_mesh, data):
    lo = structured_mesh["y_offset"]
    return data[lo : lo + structured_mesh["y_length"]]


@njit
def y_last(structured_mesh, data):
    return data[structured_mesh["y_offset"] + structured_mesh["y_length"] - 1]


@njit
def y_chunk(start, length, structured_mesh, data):
    lo = structured_mesh["y_offset"] + start
    return data[lo : lo + length]


@njit
def z(index, structured_mesh, data):
    return data[structured_mesh["z_offset"] + index]


@njit
def z_all(structured_mesh, data):
    lo = structured_mesh["z_offset"]
    return data[lo : lo + structured_mesh["z_length"]]
# Accessors for the tail of structured_mesh (z field) and for the surface
# struct, over the flattened global data array (offset/length layout).


@njit
def z_last(structured_mesh, data):
    return data[structured_mesh["z_offset"] + structured_mesh["z_length"] - 1]


@njit
def z_chunk(start, length, structured_mesh, data):
    lo = structured_mesh["z_offset"] + start
    return data[lo : lo + length]


# --- surface: move_velocities (N x 3 row-major) ---


@njit
def move_velocities_vector(index_1, surface, data):
    lo = surface["move_velocities_offset"] + index_1 * 3
    return data[lo : lo + 3]


@njit
def move_velocities(index_1, index_2, surface, data):
    return data[surface["move_velocities_offset"] + index_1 * 3 + index_2]


@njit
def move_velocities_chunk(start, length, surface, data):
    lo = surface["move_velocities_offset"] + start
    return data[lo : lo + length]


# --- surface: move_durations (length N_move) ---


@njit
def move_durations(index, surface, data):
    return data[surface["move_durations_offset"] + index]


@njit
def move_durations_all(surface, data):
    lo = surface["move_durations_offset"]
    return data[lo : lo + surface["N_move"]]


@njit
def move_durations_last(surface, data):
    return data[surface["move_durations_offset"] + surface["N_move"] - 1]


@njit
def move_durations_chunk(start, length, surface, data):
    lo = surface["move_durations_offset"] + start
    return data[lo : lo + length]


# --- surface: move_time_grid (length N_move_grid) ---


@njit
def move_time_grid(index, surface, data):
    return data[surface["move_time_grid_offset"] + index]


@njit
def move_time_grid_all(surface, data):
    lo = surface["move_time_grid_offset"]
    return data[lo : lo + surface["N_move_grid"]]


@njit
def move_time_grid_last(surface, data):
    return data[surface["move_time_grid_offset"] + surface["N_move_grid"] - 1]


@njit
def move_time_grid_chunk(start, length, surface, data):
    lo = surface["move_time_grid_offset"] + start
    return data[lo : lo + length]


# --- surface: move_translations (N x 3 row-major) ---


@njit
def move_translations_vector(index_1, surface, data):
    lo = surface["move_translations_offset"] + index_1 * 3
    return data[lo : lo + 3]


@njit
def move_translations(index_1, index_2, surface, data):
    return data[surface["move_translations_offset"] + index_1 * 3 + index_2]


@njit
def move_translations_chunk(start, length, surface, data):
    lo = surface["move_translations_offset"] + start
    return data[lo : lo + length]


# --- surface: tally_IDs (length N_tally) ---


@njit
def tally_IDs(index, surface, data):
    return data[surface["tally_IDs_offset"] + index]


@njit
def tally_IDs_all(surface, data):
    lo = surface["tally_IDs_offset"]
    return data[lo : lo + surface["N_tally"]]


@njit
def tally_IDs_last(surface, data):
    return data[surface["tally_IDs_offset"] + surface["N_tally"] - 1]


@njit
def tally_IDs_chunk(start, length, surface, data):
    lo = surface["tally_IDs_offset"] + start
    return data[lo : lo + length]
b/mcdc/mcdc/mcdc_get/table_data.py new file mode 100644 index 000000000..854f9f357 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/table_data.py @@ -0,0 +1,61 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def x(index, table_data, data): + offset = table_data["x_offset"] + return data[offset + index] + + +@njit +def x_all(table_data, data): + start = table_data["x_offset"] + size = table_data["x_length"] + end = start + size + return data[start:end] + + +@njit +def x_last(table_data, data): + start = table_data["x_offset"] + size = table_data["x_length"] + end = start + size + return data[end - 1] + + +@njit +def x_chunk(start, length, table_data, data): + start += table_data["x_offset"] + end = start + length + return data[start:end] + + +@njit +def y(index, table_data, data): + offset = table_data["y_offset"] + return data[offset + index] + + +@njit +def y_all(table_data, data): + start = table_data["y_offset"] + size = table_data["y_length"] + end = start + size + return data[start:end] + + +@njit +def y_last(table_data, data): + start = table_data["y_offset"] + size = table_data["y_length"] + end = start + size + return data[end - 1] + + +@njit +def y_chunk(start, length, table_data, data): + start += table_data["y_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/tabulated_distribution.py b/mcdc/mcdc/mcdc_get/tabulated_distribution.py new file mode 100644 index 000000000..d8e356bf4 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/tabulated_distribution.py @@ -0,0 +1,90 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def value(index, tabulated_distribution, data): + offset = tabulated_distribution["value_offset"] + return data[offset + index] + + +@njit +def value_all(tabulated_distribution, data): + start = tabulated_distribution["value_offset"] + size = tabulated_distribution["value_length"] + end = start + size + return 
data[start:end] + + +@njit +def value_last(tabulated_distribution, data): + start = tabulated_distribution["value_offset"] + size = tabulated_distribution["value_length"] + end = start + size + return data[end - 1] + + +@njit +def value_chunk(start, length, tabulated_distribution, data): + start += tabulated_distribution["value_offset"] + end = start + length + return data[start:end] + + +@njit +def pdf(index, tabulated_distribution, data): + offset = tabulated_distribution["pdf_offset"] + return data[offset + index] + + +@njit +def pdf_all(tabulated_distribution, data): + start = tabulated_distribution["pdf_offset"] + size = tabulated_distribution["pdf_length"] + end = start + size + return data[start:end] + + +@njit +def pdf_last(tabulated_distribution, data): + start = tabulated_distribution["pdf_offset"] + size = tabulated_distribution["pdf_length"] + end = start + size + return data[end - 1] + + +@njit +def pdf_chunk(start, length, tabulated_distribution, data): + start += tabulated_distribution["pdf_offset"] + end = start + length + return data[start:end] + + +@njit +def cdf(index, tabulated_distribution, data): + offset = tabulated_distribution["cdf_offset"] + return data[offset + index] + + +@njit +def cdf_all(tabulated_distribution, data): + start = tabulated_distribution["cdf_offset"] + size = tabulated_distribution["cdf_length"] + end = start + size + return data[start:end] + + +@njit +def cdf_last(tabulated_distribution, data): + start = tabulated_distribution["cdf_offset"] + size = tabulated_distribution["cdf_length"] + end = start + size + return data[end - 1] + + +@njit +def cdf_chunk(start, length, tabulated_distribution, data): + start += tabulated_distribution["cdf_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/tabulated_energy_angle_distribution.py b/mcdc/mcdc/mcdc_get/tabulated_energy_angle_distribution.py new file mode 100644 index 000000000..aeb1fe914 --- /dev/null +++ 
b/mcdc/mcdc/mcdc_get/tabulated_energy_angle_distribution.py @@ -0,0 +1,264 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def energy(index, tabulated_energy_angle_distribution, data): + offset = tabulated_energy_angle_distribution["energy_offset"] + return data[offset + index] + + +@njit +def energy_all(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["energy_offset"] + size = tabulated_energy_angle_distribution["energy_length"] + end = start + size + return data[start:end] + + +@njit +def energy_last(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["energy_offset"] + size = tabulated_energy_angle_distribution["energy_length"] + end = start + size + return data[end - 1] + + +@njit +def energy_chunk(start, length, tabulated_energy_angle_distribution, data): + start += tabulated_energy_angle_distribution["energy_offset"] + end = start + length + return data[start:end] + + +@njit +def offset(index, tabulated_energy_angle_distribution, data): + offset = tabulated_energy_angle_distribution["offset_offset"] + return data[offset + index] + + +@njit +def offset_all(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["offset_offset"] + size = tabulated_energy_angle_distribution["offset_length"] + end = start + size + return data[start:end] + + +@njit +def offset_last(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["offset_offset"] + size = tabulated_energy_angle_distribution["offset_length"] + end = start + size + return data[end - 1] + + +@njit +def offset_chunk(start, length, tabulated_energy_angle_distribution, data): + start += tabulated_energy_angle_distribution["offset_offset"] + end = start + length + return data[start:end] + + +@njit +def energy_out(index, tabulated_energy_angle_distribution, data): + offset = 
tabulated_energy_angle_distribution["energy_out_offset"] + return data[offset + index] + + +@njit +def energy_out_all(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["energy_out_offset"] + size = tabulated_energy_angle_distribution["energy_out_length"] + end = start + size + return data[start:end] + + +@njit +def energy_out_last(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["energy_out_offset"] + size = tabulated_energy_angle_distribution["energy_out_length"] + end = start + size + return data[end - 1] + + +@njit +def energy_out_chunk(start, length, tabulated_energy_angle_distribution, data): + start += tabulated_energy_angle_distribution["energy_out_offset"] + end = start + length + return data[start:end] + + +@njit +def pdf(index, tabulated_energy_angle_distribution, data): + offset = tabulated_energy_angle_distribution["pdf_offset"] + return data[offset + index] + + +@njit +def pdf_all(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["pdf_offset"] + size = tabulated_energy_angle_distribution["pdf_length"] + end = start + size + return data[start:end] + + +@njit +def pdf_last(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["pdf_offset"] + size = tabulated_energy_angle_distribution["pdf_length"] + end = start + size + return data[end - 1] + + +@njit +def pdf_chunk(start, length, tabulated_energy_angle_distribution, data): + start += tabulated_energy_angle_distribution["pdf_offset"] + end = start + length + return data[start:end] + + +@njit +def cdf(index, tabulated_energy_angle_distribution, data): + offset = tabulated_energy_angle_distribution["cdf_offset"] + return data[offset + index] + + +@njit +def cdf_all(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["cdf_offset"] + size = tabulated_energy_angle_distribution["cdf_length"] + end = start + size + 
return data[start:end] + + +@njit +def cdf_last(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["cdf_offset"] + size = tabulated_energy_angle_distribution["cdf_length"] + end = start + size + return data[end - 1] + + +@njit +def cdf_chunk(start, length, tabulated_energy_angle_distribution, data): + start += tabulated_energy_angle_distribution["cdf_offset"] + end = start + length + return data[start:end] + + +@njit +def cosine_offset_(index, tabulated_energy_angle_distribution, data): + offset = tabulated_energy_angle_distribution["cosine_offset__offset"] + return data[offset + index] + + +@njit +def cosine_offset__all(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["cosine_offset__offset"] + size = tabulated_energy_angle_distribution["cosine_offset__length"] + end = start + size + return data[start:end] + + +@njit +def cosine_offset__last(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["cosine_offset__offset"] + size = tabulated_energy_angle_distribution["cosine_offset__length"] + end = start + size + return data[end - 1] + + +@njit +def cosine_offset__chunk(start, length, tabulated_energy_angle_distribution, data): + start += tabulated_energy_angle_distribution["cosine_offset__offset"] + end = start + length + return data[start:end] + + +@njit +def cosine(index, tabulated_energy_angle_distribution, data): + offset = tabulated_energy_angle_distribution["cosine_offset"] + return data[offset + index] + + +@njit +def cosine_all(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["cosine_offset"] + size = tabulated_energy_angle_distribution["cosine_length"] + end = start + size + return data[start:end] + + +@njit +def cosine_last(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["cosine_offset"] + size = tabulated_energy_angle_distribution["cosine_length"] + end = 
start + size + return data[end - 1] + + +@njit +def cosine_chunk(start, length, tabulated_energy_angle_distribution, data): + start += tabulated_energy_angle_distribution["cosine_offset"] + end = start + length + return data[start:end] + + +@njit +def cosine_pdf(index, tabulated_energy_angle_distribution, data): + offset = tabulated_energy_angle_distribution["cosine_pdf_offset"] + return data[offset + index] + + +@njit +def cosine_pdf_all(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["cosine_pdf_offset"] + size = tabulated_energy_angle_distribution["cosine_pdf_length"] + end = start + size + return data[start:end] + + +@njit +def cosine_pdf_last(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["cosine_pdf_offset"] + size = tabulated_energy_angle_distribution["cosine_pdf_length"] + end = start + size + return data[end - 1] + + +@njit +def cosine_pdf_chunk(start, length, tabulated_energy_angle_distribution, data): + start += tabulated_energy_angle_distribution["cosine_pdf_offset"] + end = start + length + return data[start:end] + + +@njit +def cosine_cdf(index, tabulated_energy_angle_distribution, data): + offset = tabulated_energy_angle_distribution["cosine_cdf_offset"] + return data[offset + index] + + +@njit +def cosine_cdf_all(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["cosine_cdf_offset"] + size = tabulated_energy_angle_distribution["cosine_cdf_length"] + end = start + size + return data[start:end] + + +@njit +def cosine_cdf_last(tabulated_energy_angle_distribution, data): + start = tabulated_energy_angle_distribution["cosine_cdf_offset"] + size = tabulated_energy_angle_distribution["cosine_cdf_length"] + end = start + size + return data[end - 1] + + +@njit +def cosine_cdf_chunk(start, length, tabulated_energy_angle_distribution, data): + start += tabulated_energy_angle_distribution["cosine_cdf_offset"] + end = start + length + 
# Accessors for the tally struct over the flattened global data array.
# Field names `bin` and `time` shadow builtins/stdlib names, but they are
# the fixed generated interface and are kept as-is.

# --- tally: scores ---


@njit
def scores(index, tally, data):
    return data[tally["scores_offset"] + index]


@njit
def scores_all(tally, data):
    lo = tally["scores_offset"]
    return data[lo : lo + tally["scores_length"]]


@njit
def scores_last(tally, data):
    return data[tally["scores_offset"] + tally["scores_length"] - 1]


@njit
def scores_chunk(start, length, tally, data):
    lo = tally["scores_offset"] + start
    return data[lo : lo + length]


# --- tally: mu (polar-angle grid) ---


@njit
def mu(index, tally, data):
    return data[tally["mu_offset"] + index]


@njit
def mu_all(tally, data):
    lo = tally["mu_offset"]
    return data[lo : lo + tally["mu_length"]]


@njit
def mu_last(tally, data):
    return data[tally["mu_offset"] + tally["mu_length"] - 1]


@njit
def mu_chunk(start, length, tally, data):
    lo = tally["mu_offset"] + start
    return data[lo : lo + length]


# --- tally: azi (azimuthal-angle grid) ---


@njit
def azi(index, tally, data):
    return data[tally["azi_offset"] + index]


@njit
def azi_all(tally, data):
    lo = tally["azi_offset"]
    return data[lo : lo + tally["azi_length"]]


@njit
def azi_last(tally, data):
    return data[tally["azi_offset"] + tally["azi_length"] - 1]


@njit
def azi_chunk(start, length, tally, data):
    lo = tally["azi_offset"] + start
    return data[lo : lo + length]


# --- tally: energy grid ---


@njit
def energy(index, tally, data):
    return data[tally["energy_offset"] + index]


@njit
def energy_all(tally, data):
    lo = tally["energy_offset"]
    return data[lo : lo + tally["energy_length"]]


@njit
def energy_last(tally, data):
    return data[tally["energy_offset"] + tally["energy_length"] - 1]


@njit
def energy_chunk(start, length, tally, data):
    lo = tally["energy_offset"] + start
    return data[lo : lo + length]


# --- tally: time grid ---


@njit
def time(index, tally, data):
    return data[tally["time_offset"] + index]


@njit
def time_all(tally, data):
    lo = tally["time_offset"]
    return data[lo : lo + tally["time_length"]]


@njit
def time_last(tally, data):
    return data[tally["time_offset"] + tally["time_length"] - 1]


@njit
def time_chunk(start, length, tally, data):
    lo = tally["time_offset"] + start
    return data[lo : lo + length]


# --- tally: bin (working accumulator) ---


@njit
def bin(index, tally, data):
    return data[tally["bin_offset"] + index]


@njit
def bin_all(tally, data):
    lo = tally["bin_offset"]
    return data[lo : lo + tally["bin_length"]]


@njit
def bin_last(tally, data):
    return data[tally["bin_offset"] + tally["bin_length"] - 1]


@njit
def bin_chunk(start, length, tally, data):
    lo = tally["bin_offset"] + start
    return data[lo : lo + length]


# --- tally: bin_sum ---


@njit
def bin_sum(index, tally, data):
    return data[tally["bin_sum_offset"] + index]


@njit
def bin_sum_all(tally, data):
    lo = tally["bin_sum_offset"]
    return data[lo : lo + tally["bin_sum_length"]]


@njit
def bin_sum_last(tally, data):
    return data[tally["bin_sum_offset"] + tally["bin_sum_length"] - 1]


@njit
def bin_sum_chunk(start, length, tally, data):
    lo = tally["bin_sum_offset"] + start
    return data[lo : lo + length]


# --- tally: bin_sum_square ---


@njit
def bin_sum_square(index, tally, data):
    return data[tally["bin_sum_square_offset"] + index]


@njit
def bin_sum_square_all(tally, data):
    lo = tally["bin_sum_square_offset"]
    return data[lo : lo + tally["bin_sum_square_length"]]


@njit
def bin_sum_square_last(tally, data):
    return data[tally["bin_sum_square_offset"] + tally["bin_sum_square_length"] - 1]


@njit
def bin_sum_square_chunk(start, length, tally, data):
    lo = tally["bin_sum_square_offset"] + start
    return data[lo : lo + length]


# --- tally: bin_shape ---


@njit
def bin_shape(index, tally, data):
    return data[tally["bin_shape_offset"] + index]


@njit
def bin_shape_all(tally, data):
    lo = tally["bin_shape_offset"]
    return data[lo : lo + tally["bin_shape_length"]]


@njit
def bin_shape_last(tally, data):
    return data[tally["bin_shape_offset"] + tally["bin_shape_length"] - 1]


@njit
def bin_shape_chunk(start, length, tally, data):
    lo = tally["bin_shape_offset"] + start
    return data[lo : lo + length]
+++ b/mcdc/mcdc/mcdc_get/universe.py @@ -0,0 +1,32 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def cell_IDs(index, universe, data): + offset = universe["cell_IDs_offset"] + return data[offset + index] + + +@njit +def cell_IDs_all(universe, data): + start = universe["cell_IDs_offset"] + size = universe["N_cell"] + end = start + size + return data[start:end] + + +@njit +def cell_IDs_last(universe, data): + start = universe["cell_IDs_offset"] + size = universe["N_cell"] + end = start + size + return data[end - 1] + + +@njit +def cell_IDs_chunk(start, length, universe, data): + start += universe["cell_IDs_offset"] + end = start + length + return data[start:end] diff --git a/mcdc/mcdc/mcdc_get/weight_roulette.py b/mcdc/mcdc/mcdc_get/weight_roulette.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/weight_roulette.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_get/weighted_emission.py b/mcdc/mcdc/mcdc_get/weighted_emission.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_get/weighted_emission.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/__init__.py b/mcdc/mcdc/mcdc_set/__init__.py new file mode 100644 index 000000000..38ce0dddd --- /dev/null +++ b/mcdc/mcdc/mcdc_set/__init__.py @@ -0,0 +1,115 @@ +# The following is automatically generated by code_factory.py + +import mcdc.mcdc_set.particle_data as particle_data + +import mcdc.mcdc_set.particle as particle + +import mcdc.mcdc_set.cell as cell + +import mcdc.mcdc_set.lattice as lattice + +import mcdc.mcdc_set.material as material + +import mcdc.mcdc_set.tally as tally + +import mcdc.mcdc_set.universe as universe + +import mcdc.mcdc_set.data as data + +import mcdc.mcdc_set.none_data as none_data + +import 
mcdc.mcdc_set.polynomial_data as polynomial_data + +import mcdc.mcdc_set.table_data as table_data + +import mcdc.mcdc_set.distribution as distribution + +import mcdc.mcdc_set.evaporation_distribution as evaporation_distribution + +import mcdc.mcdc_set.kalbach_mann_distribution as kalbach_mann_distribution + +import mcdc.mcdc_set.level_scattering_distribution as level_scattering_distribution + +import mcdc.mcdc_set.maxwellian_distribution as maxwellian_distribution + +import mcdc.mcdc_set.multi_table_distribution as multi_table_distribution + +import mcdc.mcdc_set.nbody_distribution as nbody_distribution + +import mcdc.mcdc_set.none_distribution as none_distribution + +import mcdc.mcdc_set.pmf_distribution as pmf_distribution + +import mcdc.mcdc_set.tabulated_distribution as tabulated_distribution + +import mcdc.mcdc_set.tabulated_energy_angle_distribution as tabulated_energy_angle_distribution + +import mcdc.mcdc_set.electron_reaction as electron_reaction + +import mcdc.mcdc_set.electron_bremsstrahlung_reaction as electron_bremsstrahlung_reaction + +import mcdc.mcdc_set.electron_elastic_scattering_reaction as electron_elastic_scattering_reaction + +import mcdc.mcdc_set.electron_excitation_reaction as electron_excitation_reaction + +import mcdc.mcdc_set.electron_ionization_reaction as electron_ionization_reaction + +import mcdc.mcdc_set.element as element + +import mcdc.mcdc_set.gpu_meta as gpu_meta + +import mcdc.mcdc_set.native_material as native_material + +import mcdc.mcdc_set.multigroup_material as multigroup_material + +import mcdc.mcdc_set.nuclide as nuclide + +import mcdc.mcdc_set.mesh as mesh + +import mcdc.mcdc_set.structured_mesh as structured_mesh + +import mcdc.mcdc_set.uniform_mesh as uniform_mesh + +import mcdc.mcdc_set.neutron_reaction as neutron_reaction + +import mcdc.mcdc_set.neutron_capture_reaction as neutron_capture_reaction + +import mcdc.mcdc_set.neutron_elastic_scattering_reaction as neutron_elastic_scattering_reaction + +import 
mcdc.mcdc_set.neutron_fission_reaction as neutron_fission_reaction + +import mcdc.mcdc_set.neutron_inelastic_scattering_reaction as neutron_inelastic_scattering_reaction + +import mcdc.mcdc_set.collision_data as collision_data + +import mcdc.mcdc_set.particle_bank as particle_bank + +import mcdc.mcdc_set.settings as settings + +import mcdc.mcdc_set.implicit_capture as implicit_capture + +import mcdc.mcdc_set.population_control as population_control + +import mcdc.mcdc_set.weight_roulette as weight_roulette + +import mcdc.mcdc_set.weighted_emission as weighted_emission + +import mcdc.mcdc_set.source as source + +import mcdc.mcdc_set.surface as surface + +import mcdc.mcdc_set.surface_tally as surface_tally + +import mcdc.mcdc_set.collision_tally as collision_tally + +import mcdc.mcdc_set.tracklength_tally as tracklength_tally + +import mcdc.mcdc_set.bank_active as bank_active + +import mcdc.mcdc_set.bank_census as bank_census + +import mcdc.mcdc_set.bank_source as bank_source + +import mcdc.mcdc_set.bank_future as bank_future + +import mcdc.mcdc_set.simulation as simulation diff --git a/mcdc/mcdc/mcdc_set/bank_active.py b/mcdc/mcdc/mcdc_set/bank_active.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/bank_active.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/bank_census.py b/mcdc/mcdc/mcdc_set/bank_census.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/bank_census.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/bank_future.py b/mcdc/mcdc/mcdc_set/bank_future.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/bank_future.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git 
a/mcdc/mcdc/mcdc_set/bank_source.py b/mcdc/mcdc/mcdc_set/bank_source.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/bank_source.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/cell.py b/mcdc/mcdc/mcdc_set/cell.py new file mode 100644 index 000000000..6fbb9a2cf --- /dev/null +++ b/mcdc/mcdc/mcdc_set/cell.py @@ -0,0 +1,90 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def region_RPN_tokens(index, cell, data, value): + offset = cell["region_RPN_tokens_offset"] + data[offset + index] = value + + +@njit +def region_RPN_tokens_all(cell, data, value): + start = cell["region_RPN_tokens_offset"] + size = cell["region_RPN_tokens_length"] + end = start + size + data[start:end] = value + + +@njit +def region_RPN_tokens_last(cell, data, value): + start = cell["region_RPN_tokens_offset"] + size = cell["region_RPN_tokens_length"] + end = start + size + data[end - 1] = value + + +@njit +def region_RPN_tokens_chunk(start, length, cell, data, value): + start += cell["region_RPN_tokens_offset"] + end = start + length + data[start:end] = value + + +@njit +def surface_IDs(index, cell, data, value): + offset = cell["surface_IDs_offset"] + data[offset + index] = value + + +@njit +def surface_IDs_all(cell, data, value): + start = cell["surface_IDs_offset"] + size = cell["N_surface"] + end = start + size + data[start:end] = value + + +@njit +def surface_IDs_last(cell, data, value): + start = cell["surface_IDs_offset"] + size = cell["N_surface"] + end = start + size + data[end - 1] = value + + +@njit +def surface_IDs_chunk(start, length, cell, data, value): + start += cell["surface_IDs_offset"] + end = start + length + data[start:end] = value + + +@njit +def tally_IDs(index, cell, data, value): + offset = cell["tally_IDs_offset"] + data[offset + index] = value + + +@njit +def tally_IDs_all(cell, data, 
value): + start = cell["tally_IDs_offset"] + size = cell["N_tally"] + end = start + size + data[start:end] = value + + +@njit +def tally_IDs_last(cell, data, value): + start = cell["tally_IDs_offset"] + size = cell["N_tally"] + end = start + size + data[end - 1] = value + + +@njit +def tally_IDs_chunk(start, length, cell, data, value): + start += cell["tally_IDs_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/cell_tally.py b/mcdc/mcdc/mcdc_set/cell_tally.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/cell_tally.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/collision_data.py b/mcdc/mcdc/mcdc_set/collision_data.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/collision_data.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/collision_tally.py b/mcdc/mcdc/mcdc_set/collision_tally.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/collision_tally.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/data.py b/mcdc/mcdc/mcdc_set/data.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/data.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/distribution.py b/mcdc/mcdc/mcdc_set/distribution.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/distribution.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/electron_bremsstrahlung_reaction.py b/mcdc/mcdc/mcdc_set/electron_bremsstrahlung_reaction.py 
new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/electron_bremsstrahlung_reaction.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/electron_elastic_scattering_reaction.py b/mcdc/mcdc/mcdc_set/electron_elastic_scattering_reaction.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/electron_elastic_scattering_reaction.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/electron_excitation_reaction.py b/mcdc/mcdc/mcdc_set/electron_excitation_reaction.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/electron_excitation_reaction.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/electron_ionization_reaction.py b/mcdc/mcdc/mcdc_set/electron_ionization_reaction.py new file mode 100644 index 000000000..0d281038d --- /dev/null +++ b/mcdc/mcdc/mcdc_set/electron_ionization_reaction.py @@ -0,0 +1,61 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def subshell_x_IDs(index, electron_ionization_reaction, data, value): + offset = electron_ionization_reaction["subshell_x_IDs_offset"] + data[offset + index] = value + + +@njit +def subshell_x_IDs_all(electron_ionization_reaction, data, value): + start = electron_ionization_reaction["subshell_x_IDs_offset"] + size = electron_ionization_reaction["N_subshell_x"] + end = start + size + data[start:end] = value + + +@njit +def subshell_x_IDs_last(electron_ionization_reaction, data, value): + start = electron_ionization_reaction["subshell_x_IDs_offset"] + size = electron_ionization_reaction["N_subshell_x"] + end = start + size + data[end - 1] = value + + +@njit +def subshell_x_IDs_chunk(start, length, 
electron_ionization_reaction, data, value): + start += electron_ionization_reaction["subshell_x_IDs_offset"] + end = start + length + data[start:end] = value + + +@njit +def subshell_product_IDs(index, electron_ionization_reaction, data, value): + offset = electron_ionization_reaction["subshell_product_IDs_offset"] + data[offset + index] = value + + +@njit +def subshell_product_IDs_all(electron_ionization_reaction, data, value): + start = electron_ionization_reaction["subshell_product_IDs_offset"] + size = electron_ionization_reaction["N_subshell_product"] + end = start + size + data[start:end] = value + + +@njit +def subshell_product_IDs_last(electron_ionization_reaction, data, value): + start = electron_ionization_reaction["subshell_product_IDs_offset"] + size = electron_ionization_reaction["N_subshell_product"] + end = start + size + data[end - 1] = value + + +@njit +def subshell_product_IDs_chunk(start, length, electron_ionization_reaction, data, value): + start += electron_ionization_reaction["subshell_product_IDs_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/electron_reaction.py b/mcdc/mcdc/mcdc_set/electron_reaction.py new file mode 100644 index 000000000..7d09596cc --- /dev/null +++ b/mcdc/mcdc/mcdc_set/electron_reaction.py @@ -0,0 +1,32 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def xs(index, electron_reaction, data, value): + offset = electron_reaction["xs_offset"] + data[offset + index] = value + + +@njit +def xs_all(electron_reaction, data, value): + start = electron_reaction["xs_offset"] + size = electron_reaction["xs_length"] + end = start + size + data[start:end] = value + + +@njit +def xs_last(electron_reaction, data, value): + start = electron_reaction["xs_offset"] + size = electron_reaction["xs_length"] + end = start + size + data[end - 1] = value + + +@njit +def xs_chunk(start, length, electron_reaction, data, value): + start += 
electron_reaction["xs_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/element.py b/mcdc/mcdc/mcdc_set/element.py new file mode 100644 index 000000000..6f696cc57 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/element.py @@ -0,0 +1,322 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def electron_xs_energy_grid(index, element, data, value): + offset = element["electron_xs_energy_grid_offset"] + data[offset + index] = value + + +@njit +def electron_xs_energy_grid_all(element, data, value): + start = element["electron_xs_energy_grid_offset"] + size = element["electron_xs_energy_grid_length"] + end = start + size + data[start:end] = value + + +@njit +def electron_xs_energy_grid_last(element, data, value): + start = element["electron_xs_energy_grid_offset"] + size = element["electron_xs_energy_grid_length"] + end = start + size + data[end - 1] = value + + +@njit +def electron_xs_energy_grid_chunk(start, length, element, data, value): + start += element["electron_xs_energy_grid_offset"] + end = start + length + data[start:end] = value + + +@njit +def electron_total_xs(index, element, data, value): + offset = element["electron_total_xs_offset"] + data[offset + index] = value + + +@njit +def electron_total_xs_all(element, data, value): + start = element["electron_total_xs_offset"] + size = element["electron_total_xs_length"] + end = start + size + data[start:end] = value + + +@njit +def electron_total_xs_last(element, data, value): + start = element["electron_total_xs_offset"] + size = element["electron_total_xs_length"] + end = start + size + data[end - 1] = value + + +@njit +def electron_total_xs_chunk(start, length, element, data, value): + start += element["electron_total_xs_offset"] + end = start + length + data[start:end] = value + + +@njit +def electron_ionization_xs(index, element, data, value): + offset = element["electron_ionization_xs_offset"] + data[offset + index] = value + 
+ +@njit +def electron_ionization_xs_all(element, data, value): + start = element["electron_ionization_xs_offset"] + size = element["electron_ionization_xs_length"] + end = start + size + data[start:end] = value + + +@njit +def electron_ionization_xs_last(element, data, value): + start = element["electron_ionization_xs_offset"] + size = element["electron_ionization_xs_length"] + end = start + size + data[end - 1] = value + + +@njit +def electron_ionization_xs_chunk(start, length, element, data, value): + start += element["electron_ionization_xs_offset"] + end = start + length + data[start:end] = value + + +@njit +def electron_elastic_xs(index, element, data, value): + offset = element["electron_elastic_xs_offset"] + data[offset + index] = value + + +@njit +def electron_elastic_xs_all(element, data, value): + start = element["electron_elastic_xs_offset"] + size = element["electron_elastic_xs_length"] + end = start + size + data[start:end] = value + + +@njit +def electron_elastic_xs_last(element, data, value): + start = element["electron_elastic_xs_offset"] + size = element["electron_elastic_xs_length"] + end = start + size + data[end - 1] = value + + +@njit +def electron_elastic_xs_chunk(start, length, element, data, value): + start += element["electron_elastic_xs_offset"] + end = start + length + data[start:end] = value + + +@njit +def electron_excitation_xs(index, element, data, value): + offset = element["electron_excitation_xs_offset"] + data[offset + index] = value + + +@njit +def electron_excitation_xs_all(element, data, value): + start = element["electron_excitation_xs_offset"] + size = element["electron_excitation_xs_length"] + end = start + size + data[start:end] = value + + +@njit +def electron_excitation_xs_last(element, data, value): + start = element["electron_excitation_xs_offset"] + size = element["electron_excitation_xs_length"] + end = start + size + data[end - 1] = value + + +@njit +def electron_excitation_xs_chunk(start, length, element, data, 
value): + start += element["electron_excitation_xs_offset"] + end = start + length + data[start:end] = value + + +@njit +def electron_bremsstrahlung_xs(index, element, data, value): + offset = element["electron_bremsstrahlung_xs_offset"] + data[offset + index] = value + + +@njit +def electron_bremsstrahlung_xs_all(element, data, value): + start = element["electron_bremsstrahlung_xs_offset"] + size = element["electron_bremsstrahlung_xs_length"] + end = start + size + data[start:end] = value + + +@njit +def electron_bremsstrahlung_xs_last(element, data, value): + start = element["electron_bremsstrahlung_xs_offset"] + size = element["electron_bremsstrahlung_xs_length"] + end = start + size + data[end - 1] = value + + +@njit +def electron_bremsstrahlung_xs_chunk(start, length, element, data, value): + start += element["electron_bremsstrahlung_xs_offset"] + end = start + length + data[start:end] = value + + +@njit +def electron_ionization_reaction_IDs(index, element, data, value): + offset = element["electron_ionization_reaction_IDs_offset"] + data[offset + index] = value + + +@njit +def electron_ionization_reaction_IDs_all(element, data, value): + start = element["electron_ionization_reaction_IDs_offset"] + size = element["N_electron_ionization_reaction"] + end = start + size + data[start:end] = value + + +@njit +def electron_ionization_reaction_IDs_last(element, data, value): + start = element["electron_ionization_reaction_IDs_offset"] + size = element["N_electron_ionization_reaction"] + end = start + size + data[end - 1] = value + + +@njit +def electron_ionization_reaction_IDs_chunk(start, length, element, data, value): + start += element["electron_ionization_reaction_IDs_offset"] + end = start + length + data[start:end] = value + + +@njit +def electron_elastic_scattering_reaction_IDs(index, element, data, value): + offset = element["electron_elastic_scattering_reaction_IDs_offset"] + data[offset + index] = value + + +@njit +def 
electron_elastic_scattering_reaction_IDs_all(element, data, value): + start = element["electron_elastic_scattering_reaction_IDs_offset"] + size = element["N_electron_elastic_scattering_reaction"] + end = start + size + data[start:end] = value + + +@njit +def electron_elastic_scattering_reaction_IDs_last(element, data, value): + start = element["electron_elastic_scattering_reaction_IDs_offset"] + size = element["N_electron_elastic_scattering_reaction"] + end = start + size + data[end - 1] = value + + +@njit +def electron_elastic_scattering_reaction_IDs_chunk(start, length, element, data, value): + start += element["electron_elastic_scattering_reaction_IDs_offset"] + end = start + length + data[start:end] = value + + +@njit +def electron_excitation_reaction_IDs(index, element, data, value): + offset = element["electron_excitation_reaction_IDs_offset"] + data[offset + index] = value + + +@njit +def electron_excitation_reaction_IDs_all(element, data, value): + start = element["electron_excitation_reaction_IDs_offset"] + size = element["N_electron_excitation_reaction"] + end = start + size + data[start:end] = value + + +@njit +def electron_excitation_reaction_IDs_last(element, data, value): + start = element["electron_excitation_reaction_IDs_offset"] + size = element["N_electron_excitation_reaction"] + end = start + size + data[end - 1] = value + + +@njit +def electron_excitation_reaction_IDs_chunk(start, length, element, data, value): + start += element["electron_excitation_reaction_IDs_offset"] + end = start + length + data[start:end] = value + + +@njit +def electron_bremsstrahlung_reaction_IDs(index, element, data, value): + offset = element["electron_bremsstrahlung_reaction_IDs_offset"] + data[offset + index] = value + + +@njit +def electron_bremsstrahlung_reaction_IDs_all(element, data, value): + start = element["electron_bremsstrahlung_reaction_IDs_offset"] + size = element["N_electron_bremsstrahlung_reaction"] + end = start + size + data[start:end] = value + + 
+@njit +def electron_bremsstrahlung_reaction_IDs_last(element, data, value): + start = element["electron_bremsstrahlung_reaction_IDs_offset"] + size = element["N_electron_bremsstrahlung_reaction"] + end = start + size + data[end - 1] = value + + +@njit +def electron_bremsstrahlung_reaction_IDs_chunk(start, length, element, data, value): + start += element["electron_bremsstrahlung_reaction_IDs_offset"] + end = start + length + data[start:end] = value + + +@njit +def electron_ionization_subshell_binding_energy(index, element, data, value): + offset = element["electron_ionization_subshell_binding_energy_offset"] + data[offset + index] = value + + +@njit +def electron_ionization_subshell_binding_energy_all(element, data, value): + start = element["electron_ionization_subshell_binding_energy_offset"] + size = element["electron_ionization_subshell_binding_energy_length"] + end = start + size + data[start:end] = value + + +@njit +def electron_ionization_subshell_binding_energy_last(element, data, value): + start = element["electron_ionization_subshell_binding_energy_offset"] + size = element["electron_ionization_subshell_binding_energy_length"] + end = start + size + data[end - 1] = value + + +@njit +def electron_ionization_subshell_binding_energy_chunk(start, length, element, data, value): + start += element["electron_ionization_subshell_binding_energy_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/evaporation_distribution.py b/mcdc/mcdc/mcdc_set/evaporation_distribution.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/evaporation_distribution.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/global_tally.py b/mcdc/mcdc/mcdc_set/global_tally.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/global_tally.py @@ -0,0 +1,3 @@ +# The following is automatically 
generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/gpu_meta.py b/mcdc/mcdc/mcdc_set/gpu_meta.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/gpu_meta.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/implicit_capture.py b/mcdc/mcdc/mcdc_set/implicit_capture.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/implicit_capture.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/kalbach_mann_distribution.py b/mcdc/mcdc/mcdc_set/kalbach_mann_distribution.py new file mode 100644 index 000000000..883f15846 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/kalbach_mann_distribution.py @@ -0,0 +1,206 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def energy(index, kalbach_mann_distribution, data, value): + offset = kalbach_mann_distribution["energy_offset"] + data[offset + index] = value + + +@njit +def energy_all(kalbach_mann_distribution, data, value): + start = kalbach_mann_distribution["energy_offset"] + size = kalbach_mann_distribution["energy_length"] + end = start + size + data[start:end] = value + + +@njit +def energy_last(kalbach_mann_distribution, data, value): + start = kalbach_mann_distribution["energy_offset"] + size = kalbach_mann_distribution["energy_length"] + end = start + size + data[end - 1] = value + + +@njit +def energy_chunk(start, length, kalbach_mann_distribution, data, value): + start += kalbach_mann_distribution["energy_offset"] + end = start + length + data[start:end] = value + + +@njit +def offset(index, kalbach_mann_distribution, data, value): + offset = kalbach_mann_distribution["offset_offset"] + data[offset + index] = value + + +@njit +def offset_all(kalbach_mann_distribution, data, value): + start = 
kalbach_mann_distribution["offset_offset"] + size = kalbach_mann_distribution["offset_length"] + end = start + size + data[start:end] = value + + +@njit +def offset_last(kalbach_mann_distribution, data, value): + start = kalbach_mann_distribution["offset_offset"] + size = kalbach_mann_distribution["offset_length"] + end = start + size + data[end - 1] = value + + +@njit +def offset_chunk(start, length, kalbach_mann_distribution, data, value): + start += kalbach_mann_distribution["offset_offset"] + end = start + length + data[start:end] = value + + +@njit +def energy_out(index, kalbach_mann_distribution, data, value): + offset = kalbach_mann_distribution["energy_out_offset"] + data[offset + index] = value + + +@njit +def energy_out_all(kalbach_mann_distribution, data, value): + start = kalbach_mann_distribution["energy_out_offset"] + size = kalbach_mann_distribution["energy_out_length"] + end = start + size + data[start:end] = value + + +@njit +def energy_out_last(kalbach_mann_distribution, data, value): + start = kalbach_mann_distribution["energy_out_offset"] + size = kalbach_mann_distribution["energy_out_length"] + end = start + size + data[end - 1] = value + + +@njit +def energy_out_chunk(start, length, kalbach_mann_distribution, data, value): + start += kalbach_mann_distribution["energy_out_offset"] + end = start + length + data[start:end] = value + + +@njit +def pdf(index, kalbach_mann_distribution, data, value): + offset = kalbach_mann_distribution["pdf_offset"] + data[offset + index] = value + + +@njit +def pdf_all(kalbach_mann_distribution, data, value): + start = kalbach_mann_distribution["pdf_offset"] + size = kalbach_mann_distribution["pdf_length"] + end = start + size + data[start:end] = value + + +@njit +def pdf_last(kalbach_mann_distribution, data, value): + start = kalbach_mann_distribution["pdf_offset"] + size = kalbach_mann_distribution["pdf_length"] + end = start + size + data[end - 1] = value + + +@njit +def pdf_chunk(start, length, 
kalbach_mann_distribution, data, value): + start += kalbach_mann_distribution["pdf_offset"] + end = start + length + data[start:end] = value + + +@njit +def cdf(index, kalbach_mann_distribution, data, value): + offset = kalbach_mann_distribution["cdf_offset"] + data[offset + index] = value + + +@njit +def cdf_all(kalbach_mann_distribution, data, value): + start = kalbach_mann_distribution["cdf_offset"] + size = kalbach_mann_distribution["cdf_length"] + end = start + size + data[start:end] = value + + +@njit +def cdf_last(kalbach_mann_distribution, data, value): + start = kalbach_mann_distribution["cdf_offset"] + size = kalbach_mann_distribution["cdf_length"] + end = start + size + data[end - 1] = value + + +@njit +def cdf_chunk(start, length, kalbach_mann_distribution, data, value): + start += kalbach_mann_distribution["cdf_offset"] + end = start + length + data[start:end] = value + + +@njit +def precompound_factor(index, kalbach_mann_distribution, data, value): + offset = kalbach_mann_distribution["precompound_factor_offset"] + data[offset + index] = value + + +@njit +def precompound_factor_all(kalbach_mann_distribution, data, value): + start = kalbach_mann_distribution["precompound_factor_offset"] + size = kalbach_mann_distribution["precompound_factor_length"] + end = start + size + data[start:end] = value + + +@njit +def precompound_factor_last(kalbach_mann_distribution, data, value): + start = kalbach_mann_distribution["precompound_factor_offset"] + size = kalbach_mann_distribution["precompound_factor_length"] + end = start + size + data[end - 1] = value + + +@njit +def precompound_factor_chunk(start, length, kalbach_mann_distribution, data, value): + start += kalbach_mann_distribution["precompound_factor_offset"] + end = start + length + data[start:end] = value + + +@njit +def angular_slope(index, kalbach_mann_distribution, data, value): + offset = kalbach_mann_distribution["angular_slope_offset"] + data[offset + index] = value + + +@njit +def 
angular_slope_all(kalbach_mann_distribution, data, value): + start = kalbach_mann_distribution["angular_slope_offset"] + size = kalbach_mann_distribution["angular_slope_length"] + end = start + size + data[start:end] = value + + +@njit +def angular_slope_last(kalbach_mann_distribution, data, value): + start = kalbach_mann_distribution["angular_slope_offset"] + size = kalbach_mann_distribution["angular_slope_length"] + end = start + size + data[end - 1] = value + + +@njit +def angular_slope_chunk(start, length, kalbach_mann_distribution, data, value): + start += kalbach_mann_distribution["angular_slope_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/lattice.py b/mcdc/mcdc/mcdc_set/lattice.py new file mode 100644 index 000000000..18a2f9076 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/lattice.py @@ -0,0 +1,18 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def universe_IDs(index_1, index_2, index_3, lattice, data, value): + offset = lattice["universe_IDs_offset"] + stride_2 = lattice["Ny"] + stride_3 = lattice["Nz"] + data[offset + index_1 * stride_2 * stride_3 + index_2 * stride_3 + index_3] = value + + +@njit +def universe_IDs_chunk(start, length, lattice, data, value): + start += lattice["universe_IDs_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/level_scattering_distribution.py b/mcdc/mcdc/mcdc_set/level_scattering_distribution.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/level_scattering_distribution.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/material.py b/mcdc/mcdc/mcdc_set/material.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/material.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit 
diff --git a/mcdc/mcdc/mcdc_set/maxwellian_distribution.py b/mcdc/mcdc/mcdc_set/maxwellian_distribution.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/maxwellian_distribution.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/mesh.py b/mcdc/mcdc/mcdc_set/mesh.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/mesh.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/mesh_tally.py b/mcdc/mcdc/mcdc_set/mesh_tally.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/mesh_tally.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/multi_table_distribution.py b/mcdc/mcdc/mcdc_set/multi_table_distribution.py new file mode 100644 index 000000000..ad00d4019 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/multi_table_distribution.py @@ -0,0 +1,148 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def grid(index, multi_table_distribution, data, value): + offset = multi_table_distribution["grid_offset"] + data[offset + index] = value + + +@njit +def grid_all(multi_table_distribution, data, value): + start = multi_table_distribution["grid_offset"] + size = multi_table_distribution["grid_length"] + end = start + size + data[start:end] = value + + +@njit +def grid_last(multi_table_distribution, data, value): + start = multi_table_distribution["grid_offset"] + size = multi_table_distribution["grid_length"] + end = start + size + data[end - 1] = value + + +@njit +def grid_chunk(start, length, multi_table_distribution, data, value): + start += multi_table_distribution["grid_offset"] + end = start + length + data[start:end] = value + + +@njit +def offset(index, 
multi_table_distribution, data, value): + offset = multi_table_distribution["offset_offset"] + data[offset + index] = value + + +@njit +def offset_all(multi_table_distribution, data, value): + start = multi_table_distribution["offset_offset"] + size = multi_table_distribution["offset_length"] + end = start + size + data[start:end] = value + + +@njit +def offset_last(multi_table_distribution, data, value): + start = multi_table_distribution["offset_offset"] + size = multi_table_distribution["offset_length"] + end = start + size + data[end - 1] = value + + +@njit +def offset_chunk(start, length, multi_table_distribution, data, value): + start += multi_table_distribution["offset_offset"] + end = start + length + data[start:end] = value + + +@njit +def value(index, multi_table_distribution, data, value): + offset = multi_table_distribution["value_offset"] + data[offset + index] = value + + +@njit +def value_all(multi_table_distribution, data, value): + start = multi_table_distribution["value_offset"] + size = multi_table_distribution["value_length"] + end = start + size + data[start:end] = value + + +@njit +def value_last(multi_table_distribution, data, value): + start = multi_table_distribution["value_offset"] + size = multi_table_distribution["value_length"] + end = start + size + data[end - 1] = value + + +@njit +def value_chunk(start, length, multi_table_distribution, data, value): + start += multi_table_distribution["value_offset"] + end = start + length + data[start:end] = value + + +@njit +def pdf(index, multi_table_distribution, data, value): + offset = multi_table_distribution["pdf_offset"] + data[offset + index] = value + + +@njit +def pdf_all(multi_table_distribution, data, value): + start = multi_table_distribution["pdf_offset"] + size = multi_table_distribution["pdf_length"] + end = start + size + data[start:end] = value + + +@njit +def pdf_last(multi_table_distribution, data, value): + start = multi_table_distribution["pdf_offset"] + size = 
multi_table_distribution["pdf_length"] + end = start + size + data[end - 1] = value + + +@njit +def pdf_chunk(start, length, multi_table_distribution, data, value): + start += multi_table_distribution["pdf_offset"] + end = start + length + data[start:end] = value + + +@njit +def cdf(index, multi_table_distribution, data, value): + offset = multi_table_distribution["cdf_offset"] + data[offset + index] = value + + +@njit +def cdf_all(multi_table_distribution, data, value): + start = multi_table_distribution["cdf_offset"] + size = multi_table_distribution["cdf_length"] + end = start + size + data[start:end] = value + + +@njit +def cdf_last(multi_table_distribution, data, value): + start = multi_table_distribution["cdf_offset"] + size = multi_table_distribution["cdf_length"] + end = start + size + data[end - 1] = value + + +@njit +def cdf_chunk(start, length, multi_table_distribution, data, value): + start += multi_table_distribution["cdf_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/multigroup_material.py b/mcdc/mcdc/mcdc_set/multigroup_material.py new file mode 100644 index 000000000..67224a700 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/multigroup_material.py @@ -0,0 +1,385 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def mgxs_speed(index, multigroup_material, data, value): + offset = multigroup_material["mgxs_speed_offset"] + data[offset + index] = value + + +@njit +def mgxs_speed_all(multigroup_material, data, value): + start = multigroup_material["mgxs_speed_offset"] + size = multigroup_material["G"] + end = start + size + data[start:end] = value + + +@njit +def mgxs_speed_last(multigroup_material, data, value): + start = multigroup_material["mgxs_speed_offset"] + size = multigroup_material["G"] + end = start + size + data[end - 1] = value + + +@njit +def mgxs_speed_chunk(start, length, multigroup_material, data, value): + start += 
multigroup_material["mgxs_speed_offset"] + end = start + length + data[start:end] = value + + +@njit +def mgxs_decay_rate(index, multigroup_material, data, value): + offset = multigroup_material["mgxs_decay_rate_offset"] + data[offset + index] = value + + +@njit +def mgxs_decay_rate_all(multigroup_material, data, value): + start = multigroup_material["mgxs_decay_rate_offset"] + size = multigroup_material["J"] + end = start + size + data[start:end] = value + + +@njit +def mgxs_decay_rate_last(multigroup_material, data, value): + start = multigroup_material["mgxs_decay_rate_offset"] + size = multigroup_material["J"] + end = start + size + data[end - 1] = value + + +@njit +def mgxs_decay_rate_chunk(start, length, multigroup_material, data, value): + start += multigroup_material["mgxs_decay_rate_offset"] + end = start + length + data[start:end] = value + + +@njit +def mgxs_capture(index, multigroup_material, data, value): + offset = multigroup_material["mgxs_capture_offset"] + data[offset + index] = value + + +@njit +def mgxs_capture_all(multigroup_material, data, value): + start = multigroup_material["mgxs_capture_offset"] + size = multigroup_material["G"] + end = start + size + data[start:end] = value + + +@njit +def mgxs_capture_last(multigroup_material, data, value): + start = multigroup_material["mgxs_capture_offset"] + size = multigroup_material["G"] + end = start + size + data[end - 1] = value + + +@njit +def mgxs_capture_chunk(start, length, multigroup_material, data, value): + start += multigroup_material["mgxs_capture_offset"] + end = start + length + data[start:end] = value + + +@njit +def mgxs_scatter(index, multigroup_material, data, value): + offset = multigroup_material["mgxs_scatter_offset"] + data[offset + index] = value + + +@njit +def mgxs_scatter_all(multigroup_material, data, value): + start = multigroup_material["mgxs_scatter_offset"] + size = multigroup_material["G"] + end = start + size + data[start:end] = value + + +@njit +def 
mgxs_scatter_last(multigroup_material, data, value): + start = multigroup_material["mgxs_scatter_offset"] + size = multigroup_material["G"] + end = start + size + data[end - 1] = value + + +@njit +def mgxs_scatter_chunk(start, length, multigroup_material, data, value): + start += multigroup_material["mgxs_scatter_offset"] + end = start + length + data[start:end] = value + + +@njit +def mgxs_fission(index, multigroup_material, data, value): + offset = multigroup_material["mgxs_fission_offset"] + data[offset + index] = value + + +@njit +def mgxs_fission_all(multigroup_material, data, value): + start = multigroup_material["mgxs_fission_offset"] + size = multigroup_material["G"] + end = start + size + data[start:end] = value + + +@njit +def mgxs_fission_last(multigroup_material, data, value): + start = multigroup_material["mgxs_fission_offset"] + size = multigroup_material["G"] + end = start + size + data[end - 1] = value + + +@njit +def mgxs_fission_chunk(start, length, multigroup_material, data, value): + start += multigroup_material["mgxs_fission_offset"] + end = start + length + data[start:end] = value + + +@njit +def mgxs_total(index, multigroup_material, data, value): + offset = multigroup_material["mgxs_total_offset"] + data[offset + index] = value + + +@njit +def mgxs_total_all(multigroup_material, data, value): + start = multigroup_material["mgxs_total_offset"] + size = multigroup_material["G"] + end = start + size + data[start:end] = value + + +@njit +def mgxs_total_last(multigroup_material, data, value): + start = multigroup_material["mgxs_total_offset"] + size = multigroup_material["G"] + end = start + size + data[end - 1] = value + + +@njit +def mgxs_total_chunk(start, length, multigroup_material, data, value): + start += multigroup_material["mgxs_total_offset"] + end = start + length + data[start:end] = value + + +@njit +def mgxs_nu_s(index, multigroup_material, data, value): + offset = multigroup_material["mgxs_nu_s_offset"] + data[offset + index] = 
@njit
def mgxs_nu_d_vector(index_1, multigroup_material, data, value):
    """Set the whole delayed-neutron row `index_1` of nu_d to `value`.

    Rows of length `J` (number of delayed groups) are laid out
    contiguously starting at the material's `mgxs_nu_d_offset` in the
    flat `data` array.
    """
    offset = multigroup_material["mgxs_nu_d_offset"]
    stride = multigroup_material["J"]
    start = offset + index_1 * stride
    end = start + stride
    # BUG FIX: the generated code read `data[start:end] - value`, a no-op
    # subtraction expression; the slice must be assigned.
    data[start:end] = value
@njit
def mgxs_chi_s_vector(index_1, multigroup_material, data, value):
    """Set the whole scattering-spectrum (chi_s) row `index_1` to `value`.

    Rows of length `G` (number of energy groups) are laid out
    contiguously starting at the material's `mgxs_chi_s_offset` in the
    flat `data` array.
    """
    offset = multigroup_material["mgxs_chi_s_offset"]
    stride = multigroup_material["G"]
    start = offset + index_1 * stride
    end = start + stride
    # BUG FIX: the generated code read `data[start:end] - value`, a no-op
    # subtraction expression; the slice must be assigned.
    data[start:end] = value
@njit
def mgxs_chi_p_vector(index_1, multigroup_material, data, value):
    """Set the whole prompt-fission-spectrum (chi_p) row `index_1` to `value`.

    Rows of length `G` (number of energy groups) are laid out
    contiguously starting at the material's `mgxs_chi_p_offset` in the
    flat `data` array.
    """
    offset = multigroup_material["mgxs_chi_p_offset"]
    stride = multigroup_material["G"]
    start = offset + index_1 * stride
    end = start + stride
    # BUG FIX: the generated code read `data[start:end] - value`, a no-op
    # subtraction expression; the slice must be assigned.
    data[start:end] = value


@njit
def mgxs_chi_d_vector(index_1, multigroup_material, data, value):
    """Set the whole delayed-fission-spectrum (chi_d) row `index_1` to `value`.

    Rows of length `G` (number of energy groups) are laid out
    contiguously starting at the material's `mgxs_chi_d_offset` in the
    flat `data` array.
    """
    offset = multigroup_material["mgxs_chi_d_offset"]
    stride = multigroup_material["G"]
    start = offset + index_1 * stride
    end = start + stride
    # BUG FIX: the generated code read `data[start:end] - value`, a no-op
    # subtraction expression; the slice must be assigned.
    data[start:end] = value
native_material["N_nuclide"] + end = start + size + data[start:end] = value + + +@njit +def nuclide_IDs_last(native_material, data, value): + start = native_material["nuclide_IDs_offset"] + size = native_material["N_nuclide"] + end = start + size + data[end - 1] = value + + +@njit +def nuclide_IDs_chunk(start, length, native_material, data, value): + start += native_material["nuclide_IDs_offset"] + end = start + length + data[start:end] = value + + +@njit +def element_IDs(index, native_material, data, value): + offset = native_material["element_IDs_offset"] + data[offset + index] = value + + +@njit +def element_IDs_all(native_material, data, value): + start = native_material["element_IDs_offset"] + size = native_material["N_element"] + end = start + size + data[start:end] = value + + +@njit +def element_IDs_last(native_material, data, value): + start = native_material["element_IDs_offset"] + size = native_material["N_element"] + end = start + size + data[end - 1] = value + + +@njit +def element_IDs_chunk(start, length, native_material, data, value): + start += native_material["element_IDs_offset"] + end = start + length + data[start:end] = value + + +@njit +def nuclide_densities(index, native_material, data, value): + offset = native_material["nuclide_densities_offset"] + data[offset + index] = value + + +@njit +def nuclide_densities_all(native_material, data, value): + start = native_material["nuclide_densities_offset"] + size = native_material["nuclide_densities_length"] + end = start + size + data[start:end] = value + + +@njit +def nuclide_densities_last(native_material, data, value): + start = native_material["nuclide_densities_offset"] + size = native_material["nuclide_densities_length"] + end = start + size + data[end - 1] = value + + +@njit +def nuclide_densities_chunk(start, length, native_material, data, value): + start += native_material["nuclide_densities_offset"] + end = start + length + data[start:end] = value + + +@njit +def element_densities(index, 
native_material, data, value): + offset = native_material["element_densities_offset"] + data[offset + index] = value + + +@njit +def element_densities_all(native_material, data, value): + start = native_material["element_densities_offset"] + size = native_material["element_densities_length"] + end = start + size + data[start:end] = value + + +@njit +def element_densities_last(native_material, data, value): + start = native_material["element_densities_offset"] + size = native_material["element_densities_length"] + end = start + size + data[end - 1] = value + + +@njit +def element_densities_chunk(start, length, native_material, data, value): + start += native_material["element_densities_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/nbody_distribution.py b/mcdc/mcdc/mcdc_set/nbody_distribution.py new file mode 100644 index 000000000..1539d143c --- /dev/null +++ b/mcdc/mcdc/mcdc_set/nbody_distribution.py @@ -0,0 +1,90 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def value(index, nbody_distribution, data, value): + offset = nbody_distribution["value_offset"] + data[offset + index] = value + + +@njit +def value_all(nbody_distribution, data, value): + start = nbody_distribution["value_offset"] + size = nbody_distribution["value_length"] + end = start + size + data[start:end] = value + + +@njit +def value_last(nbody_distribution, data, value): + start = nbody_distribution["value_offset"] + size = nbody_distribution["value_length"] + end = start + size + data[end - 1] = value + + +@njit +def value_chunk(start, length, nbody_distribution, data, value): + start += nbody_distribution["value_offset"] + end = start + length + data[start:end] = value + + +@njit +def pdf(index, nbody_distribution, data, value): + offset = nbody_distribution["pdf_offset"] + data[offset + index] = value + + +@njit +def pdf_all(nbody_distribution, data, value): + start = 
nbody_distribution["pdf_offset"] + size = nbody_distribution["pdf_length"] + end = start + size + data[start:end] = value + + +@njit +def pdf_last(nbody_distribution, data, value): + start = nbody_distribution["pdf_offset"] + size = nbody_distribution["pdf_length"] + end = start + size + data[end - 1] = value + + +@njit +def pdf_chunk(start, length, nbody_distribution, data, value): + start += nbody_distribution["pdf_offset"] + end = start + length + data[start:end] = value + + +@njit +def cdf(index, nbody_distribution, data, value): + offset = nbody_distribution["cdf_offset"] + data[offset + index] = value + + +@njit +def cdf_all(nbody_distribution, data, value): + start = nbody_distribution["cdf_offset"] + size = nbody_distribution["cdf_length"] + end = start + size + data[start:end] = value + + +@njit +def cdf_last(nbody_distribution, data, value): + start = nbody_distribution["cdf_offset"] + size = nbody_distribution["cdf_length"] + end = start + size + data[end - 1] = value + + +@njit +def cdf_chunk(start, length, nbody_distribution, data, value): + start += nbody_distribution["cdf_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/neutron_capture_reaction.py b/mcdc/mcdc/mcdc_set/neutron_capture_reaction.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/neutron_capture_reaction.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/neutron_elastic_scattering_reaction.py b/mcdc/mcdc/mcdc_set/neutron_elastic_scattering_reaction.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/neutron_elastic_scattering_reaction.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/neutron_fission_reaction.py b/mcdc/mcdc/mcdc_set/neutron_fission_reaction.py new file mode 100644 index 
@njit
def spectrum_probability_vector(index_1, neutron_inelastic_scattering_reaction, data, value):
    """Set the whole spectrum-probability row `index_1` to `value`.

    Rows of length `N_spectrum` are laid out contiguously starting at
    the reaction's `spectrum_probability_offset` in the flat `data`
    array.
    """
    offset = neutron_inelastic_scattering_reaction["spectrum_probability_offset"]
    stride = neutron_inelastic_scattering_reaction["N_spectrum"]
    start = offset + index_1 * stride
    end = start + stride
    # BUG FIX: the generated code read `data[start:end] - value`, a no-op
    # subtraction expression; the slice must be assigned.
    data[start:end] = value
+def spectrum_probability(index_1, index_2, neutron_inelastic_scattering_reaction, data, value): + offset = neutron_inelastic_scattering_reaction["spectrum_probability_offset"] + stride = neutron_inelastic_scattering_reaction["N_spectrum"] + data[offset + index_1 * stride + index_2] = value + + +@njit +def spectrum_probability_chunk(start, length, neutron_inelastic_scattering_reaction, data, value): + start += neutron_inelastic_scattering_reaction["spectrum_probability_offset"] + end = start + length + data[start:end] = value + + +@njit +def energy_spectrum_IDs(index, neutron_inelastic_scattering_reaction, data, value): + offset = neutron_inelastic_scattering_reaction["energy_spectrum_IDs_offset"] + data[offset + index] = value + + +@njit +def energy_spectrum_IDs_all(neutron_inelastic_scattering_reaction, data, value): + start = neutron_inelastic_scattering_reaction["energy_spectrum_IDs_offset"] + size = neutron_inelastic_scattering_reaction["N_energy_spectrum"] + end = start + size + data[start:end] = value + + +@njit +def energy_spectrum_IDs_last(neutron_inelastic_scattering_reaction, data, value): + start = neutron_inelastic_scattering_reaction["energy_spectrum_IDs_offset"] + size = neutron_inelastic_scattering_reaction["N_energy_spectrum"] + end = start + size + data[end - 1] = value + + +@njit +def energy_spectrum_IDs_chunk(start, length, neutron_inelastic_scattering_reaction, data, value): + start += neutron_inelastic_scattering_reaction["energy_spectrum_IDs_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/neutron_reaction.py b/mcdc/mcdc/mcdc_set/neutron_reaction.py new file mode 100644 index 000000000..089e383a6 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/neutron_reaction.py @@ -0,0 +1,32 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def xs(index, neutron_reaction, data, value): + offset = neutron_reaction["xs_offset"] + data[offset + index] = value + + +@njit +def 
xs_all(neutron_reaction, data, value): + start = neutron_reaction["xs_offset"] + size = neutron_reaction["xs_length"] + end = start + size + data[start:end] = value + + +@njit +def xs_last(neutron_reaction, data, value): + start = neutron_reaction["xs_offset"] + size = neutron_reaction["xs_length"] + end = start + size + data[end - 1] = value + + +@njit +def xs_chunk(start, length, neutron_reaction, data, value): + start += neutron_reaction["xs_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/none_data.py b/mcdc/mcdc/mcdc_set/none_data.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/none_data.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/none_distribution.py b/mcdc/mcdc/mcdc_set/none_distribution.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/none_distribution.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/nuclide.py b/mcdc/mcdc/mcdc_set/nuclide.py new file mode 100644 index 000000000..257d62580 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/nuclide.py @@ -0,0 +1,380 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def neutron_xs_energy_grid(index, nuclide, data, value): + offset = nuclide["neutron_xs_energy_grid_offset"] + data[offset + index] = value + + +@njit +def neutron_xs_energy_grid_all(nuclide, data, value): + start = nuclide["neutron_xs_energy_grid_offset"] + size = nuclide["neutron_xs_energy_grid_length"] + end = start + size + data[start:end] = value + + +@njit +def neutron_xs_energy_grid_last(nuclide, data, value): + start = nuclide["neutron_xs_energy_grid_offset"] + size = nuclide["neutron_xs_energy_grid_length"] + end = start + size + data[end - 1] = value + + +@njit +def 
neutron_xs_energy_grid_chunk(start, length, nuclide, data, value): + start += nuclide["neutron_xs_energy_grid_offset"] + end = start + length + data[start:end] = value + + +@njit +def neutron_total_xs(index, nuclide, data, value): + offset = nuclide["neutron_total_xs_offset"] + data[offset + index] = value + + +@njit +def neutron_total_xs_all(nuclide, data, value): + start = nuclide["neutron_total_xs_offset"] + size = nuclide["neutron_total_xs_length"] + end = start + size + data[start:end] = value + + +@njit +def neutron_total_xs_last(nuclide, data, value): + start = nuclide["neutron_total_xs_offset"] + size = nuclide["neutron_total_xs_length"] + end = start + size + data[end - 1] = value + + +@njit +def neutron_total_xs_chunk(start, length, nuclide, data, value): + start += nuclide["neutron_total_xs_offset"] + end = start + length + data[start:end] = value + + +@njit +def neutron_elastic_xs(index, nuclide, data, value): + offset = nuclide["neutron_elastic_xs_offset"] + data[offset + index] = value + + +@njit +def neutron_elastic_xs_all(nuclide, data, value): + start = nuclide["neutron_elastic_xs_offset"] + size = nuclide["neutron_elastic_xs_length"] + end = start + size + data[start:end] = value + + +@njit +def neutron_elastic_xs_last(nuclide, data, value): + start = nuclide["neutron_elastic_xs_offset"] + size = nuclide["neutron_elastic_xs_length"] + end = start + size + data[end - 1] = value + + +@njit +def neutron_elastic_xs_chunk(start, length, nuclide, data, value): + start += nuclide["neutron_elastic_xs_offset"] + end = start + length + data[start:end] = value + + +@njit +def neutron_capture_xs(index, nuclide, data, value): + offset = nuclide["neutron_capture_xs_offset"] + data[offset + index] = value + + +@njit +def neutron_capture_xs_all(nuclide, data, value): + start = nuclide["neutron_capture_xs_offset"] + size = nuclide["neutron_capture_xs_length"] + end = start + size + data[start:end] = value + + +@njit +def neutron_capture_xs_last(nuclide, data, 
value): + start = nuclide["neutron_capture_xs_offset"] + size = nuclide["neutron_capture_xs_length"] + end = start + size + data[end - 1] = value + + +@njit +def neutron_capture_xs_chunk(start, length, nuclide, data, value): + start += nuclide["neutron_capture_xs_offset"] + end = start + length + data[start:end] = value + + +@njit +def neutron_inelastic_xs(index, nuclide, data, value): + offset = nuclide["neutron_inelastic_xs_offset"] + data[offset + index] = value + + +@njit +def neutron_inelastic_xs_all(nuclide, data, value): + start = nuclide["neutron_inelastic_xs_offset"] + size = nuclide["neutron_inelastic_xs_length"] + end = start + size + data[start:end] = value + + +@njit +def neutron_inelastic_xs_last(nuclide, data, value): + start = nuclide["neutron_inelastic_xs_offset"] + size = nuclide["neutron_inelastic_xs_length"] + end = start + size + data[end - 1] = value + + +@njit +def neutron_inelastic_xs_chunk(start, length, nuclide, data, value): + start += nuclide["neutron_inelastic_xs_offset"] + end = start + length + data[start:end] = value + + +@njit +def neutron_fission_xs(index, nuclide, data, value): + offset = nuclide["neutron_fission_xs_offset"] + data[offset + index] = value + + +@njit +def neutron_fission_xs_all(nuclide, data, value): + start = nuclide["neutron_fission_xs_offset"] + size = nuclide["neutron_fission_xs_length"] + end = start + size + data[start:end] = value + + +@njit +def neutron_fission_xs_last(nuclide, data, value): + start = nuclide["neutron_fission_xs_offset"] + size = nuclide["neutron_fission_xs_length"] + end = start + size + data[end - 1] = value + + +@njit +def neutron_fission_xs_chunk(start, length, nuclide, data, value): + start += nuclide["neutron_fission_xs_offset"] + end = start + length + data[start:end] = value + + +@njit +def neutron_elastic_scattering_reaction_IDs(index, nuclide, data, value): + offset = nuclide["neutron_elastic_scattering_reaction_IDs_offset"] + data[offset + index] = value + + +@njit +def 
neutron_elastic_scattering_reaction_IDs_all(nuclide, data, value): + start = nuclide["neutron_elastic_scattering_reaction_IDs_offset"] + size = nuclide["N_neutron_elastic_scattering_reaction"] + end = start + size + data[start:end] = value + + +@njit +def neutron_elastic_scattering_reaction_IDs_last(nuclide, data, value): + start = nuclide["neutron_elastic_scattering_reaction_IDs_offset"] + size = nuclide["N_neutron_elastic_scattering_reaction"] + end = start + size + data[end - 1] = value + + +@njit +def neutron_elastic_scattering_reaction_IDs_chunk(start, length, nuclide, data, value): + start += nuclide["neutron_elastic_scattering_reaction_IDs_offset"] + end = start + length + data[start:end] = value + + +@njit +def neutron_capture_reaction_IDs(index, nuclide, data, value): + offset = nuclide["neutron_capture_reaction_IDs_offset"] + data[offset + index] = value + + +@njit +def neutron_capture_reaction_IDs_all(nuclide, data, value): + start = nuclide["neutron_capture_reaction_IDs_offset"] + size = nuclide["N_neutron_capture_reaction"] + end = start + size + data[start:end] = value + + +@njit +def neutron_capture_reaction_IDs_last(nuclide, data, value): + start = nuclide["neutron_capture_reaction_IDs_offset"] + size = nuclide["N_neutron_capture_reaction"] + end = start + size + data[end - 1] = value + + +@njit +def neutron_capture_reaction_IDs_chunk(start, length, nuclide, data, value): + start += nuclide["neutron_capture_reaction_IDs_offset"] + end = start + length + data[start:end] = value + + +@njit +def neutron_inelastic_scattering_reaction_IDs(index, nuclide, data, value): + offset = nuclide["neutron_inelastic_scattering_reaction_IDs_offset"] + data[offset + index] = value + + +@njit +def neutron_inelastic_scattering_reaction_IDs_all(nuclide, data, value): + start = nuclide["neutron_inelastic_scattering_reaction_IDs_offset"] + size = nuclide["N_neutron_inelastic_scattering_reaction"] + end = start + size + data[start:end] = value + + +@njit +def 
neutron_inelastic_scattering_reaction_IDs_last(nuclide, data, value): + start = nuclide["neutron_inelastic_scattering_reaction_IDs_offset"] + size = nuclide["N_neutron_inelastic_scattering_reaction"] + end = start + size + data[end - 1] = value + + +@njit +def neutron_inelastic_scattering_reaction_IDs_chunk(start, length, nuclide, data, value): + start += nuclide["neutron_inelastic_scattering_reaction_IDs_offset"] + end = start + length + data[start:end] = value + + +@njit +def neutron_fission_reaction_IDs(index, nuclide, data, value): + offset = nuclide["neutron_fission_reaction_IDs_offset"] + data[offset + index] = value + + +@njit +def neutron_fission_reaction_IDs_all(nuclide, data, value): + start = nuclide["neutron_fission_reaction_IDs_offset"] + size = nuclide["N_neutron_fission_reaction"] + end = start + size + data[start:end] = value + + +@njit +def neutron_fission_reaction_IDs_last(nuclide, data, value): + start = nuclide["neutron_fission_reaction_IDs_offset"] + size = nuclide["N_neutron_fission_reaction"] + end = start + size + data[end - 1] = value + + +@njit +def neutron_fission_reaction_IDs_chunk(start, length, nuclide, data, value): + start += nuclide["neutron_fission_reaction_IDs_offset"] + end = start + length + data[start:end] = value + + +@njit +def neutron_fission_delayed_fractions(index, nuclide, data, value): + offset = nuclide["neutron_fission_delayed_fractions_offset"] + data[offset + index] = value + + +@njit +def neutron_fission_delayed_fractions_all(nuclide, data, value): + start = nuclide["neutron_fission_delayed_fractions_offset"] + size = nuclide["neutron_fission_delayed_fractions_length"] + end = start + size + data[start:end] = value + + +@njit +def neutron_fission_delayed_fractions_last(nuclide, data, value): + start = nuclide["neutron_fission_delayed_fractions_offset"] + size = nuclide["neutron_fission_delayed_fractions_length"] + end = start + size + data[end - 1] = value + + +@njit +def 
neutron_fission_delayed_fractions_chunk(start, length, nuclide, data, value): + start += nuclide["neutron_fission_delayed_fractions_offset"] + end = start + length + data[start:end] = value + + +@njit +def neutron_fission_delayed_decay_rates(index, nuclide, data, value): + offset = nuclide["neutron_fission_delayed_decay_rates_offset"] + data[offset + index] = value + + +@njit +def neutron_fission_delayed_decay_rates_all(nuclide, data, value): + start = nuclide["neutron_fission_delayed_decay_rates_offset"] + size = nuclide["neutron_fission_delayed_decay_rates_length"] + end = start + size + data[start:end] = value + + +@njit +def neutron_fission_delayed_decay_rates_last(nuclide, data, value): + start = nuclide["neutron_fission_delayed_decay_rates_offset"] + size = nuclide["neutron_fission_delayed_decay_rates_length"] + end = start + size + data[end - 1] = value + + +@njit +def neutron_fission_delayed_decay_rates_chunk(start, length, nuclide, data, value): + start += nuclide["neutron_fission_delayed_decay_rates_offset"] + end = start + length + data[start:end] = value + + +@njit +def neutron_fission_delayed_spectrum_IDs(index, nuclide, data, value): + offset = nuclide["neutron_fission_delayed_spectrum_IDs_offset"] + data[offset + index] = value + + +@njit +def neutron_fission_delayed_spectrum_IDs_all(nuclide, data, value): + start = nuclide["neutron_fission_delayed_spectrum_IDs_offset"] + size = nuclide["N_neutron_fission_delayed_spectrum"] + end = start + size + data[start:end] = value + + +@njit +def neutron_fission_delayed_spectrum_IDs_last(nuclide, data, value): + start = nuclide["neutron_fission_delayed_spectrum_IDs_offset"] + size = nuclide["N_neutron_fission_delayed_spectrum"] + end = start + size + data[end - 1] = value + + +@njit +def neutron_fission_delayed_spectrum_IDs_chunk(start, length, nuclide, data, value): + start += nuclide["neutron_fission_delayed_spectrum_IDs_offset"] + end = start + length + data[start:end] = value diff --git 
a/mcdc/mcdc/mcdc_set/particle.py b/mcdc/mcdc/mcdc_set/particle.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/particle.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/particle_bank.py b/mcdc/mcdc/mcdc_set/particle_bank.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/particle_bank.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/particle_data.py b/mcdc/mcdc/mcdc_set/particle_data.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/particle_data.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/pmf_distribution.py b/mcdc/mcdc/mcdc_set/pmf_distribution.py new file mode 100644 index 000000000..aa4fe04e2 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/pmf_distribution.py @@ -0,0 +1,90 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def value(index, pmf_distribution, data, value): + offset = pmf_distribution["value_offset"] + data[offset + index] = value + + +@njit +def value_all(pmf_distribution, data, value): + start = pmf_distribution["value_offset"] + size = pmf_distribution["value_length"] + end = start + size + data[start:end] = value + + +@njit +def value_last(pmf_distribution, data, value): + start = pmf_distribution["value_offset"] + size = pmf_distribution["value_length"] + end = start + size + data[end - 1] = value + + +@njit +def value_chunk(start, length, pmf_distribution, data, value): + start += pmf_distribution["value_offset"] + end = start + length + data[start:end] = value + + +@njit +def pmf(index, pmf_distribution, data, value): + offset = pmf_distribution["pmf_offset"] + data[offset + index] = value + + +@njit +def 
pmf_all(pmf_distribution, data, value): + start = pmf_distribution["pmf_offset"] + size = pmf_distribution["pmf_length"] + end = start + size + data[start:end] = value + + +@njit +def pmf_last(pmf_distribution, data, value): + start = pmf_distribution["pmf_offset"] + size = pmf_distribution["pmf_length"] + end = start + size + data[end - 1] = value + + +@njit +def pmf_chunk(start, length, pmf_distribution, data, value): + start += pmf_distribution["pmf_offset"] + end = start + length + data[start:end] = value + + +@njit +def cmf(index, pmf_distribution, data, value): + offset = pmf_distribution["cmf_offset"] + data[offset + index] = value + + +@njit +def cmf_all(pmf_distribution, data, value): + start = pmf_distribution["cmf_offset"] + size = pmf_distribution["cmf_length"] + end = start + size + data[start:end] = value + + +@njit +def cmf_last(pmf_distribution, data, value): + start = pmf_distribution["cmf_offset"] + size = pmf_distribution["cmf_length"] + end = start + size + data[end - 1] = value + + +@njit +def cmf_chunk(start, length, pmf_distribution, data, value): + start += pmf_distribution["cmf_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/polynomial_data.py b/mcdc/mcdc/mcdc_set/polynomial_data.py new file mode 100644 index 000000000..cf058425d --- /dev/null +++ b/mcdc/mcdc/mcdc_set/polynomial_data.py @@ -0,0 +1,32 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def coefficients(index, polynomial_data, data, value): + offset = polynomial_data["coefficients_offset"] + data[offset + index] = value + + +@njit +def coefficients_all(polynomial_data, data, value): + start = polynomial_data["coefficients_offset"] + size = polynomial_data["coefficients_length"] + end = start + size + data[start:end] = value + + +@njit +def coefficients_last(polynomial_data, data, value): + start = polynomial_data["coefficients_offset"] + size = polynomial_data["coefficients_length"] 
+ end = start + size + data[end - 1] = value + + +@njit +def coefficients_chunk(start, length, polynomial_data, data, value): + start += polynomial_data["coefficients_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/population_control.py b/mcdc/mcdc/mcdc_set/population_control.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/population_control.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/reaction.py b/mcdc/mcdc/mcdc_set/reaction.py new file mode 100644 index 000000000..3b29d18e3 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/reaction.py @@ -0,0 +1,32 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def xs(index, reaction, data, value): + offset = reaction["xs_offset"] + data[offset + index] = value + + +@njit +def xs_all(reaction, data, value): + start = reaction["xs_offset"] + size = reaction["xs_length"] + end = start + size + data[start:end] = value + + +@njit +def xs_last(reaction, data, value): + start = reaction["xs_offset"] + size = reaction["xs_length"] + end = start + size + data[end - 1] = value + + +@njit +def xs_chunk(start, length, reaction, data, value): + start += reaction["xs_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/settings.py b/mcdc/mcdc/mcdc_set/settings.py new file mode 100644 index 000000000..7c876f2c2 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/settings.py @@ -0,0 +1,32 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def census_time(index, settings, data, value): + offset = settings["census_time_offset"] + data[offset + index] = value + + +@njit +def census_time_all(settings, data, value): + start = settings["census_time_offset"] + size = settings["census_time_length"] + end = start + size + data[start:end] = value + + +@njit 
+def census_time_last(settings, data, value): + start = settings["census_time_offset"] + size = settings["census_time_length"] + end = start + size + data[end - 1] = value + + +@njit +def census_time_chunk(start, length, settings, data, value): + start += settings["census_time_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/simulation.py b/mcdc/mcdc/mcdc_set/simulation.py new file mode 100644 index 000000000..fc4b2b24e --- /dev/null +++ b/mcdc/mcdc/mcdc_set/simulation.py @@ -0,0 +1,61 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def k_cycle(index, simulation, data, value): + offset = simulation["k_cycle_offset"] + data[offset + index] = value + + +@njit +def k_cycle_all(simulation, data, value): + start = simulation["k_cycle_offset"] + size = simulation["k_cycle_length"] + end = start + size + data[start:end] = value + + +@njit +def k_cycle_last(simulation, data, value): + start = simulation["k_cycle_offset"] + size = simulation["k_cycle_length"] + end = start + size + data[end - 1] = value + + +@njit +def k_cycle_chunk(start, length, simulation, data, value): + start += simulation["k_cycle_offset"] + end = start + length + data[start:end] = value + + +@njit +def gyration_radius(index, simulation, data, value): + offset = simulation["gyration_radius_offset"] + data[offset + index] = value + + +@njit +def gyration_radius_all(simulation, data, value): + start = simulation["gyration_radius_offset"] + size = simulation["gyration_radius_length"] + end = start + size + data[start:end] = value + + +@njit +def gyration_radius_last(simulation, data, value): + start = simulation["gyration_radius_offset"] + size = simulation["gyration_radius_length"] + end = start + size + data[end - 1] = value + + +@njit +def gyration_radius_chunk(start, length, simulation, data, value): + start += simulation["gyration_radius_offset"] + end = start + length + data[start:end] = value diff 
@njit
def move_velocities_vector(index_1, source, data, value):
    """Set all three components of move-velocity vector *index_1* of *source*.

    Writes *value* into the 3-element slice of the flat *data* array that
    backs ``source``'s move-velocity table (row ``index_1``, stride 3).

    Bug fix: the generated body read ``data[start:end] - value`` — a
    subtraction whose result was discarded, making the setter a silent
    no-op.  It must be an assignment, matching every sibling setter in
    this module (``move_velocities``, ``*_all``, ``*_chunk``).
    """
    offset = source["move_velocities_offset"]
    stride = 3  # velocity vectors are (vx, vy, vz) triples
    start = offset + index_1 * stride
    end = start + stride
    data[start:end] = value
@njit
def move_translations_vector(index_1, source, data, value):
    """Set all three components of move-translation vector *index_1* of *source*.

    Writes *value* into the 3-element slice of the flat *data* array that
    backs ``source``'s move-translation table (row ``index_1``, stride 3).

    Bug fix: the generated body read ``data[start:end] - value`` — a
    subtraction whose result was discarded, making the setter a silent
    no-op.  It must be an assignment, matching every sibling setter in
    this module.
    """
    offset = source["move_translations_offset"]
    stride = 3  # translation vectors are (x, y, z) triples
    start = offset + index_1 * stride
    end = start + stride
    data[start:end] = value
@njit
def move_velocities_vector(index_1, surface, data, value):
    """Set all three components of move-velocity vector *index_1* of *surface*.

    Writes *value* into the 3-element slice of the flat *data* array that
    backs ``surface``'s move-velocity table (row ``index_1``, stride 3).

    Bug fix: the generated body read ``data[start:end] - value`` — a
    subtraction whose result was discarded, making the setter a silent
    no-op.  It must be an assignment, matching every sibling setter in
    this module (``move_velocities``, ``*_all``, ``*_chunk``).
    """
    offset = surface["move_velocities_offset"]
    stride = 3  # velocity vectors are (vx, vy, vz) triples
    start = offset + index_1 * stride
    end = start + stride
    data[start:end] = value
@njit
def move_translations_vector(index_1, surface, data, value):
    """Set all three components of move-translation vector *index_1* of *surface*.

    Writes *value* into the 3-element slice of the flat *data* array that
    backs ``surface``'s move-translation table (row ``index_1``, stride 3).

    Bug fix: the generated body read ``data[start:end] - value`` — a
    subtraction whose result was discarded, making the setter a silent
    no-op.  It must be an assignment, matching every sibling setter in
    this module.

    NOTE(review): the same ``- value`` typo appears in the generated
    ``source.py`` twin of this module; the root fix belongs in
    ``code_factory.py``, which emits all of these setters.
    """
    offset = surface["move_translations_offset"]
    stride = 3  # translation vectors are (x, y, z) triples
    start = offset + index_1 * stride
    end = start + stride
    data[start:end] = value
tally_IDs_last(surface, data, value): + start = surface["tally_IDs_offset"] + size = surface["N_tally"] + end = start + size + data[end - 1] = value + + +@njit +def tally_IDs_chunk(start, length, surface, data, value): + start += surface["tally_IDs_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/surface_tally.py b/mcdc/mcdc/mcdc_set/surface_tally.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/surface_tally.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/table_data.py b/mcdc/mcdc/mcdc_set/table_data.py new file mode 100644 index 000000000..16c422171 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/table_data.py @@ -0,0 +1,61 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def x(index, table_data, data, value): + offset = table_data["x_offset"] + data[offset + index] = value + + +@njit +def x_all(table_data, data, value): + start = table_data["x_offset"] + size = table_data["x_length"] + end = start + size + data[start:end] = value + + +@njit +def x_last(table_data, data, value): + start = table_data["x_offset"] + size = table_data["x_length"] + end = start + size + data[end - 1] = value + + +@njit +def x_chunk(start, length, table_data, data, value): + start += table_data["x_offset"] + end = start + length + data[start:end] = value + + +@njit +def y(index, table_data, data, value): + offset = table_data["y_offset"] + data[offset + index] = value + + +@njit +def y_all(table_data, data, value): + start = table_data["y_offset"] + size = table_data["y_length"] + end = start + size + data[start:end] = value + + +@njit +def y_last(table_data, data, value): + start = table_data["y_offset"] + size = table_data["y_length"] + end = start + size + data[end - 1] = value + + +@njit +def y_chunk(start, length, table_data, data, value): + start += 
table_data["y_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/tabulated_distribution.py b/mcdc/mcdc/mcdc_set/tabulated_distribution.py new file mode 100644 index 000000000..91bd5cfcd --- /dev/null +++ b/mcdc/mcdc/mcdc_set/tabulated_distribution.py @@ -0,0 +1,90 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def value(index, tabulated_distribution, data, value): + offset = tabulated_distribution["value_offset"] + data[offset + index] = value + + +@njit +def value_all(tabulated_distribution, data, value): + start = tabulated_distribution["value_offset"] + size = tabulated_distribution["value_length"] + end = start + size + data[start:end] = value + + +@njit +def value_last(tabulated_distribution, data, value): + start = tabulated_distribution["value_offset"] + size = tabulated_distribution["value_length"] + end = start + size + data[end - 1] = value + + +@njit +def value_chunk(start, length, tabulated_distribution, data, value): + start += tabulated_distribution["value_offset"] + end = start + length + data[start:end] = value + + +@njit +def pdf(index, tabulated_distribution, data, value): + offset = tabulated_distribution["pdf_offset"] + data[offset + index] = value + + +@njit +def pdf_all(tabulated_distribution, data, value): + start = tabulated_distribution["pdf_offset"] + size = tabulated_distribution["pdf_length"] + end = start + size + data[start:end] = value + + +@njit +def pdf_last(tabulated_distribution, data, value): + start = tabulated_distribution["pdf_offset"] + size = tabulated_distribution["pdf_length"] + end = start + size + data[end - 1] = value + + +@njit +def pdf_chunk(start, length, tabulated_distribution, data, value): + start += tabulated_distribution["pdf_offset"] + end = start + length + data[start:end] = value + + +@njit +def cdf(index, tabulated_distribution, data, value): + offset = tabulated_distribution["cdf_offset"] + data[offset + index] = 
value + + +@njit +def cdf_all(tabulated_distribution, data, value): + start = tabulated_distribution["cdf_offset"] + size = tabulated_distribution["cdf_length"] + end = start + size + data[start:end] = value + + +@njit +def cdf_last(tabulated_distribution, data, value): + start = tabulated_distribution["cdf_offset"] + size = tabulated_distribution["cdf_length"] + end = start + size + data[end - 1] = value + + +@njit +def cdf_chunk(start, length, tabulated_distribution, data, value): + start += tabulated_distribution["cdf_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/tabulated_energy_angle_distribution.py b/mcdc/mcdc/mcdc_set/tabulated_energy_angle_distribution.py new file mode 100644 index 000000000..e25747d14 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/tabulated_energy_angle_distribution.py @@ -0,0 +1,264 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def energy(index, tabulated_energy_angle_distribution, data, value): + offset = tabulated_energy_angle_distribution["energy_offset"] + data[offset + index] = value + + +@njit +def energy_all(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["energy_offset"] + size = tabulated_energy_angle_distribution["energy_length"] + end = start + size + data[start:end] = value + + +@njit +def energy_last(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["energy_offset"] + size = tabulated_energy_angle_distribution["energy_length"] + end = start + size + data[end - 1] = value + + +@njit +def energy_chunk(start, length, tabulated_energy_angle_distribution, data, value): + start += tabulated_energy_angle_distribution["energy_offset"] + end = start + length + data[start:end] = value + + +@njit +def offset(index, tabulated_energy_angle_distribution, data, value): + offset = tabulated_energy_angle_distribution["offset_offset"] + data[offset + index] 
= value + + +@njit +def offset_all(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["offset_offset"] + size = tabulated_energy_angle_distribution["offset_length"] + end = start + size + data[start:end] = value + + +@njit +def offset_last(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["offset_offset"] + size = tabulated_energy_angle_distribution["offset_length"] + end = start + size + data[end - 1] = value + + +@njit +def offset_chunk(start, length, tabulated_energy_angle_distribution, data, value): + start += tabulated_energy_angle_distribution["offset_offset"] + end = start + length + data[start:end] = value + + +@njit +def energy_out(index, tabulated_energy_angle_distribution, data, value): + offset = tabulated_energy_angle_distribution["energy_out_offset"] + data[offset + index] = value + + +@njit +def energy_out_all(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["energy_out_offset"] + size = tabulated_energy_angle_distribution["energy_out_length"] + end = start + size + data[start:end] = value + + +@njit +def energy_out_last(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["energy_out_offset"] + size = tabulated_energy_angle_distribution["energy_out_length"] + end = start + size + data[end - 1] = value + + +@njit +def energy_out_chunk(start, length, tabulated_energy_angle_distribution, data, value): + start += tabulated_energy_angle_distribution["energy_out_offset"] + end = start + length + data[start:end] = value + + +@njit +def pdf(index, tabulated_energy_angle_distribution, data, value): + offset = tabulated_energy_angle_distribution["pdf_offset"] + data[offset + index] = value + + +@njit +def pdf_all(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["pdf_offset"] + size = 
tabulated_energy_angle_distribution["pdf_length"] + end = start + size + data[start:end] = value + + +@njit +def pdf_last(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["pdf_offset"] + size = tabulated_energy_angle_distribution["pdf_length"] + end = start + size + data[end - 1] = value + + +@njit +def pdf_chunk(start, length, tabulated_energy_angle_distribution, data, value): + start += tabulated_energy_angle_distribution["pdf_offset"] + end = start + length + data[start:end] = value + + +@njit +def cdf(index, tabulated_energy_angle_distribution, data, value): + offset = tabulated_energy_angle_distribution["cdf_offset"] + data[offset + index] = value + + +@njit +def cdf_all(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["cdf_offset"] + size = tabulated_energy_angle_distribution["cdf_length"] + end = start + size + data[start:end] = value + + +@njit +def cdf_last(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["cdf_offset"] + size = tabulated_energy_angle_distribution["cdf_length"] + end = start + size + data[end - 1] = value + + +@njit +def cdf_chunk(start, length, tabulated_energy_angle_distribution, data, value): + start += tabulated_energy_angle_distribution["cdf_offset"] + end = start + length + data[start:end] = value + + +@njit +def cosine_offset_(index, tabulated_energy_angle_distribution, data, value): + offset = tabulated_energy_angle_distribution["cosine_offset__offset"] + data[offset + index] = value + + +@njit +def cosine_offset__all(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["cosine_offset__offset"] + size = tabulated_energy_angle_distribution["cosine_offset__length"] + end = start + size + data[start:end] = value + + +@njit +def cosine_offset__last(tabulated_energy_angle_distribution, data, value): + start = 
tabulated_energy_angle_distribution["cosine_offset__offset"] + size = tabulated_energy_angle_distribution["cosine_offset__length"] + end = start + size + data[end - 1] = value + + +@njit +def cosine_offset__chunk(start, length, tabulated_energy_angle_distribution, data, value): + start += tabulated_energy_angle_distribution["cosine_offset__offset"] + end = start + length + data[start:end] = value + + +@njit +def cosine(index, tabulated_energy_angle_distribution, data, value): + offset = tabulated_energy_angle_distribution["cosine_offset"] + data[offset + index] = value + + +@njit +def cosine_all(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["cosine_offset"] + size = tabulated_energy_angle_distribution["cosine_length"] + end = start + size + data[start:end] = value + + +@njit +def cosine_last(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["cosine_offset"] + size = tabulated_energy_angle_distribution["cosine_length"] + end = start + size + data[end - 1] = value + + +@njit +def cosine_chunk(start, length, tabulated_energy_angle_distribution, data, value): + start += tabulated_energy_angle_distribution["cosine_offset"] + end = start + length + data[start:end] = value + + +@njit +def cosine_pdf(index, tabulated_energy_angle_distribution, data, value): + offset = tabulated_energy_angle_distribution["cosine_pdf_offset"] + data[offset + index] = value + + +@njit +def cosine_pdf_all(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["cosine_pdf_offset"] + size = tabulated_energy_angle_distribution["cosine_pdf_length"] + end = start + size + data[start:end] = value + + +@njit +def cosine_pdf_last(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["cosine_pdf_offset"] + size = tabulated_energy_angle_distribution["cosine_pdf_length"] + end = start + size + data[end - 1] = value + + 
+@njit +def cosine_pdf_chunk(start, length, tabulated_energy_angle_distribution, data, value): + start += tabulated_energy_angle_distribution["cosine_pdf_offset"] + end = start + length + data[start:end] = value + + +@njit +def cosine_cdf(index, tabulated_energy_angle_distribution, data, value): + offset = tabulated_energy_angle_distribution["cosine_cdf_offset"] + data[offset + index] = value + + +@njit +def cosine_cdf_all(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["cosine_cdf_offset"] + size = tabulated_energy_angle_distribution["cosine_cdf_length"] + end = start + size + data[start:end] = value + + +@njit +def cosine_cdf_last(tabulated_energy_angle_distribution, data, value): + start = tabulated_energy_angle_distribution["cosine_cdf_offset"] + size = tabulated_energy_angle_distribution["cosine_cdf_length"] + end = start + size + data[end - 1] = value + + +@njit +def cosine_cdf_chunk(start, length, tabulated_energy_angle_distribution, data, value): + start += tabulated_energy_angle_distribution["cosine_cdf_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/tally.py b/mcdc/mcdc/mcdc_set/tally.py new file mode 100644 index 000000000..d8daaf548 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/tally.py @@ -0,0 +1,264 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def scores(index, tally, data, value): + offset = tally["scores_offset"] + data[offset + index] = value + + +@njit +def scores_all(tally, data, value): + start = tally["scores_offset"] + size = tally["scores_length"] + end = start + size + data[start:end] = value + + +@njit +def scores_last(tally, data, value): + start = tally["scores_offset"] + size = tally["scores_length"] + end = start + size + data[end - 1] = value + + +@njit +def scores_chunk(start, length, tally, data, value): + start += tally["scores_offset"] + end = start + length + data[start:end] = value + + 
+@njit +def mu(index, tally, data, value): + offset = tally["mu_offset"] + data[offset + index] = value + + +@njit +def mu_all(tally, data, value): + start = tally["mu_offset"] + size = tally["mu_length"] + end = start + size + data[start:end] = value + + +@njit +def mu_last(tally, data, value): + start = tally["mu_offset"] + size = tally["mu_length"] + end = start + size + data[end - 1] = value + + +@njit +def mu_chunk(start, length, tally, data, value): + start += tally["mu_offset"] + end = start + length + data[start:end] = value + + +@njit +def azi(index, tally, data, value): + offset = tally["azi_offset"] + data[offset + index] = value + + +@njit +def azi_all(tally, data, value): + start = tally["azi_offset"] + size = tally["azi_length"] + end = start + size + data[start:end] = value + + +@njit +def azi_last(tally, data, value): + start = tally["azi_offset"] + size = tally["azi_length"] + end = start + size + data[end - 1] = value + + +@njit +def azi_chunk(start, length, tally, data, value): + start += tally["azi_offset"] + end = start + length + data[start:end] = value + + +@njit +def energy(index, tally, data, value): + offset = tally["energy_offset"] + data[offset + index] = value + + +@njit +def energy_all(tally, data, value): + start = tally["energy_offset"] + size = tally["energy_length"] + end = start + size + data[start:end] = value + + +@njit +def energy_last(tally, data, value): + start = tally["energy_offset"] + size = tally["energy_length"] + end = start + size + data[end - 1] = value + + +@njit +def energy_chunk(start, length, tally, data, value): + start += tally["energy_offset"] + end = start + length + data[start:end] = value + + +@njit +def time(index, tally, data, value): + offset = tally["time_offset"] + data[offset + index] = value + + +@njit +def time_all(tally, data, value): + start = tally["time_offset"] + size = tally["time_length"] + end = start + size + data[start:end] = value + + +@njit +def time_last(tally, data, value): + start = 
tally["time_offset"] + size = tally["time_length"] + end = start + size + data[end - 1] = value + + +@njit +def time_chunk(start, length, tally, data, value): + start += tally["time_offset"] + end = start + length + data[start:end] = value + + +@njit +def bin(index, tally, data, value): + offset = tally["bin_offset"] + data[offset + index] = value + + +@njit +def bin_all(tally, data, value): + start = tally["bin_offset"] + size = tally["bin_length"] + end = start + size + data[start:end] = value + + +@njit +def bin_last(tally, data, value): + start = tally["bin_offset"] + size = tally["bin_length"] + end = start + size + data[end - 1] = value + + +@njit +def bin_chunk(start, length, tally, data, value): + start += tally["bin_offset"] + end = start + length + data[start:end] = value + + +@njit +def bin_sum(index, tally, data, value): + offset = tally["bin_sum_offset"] + data[offset + index] = value + + +@njit +def bin_sum_all(tally, data, value): + start = tally["bin_sum_offset"] + size = tally["bin_sum_length"] + end = start + size + data[start:end] = value + + +@njit +def bin_sum_last(tally, data, value): + start = tally["bin_sum_offset"] + size = tally["bin_sum_length"] + end = start + size + data[end - 1] = value + + +@njit +def bin_sum_chunk(start, length, tally, data, value): + start += tally["bin_sum_offset"] + end = start + length + data[start:end] = value + + +@njit +def bin_sum_square(index, tally, data, value): + offset = tally["bin_sum_square_offset"] + data[offset + index] = value + + +@njit +def bin_sum_square_all(tally, data, value): + start = tally["bin_sum_square_offset"] + size = tally["bin_sum_square_length"] + end = start + size + data[start:end] = value + + +@njit +def bin_sum_square_last(tally, data, value): + start = tally["bin_sum_square_offset"] + size = tally["bin_sum_square_length"] + end = start + size + data[end - 1] = value + + +@njit +def bin_sum_square_chunk(start, length, tally, data, value): + start += tally["bin_sum_square_offset"] 
+ end = start + length + data[start:end] = value + + +@njit +def bin_shape(index, tally, data, value): + offset = tally["bin_shape_offset"] + data[offset + index] = value + + +@njit +def bin_shape_all(tally, data, value): + start = tally["bin_shape_offset"] + size = tally["bin_shape_length"] + end = start + size + data[start:end] = value + + +@njit +def bin_shape_last(tally, data, value): + start = tally["bin_shape_offset"] + size = tally["bin_shape_length"] + end = start + size + data[end - 1] = value + + +@njit +def bin_shape_chunk(start, length, tally, data, value): + start += tally["bin_shape_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/tracklength_tally.py b/mcdc/mcdc/mcdc_set/tracklength_tally.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/tracklength_tally.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/uniform_mesh.py b/mcdc/mcdc/mcdc_set/uniform_mesh.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/uniform_mesh.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/universe.py b/mcdc/mcdc/mcdc_set/universe.py new file mode 100644 index 000000000..a398e1a6b --- /dev/null +++ b/mcdc/mcdc/mcdc_set/universe.py @@ -0,0 +1,32 @@ +# The following is automatically generated by code_factory.py + +from numba import njit + + +@njit +def cell_IDs(index, universe, data, value): + offset = universe["cell_IDs_offset"] + data[offset + index] = value + + +@njit +def cell_IDs_all(universe, data, value): + start = universe["cell_IDs_offset"] + size = universe["N_cell"] + end = start + size + data[start:end] = value + + +@njit +def cell_IDs_last(universe, data, value): + start = universe["cell_IDs_offset"] + size = universe["N_cell"] + end = start + size + data[end - 1] = 
value + + +@njit +def cell_IDs_chunk(start, length, universe, data, value): + start += universe["cell_IDs_offset"] + end = start + length + data[start:end] = value diff --git a/mcdc/mcdc/mcdc_set/weight_roulette.py b/mcdc/mcdc/mcdc_set/weight_roulette.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/weight_roulette.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/mcdc_set/weighted_emission.py b/mcdc/mcdc/mcdc_set/weighted_emission.py new file mode 100644 index 000000000..fdbf8e750 --- /dev/null +++ b/mcdc/mcdc/mcdc_set/weighted_emission.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +from numba import njit diff --git a/mcdc/mcdc/numba_types.py b/mcdc/mcdc/numba_types.py new file mode 100644 index 000000000..8ca7ba4f4 --- /dev/null +++ b/mcdc/mcdc/numba_types.py @@ -0,0 +1,864 @@ +# The following is automatically generated by code_factory.py + +from numpy import bool_ +from numpy import float64 +from numpy import int64 +from numpy import uint64 +from numpy import uintp + +### + +from mcdc.code_factory.numba_objects_generator import into_dtype + +particle_data = into_dtype([ + ('x', float64), + ('y', float64), + ('z', float64), + ('t', float64), + ('ux', float64), + ('uy', float64), + ('uz', float64), + ('g', int64), + ('E', float64), + ('w', float64), + ('particle_type', int64), + ('rng_seed', uint64), +]) + +particle = into_dtype([ + ('cell_ID', int64), + ('material_ID', int64), + ('surface_ID', int64), + ('alive', bool), + ('fresh', bool), + ('event', int64), + ('x', float64), + ('y', float64), + ('z', float64), + ('t', float64), + ('ux', float64), + ('uy', float64), + ('uz', float64), + ('g', int64), + ('E', float64), + ('w', float64), + ('particle_type', int64), + ('rng_seed', uint64), +]) + +cell = into_dtype([ + ('name', 'U32'), + ('fill_translated', bool), + ('fill_rotated', bool), + ('translation', 
float64, (3,)), + ('rotation', float64, (3,)), + ('region_RPN_tokens_offset', int64), + ('region_RPN_tokens_length', int64), + ('N_surface', int64), + ('surface_IDs_offset', int64), + ('N_tally', int64), + ('tally_IDs_offset', int64), + ('fill_type', int64), + ('fill_ID', int64), + ('ID', int64), +]) + +lattice = into_dtype([ + ('name', 'U32'), + ('x0', float64), + ('dx', float64), + ('Nx', int64), + ('y0', float64), + ('dy', float64), + ('Ny', int64), + ('z0', float64), + ('dz', float64), + ('Nz', int64), + ('universe_IDs_offset', int64), + ('universe_IDs_length', int64), + ('ID', int64), +]) + +material = into_dtype([ + ('name', 'U32'), + ('fissionable', bool), + ('ID', int64), + ('child_type', int64), + ('child_ID', int64), +]) + +tally = into_dtype([ + ('name', 'U32'), + ('scores_offset', int64), + ('scores_length', int64), + ('filter_direction', bool), + ('filter_energy', bool), + ('filter_time', bool), + ('mu_offset', int64), + ('mu_length', int64), + ('azi_offset', int64), + ('azi_length', int64), + ('polar_reference', float64, (3,)), + ('energy_offset', int64), + ('energy_length', int64), + ('time_offset', int64), + ('time_length', int64), + ('bin_offset', int64), + ('bin_length', int64), + ('bin_sum_offset', int64), + ('bin_sum_length', int64), + ('bin_sum_square_offset', int64), + ('bin_sum_square_length', int64), + ('bin_shape_offset', int64), + ('bin_shape_length', int64), + ('stride_mu', int64), + ('stride_azi', int64), + ('stride_energy', int64), + ('stride_time', int64), + ('ID', int64), + ('child_type', int64), + ('child_ID', int64), +]) + +universe = into_dtype([ + ('name', 'U32'), + ('N_cell', int64), + ('cell_IDs_offset', int64), + ('ID', int64), +]) + +data = into_dtype([ + ('ID', int64), + ('child_type', int64), + ('child_ID', int64), +]) + +none_data = into_dtype([ + ('ID', int64), + ('parent_ID', int64), +]) + +polynomial_data = into_dtype([ + ('coefficients_offset', int64), + ('coefficients_length', int64), + ('ID', int64), + ('parent_ID', 
int64), +]) + +table_data = into_dtype([ + ('x_offset', int64), + ('x_length', int64), + ('y_offset', int64), + ('y_length', int64), + ('interpolation', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +distribution = into_dtype([ + ('ID', int64), + ('child_type', int64), + ('child_ID', int64), +]) + +evaporation_distribution = into_dtype([ + ('nuclear_temperature_ID', int64), + ('restriction_energy', float64), + ('ID', int64), + ('parent_ID', int64), +]) + +kalbach_mann_distribution = into_dtype([ + ('energy_offset', int64), + ('energy_length', int64), + ('offset_offset', int64), + ('offset_length', int64), + ('energy_out_offset', int64), + ('energy_out_length', int64), + ('pdf_offset', int64), + ('pdf_length', int64), + ('cdf_offset', int64), + ('cdf_length', int64), + ('precompound_factor_offset', int64), + ('precompound_factor_length', int64), + ('angular_slope_offset', int64), + ('angular_slope_length', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +level_scattering_distribution = into_dtype([ + ('C1', float64), + ('C2', float64), + ('ID', int64), + ('parent_ID', int64), +]) + +maxwellian_distribution = into_dtype([ + ('nuclear_temperature_ID', int64), + ('restriction_energy', float64), + ('ID', int64), + ('parent_ID', int64), +]) + +multi_table_distribution = into_dtype([ + ('grid_offset', int64), + ('grid_length', int64), + ('offset_offset', int64), + ('offset_length', int64), + ('value_offset', int64), + ('value_length', int64), + ('pdf_offset', int64), + ('pdf_length', int64), + ('cdf_offset', int64), + ('cdf_length', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +nbody_distribution = into_dtype([ + ('value_offset', int64), + ('value_length', int64), + ('pdf_offset', int64), + ('pdf_length', int64), + ('cdf_offset', int64), + ('cdf_length', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +none_distribution = into_dtype([ + ('ID', int64), + ('parent_ID', int64), +]) + +pmf_distribution = into_dtype([ + ('value_offset', int64), 
+ ('value_length', int64), + ('pmf_offset', int64), + ('pmf_length', int64), + ('cmf_offset', int64), + ('cmf_length', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +tabulated_distribution = into_dtype([ + ('value_offset', int64), + ('value_length', int64), + ('pdf_offset', int64), + ('pdf_length', int64), + ('cdf_offset', int64), + ('cdf_length', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +tabulated_energy_angle_distribution = into_dtype([ + ('energy_offset', int64), + ('energy_length', int64), + ('offset_offset', int64), + ('offset_length', int64), + ('energy_out_offset', int64), + ('energy_out_length', int64), + ('pdf_offset', int64), + ('pdf_length', int64), + ('cdf_offset', int64), + ('cdf_length', int64), + ('cosine_offset__offset', int64), + ('cosine_offset__length', int64), + ('cosine_offset', int64), + ('cosine_length', int64), + ('cosine_pdf_offset', int64), + ('cosine_pdf_length', int64), + ('cosine_cdf_offset', int64), + ('cosine_cdf_length', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +electron_reaction = into_dtype([ + ('MT', int64), + ('xs_offset', int64), + ('xs_length', int64), + ('xs_offset_', int64), + ('reference_frame', int64), + ('ID', int64), + ('child_type', int64), + ('child_ID', int64), +]) + +electron_bremsstrahlung_reaction = into_dtype([ + ('eloss_ID', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +electron_elastic_scattering_reaction = into_dtype([ + ('mu_cut', float64), + ('xs_large_ID', int64), + ('mu_ID', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +electron_excitation_reaction = into_dtype([ + ('eloss_ID', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +electron_ionization_reaction = into_dtype([ + ('N_subshell', int64), + ('N_subshell_x', int64), + ('subshell_x_IDs_offset', int64), + ('N_subshell_product', int64), + ('subshell_product_IDs_offset', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +element = into_dtype([ + ('name', 'U32'), + ('atomic_weight_ratio', 
float64), + ('atomic_number', int64), + ('electron_xs_energy_grid_offset', int64), + ('electron_xs_energy_grid_length', int64), + ('electron_total_xs_offset', int64), + ('electron_total_xs_length', int64), + ('electron_ionization_xs_offset', int64), + ('electron_ionization_xs_length', int64), + ('electron_elastic_xs_offset', int64), + ('electron_elastic_xs_length', int64), + ('electron_excitation_xs_offset', int64), + ('electron_excitation_xs_length', int64), + ('electron_bremsstrahlung_xs_offset', int64), + ('electron_bremsstrahlung_xs_length', int64), + ('N_electron_ionization_reaction', int64), + ('electron_ionization_reaction_IDs_offset', int64), + ('N_electron_elastic_scattering_reaction', int64), + ('electron_elastic_scattering_reaction_IDs_offset', int64), + ('N_electron_excitation_reaction', int64), + ('electron_excitation_reaction_IDs_offset', int64), + ('N_electron_bremsstrahlung_reaction', int64), + ('electron_bremsstrahlung_reaction_IDs_offset', int64), + ('electron_ionization_subshell_binding_energy_offset', int64), + ('electron_ionization_subshell_binding_energy_length', int64), + ('ID', int64), +]) + +native_material = into_dtype([ + ('N_nuclide', int64), + ('nuclide_IDs_offset', int64), + ('N_element', int64), + ('element_IDs_offset', int64), + ('nuclide_densities_offset', int64), + ('nuclide_densities_length', int64), + ('element_densities_offset', int64), + ('element_densities_length', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +multigroup_material = into_dtype([ + ('G', int64), + ('J', int64), + ('mgxs_speed_offset', int64), + ('mgxs_speed_length', int64), + ('mgxs_decay_rate_offset', int64), + ('mgxs_decay_rate_length', int64), + ('mgxs_capture_offset', int64), + ('mgxs_capture_length', int64), + ('mgxs_scatter_offset', int64), + ('mgxs_scatter_length', int64), + ('mgxs_fission_offset', int64), + ('mgxs_fission_length', int64), + ('mgxs_total_offset', int64), + ('mgxs_total_length', int64), + ('mgxs_nu_s_offset', int64), + 
('mgxs_nu_s_length', int64), + ('mgxs_nu_p_offset', int64), + ('mgxs_nu_p_length', int64), + ('mgxs_nu_d_offset', int64), + ('mgxs_nu_d_length', int64), + ('mgxs_nu_d_total_offset', int64), + ('mgxs_nu_d_total_length', int64), + ('mgxs_nu_f_offset', int64), + ('mgxs_nu_f_length', int64), + ('mgxs_chi_s_offset', int64), + ('mgxs_chi_s_length', int64), + ('mgxs_chi_p_offset', int64), + ('mgxs_chi_p_length', int64), + ('mgxs_chi_d_offset', int64), + ('mgxs_chi_d_length', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +nuclide = into_dtype([ + ('name', 'U32'), + ('temperature', float64), + ('atomic_number', int64), + ('atomic_weight_ratio', float64), + ('fissionable', bool), + ('excitation_level', int64), + ('neutron_xs_energy_grid_offset', int64), + ('neutron_xs_energy_grid_length', int64), + ('neutron_total_xs_offset', int64), + ('neutron_total_xs_length', int64), + ('neutron_elastic_xs_offset', int64), + ('neutron_elastic_xs_length', int64), + ('neutron_capture_xs_offset', int64), + ('neutron_capture_xs_length', int64), + ('neutron_inelastic_xs_offset', int64), + ('neutron_inelastic_xs_length', int64), + ('neutron_fission_xs_offset', int64), + ('neutron_fission_xs_length', int64), + ('N_neutron_elastic_scattering_reaction', int64), + ('neutron_elastic_scattering_reaction_IDs_offset', int64), + ('N_neutron_capture_reaction', int64), + ('neutron_capture_reaction_IDs_offset', int64), + ('N_neutron_inelastic_scattering_reaction', int64), + ('neutron_inelastic_scattering_reaction_IDs_offset', int64), + ('N_neutron_fission_reaction', int64), + ('neutron_fission_reaction_IDs_offset', int64), + ('neutron_fission_prompt_multiplicity_ID', int64), + ('neutron_fission_delayed_multiplicity_ID', int64), + ('N_neutron_fission_delayed_precursor', int64), + ('neutron_fission_delayed_fractions_offset', int64), + ('neutron_fission_delayed_fractions_length', int64), + ('neutron_fission_delayed_decay_rates_offset', int64), + ('neutron_fission_delayed_decay_rates_length', int64), 
+ ('N_neutron_fission_delayed_spectrum', int64), + ('neutron_fission_delayed_spectrum_IDs_offset', int64), + ('ID', int64), +]) + +mesh = into_dtype([ + ('name', 'U32'), + ('N_bin', int64), + ('Nx', int64), + ('Ny', int64), + ('Nz', int64), + ('ID', int64), + ('child_type', int64), + ('child_ID', int64), +]) + +structured_mesh = into_dtype([ + ('x_offset', int64), + ('x_length', int64), + ('y_offset', int64), + ('y_length', int64), + ('z_offset', int64), + ('z_length', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +uniform_mesh = into_dtype([ + ('x0', float64), + ('dx', float64), + ('Nx', int64), + ('y0', float64), + ('dy', float64), + ('Ny', int64), + ('z0', float64), + ('dz', float64), + ('Nz', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +neutron_reaction = into_dtype([ + ('MT', int64), + ('xs_offset', int64), + ('xs_length', int64), + ('xs_offset_', int64), + ('reference_frame', int64), + ('q_value', float64), + ('ID', int64), + ('child_type', int64), + ('child_ID', int64), +]) + +neutron_capture_reaction = into_dtype([ + ('ID', int64), + ('parent_ID', int64), +]) + +neutron_elastic_scattering_reaction = into_dtype([ + ('mu_table_ID', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +neutron_fission_reaction = into_dtype([ + ('angle_type', int64), + ('mu_ID', int64), + ('spectrum_ID', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +neutron_inelastic_scattering_reaction = into_dtype([ + ('multiplicity', int64), + ('angle_type', int64), + ('mu_ID', int64), + ('N_spectrum_probability_bin', int64), + ('N_spectrum', int64), + ('spectrum_probability_grid_offset', int64), + ('spectrum_probability_grid_length', int64), + ('spectrum_probability_offset', int64), + ('spectrum_probability_length', int64), + ('N_energy_spectrum', int64), + ('energy_spectrum_IDs_offset', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +collision_data = into_dtype([ + ('energy_deposition', float64), +]) + +particle_bank = into_dtype([ + ('size', int64, 
(1,)), + ('tag', 'U32'), +]) + +settings = into_dtype([ + ('N_particle', int64), + ('N_batch', int64), + ('rng_seed', int64), + ('N_inactive', int64), + ('N_active', int64), + ('N_cycle', int64), + ('k_init', float64), + ('use_gyration_radius', bool), + ('gyration_radius_type', int64), + ('use_source_file', bool), + ('source_file_name', 'U32'), + ('time_boundary', float64), + ('output_name', 'U32'), + ('use_progress_bar', bool), + ('N_census', int64), + ('census_time_offset', int64), + ('census_time_length', int64), + ('use_census_based_tally', bool), + ('census_tally_frequency', int64), + ('save_particle', bool), + ('active_bank_buffer', int64), + ('census_bank_buffer_ratio', float64), + ('source_bank_buffer_ratio', float64), + ('future_bank_buffer_ratio', float64), + ('neutron_transport', bool), + ('electron_transport', bool), + ('proton_transport', bool), + ('neutron_multigroup_mode', bool), + ('neutron_eigenvalue_mode', bool), + ('gpu_strategy', int64), + ('gpu_async_type', int64), + ('gpu_storage', int64), +]) + +implicit_capture = into_dtype([ + ('active', bool), +]) + +population_control = into_dtype([ + ('active', bool), +]) + +weight_roulette = into_dtype([ + ('weight_threshold', float64), + ('weight_target', float64), +]) + +weighted_emission = into_dtype([ + ('active', bool), + ('weight_target', float64), +]) + +source = into_dtype([ + ('name', 'U32'), + ('point_source', bool), + ('point', float64, (3,)), + ('x', float64, (2,)), + ('y', float64, (2,)), + ('z', float64, (2,)), + ('isotropic_direction', bool), + ('mono_direction', bool), + ('white_direction', bool), + ('direction', float64, (3,)), + ('polar_cosine', float64, (2,)), + ('azimuthal', float64, (2,)), + ('mono_energetic', bool), + ('energy_group', int64), + ('energy', float64), + ('energy_group_pmf_ID', int64), + ('energy_pdf_ID', int64), + ('discrete_time', bool), + ('time', float64), + ('time_range', float64, (2,)), + ('particle_type', int64), + ('probability', float64), + ('moving', bool), + 
('N_move', int64), + ('N_move_grid', int64), + ('move_velocities_offset', int64), + ('move_velocities_length', int64), + ('move_durations_offset', int64), + ('move_durations_length', int64), + ('move_time_grid_offset', int64), + ('move_time_grid_length', int64), + ('move_translations_offset', int64), + ('move_translations_length', int64), + ('ID', int64), +]) + +surface = into_dtype([ + ('type', int64), + ('name', 'U32'), + ('boundary_condition', int64), + ('A', float64), + ('B', float64), + ('C', float64), + ('D', float64), + ('E', float64), + ('F', float64), + ('G', float64), + ('H', float64), + ('I', float64), + ('J', float64), + ('R', float64), + ('r', float64), + ('linear', bool), + ('nx', float64), + ('ny', float64), + ('nz', float64), + ('moving', bool), + ('N_move', int64), + ('N_move_grid', int64), + ('move_velocities_offset', int64), + ('move_velocities_length', int64), + ('move_durations_offset', int64), + ('move_durations_length', int64), + ('move_time_grid_offset', int64), + ('move_time_grid_length', int64), + ('move_translations_offset', int64), + ('move_translations_length', int64), + ('N_tally', int64), + ('tally_IDs_offset', int64), + ('ID', int64), +]) + +surface_tally = into_dtype([ + ('surface_ID', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +collision_tally = into_dtype([ + ('spatial_filter_type', int64), + ('spatial_filter_ID', int64), + ('spatial_filter_subtype', int64), + ('mesh_stride_z', int64), + ('mesh_stride_y', int64), + ('mesh_stride_x', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +tracklength_tally = into_dtype([ + ('spatial_filter_type', int64), + ('spatial_filter_ID', int64), + ('spatial_filter_subtype', int64), + ('mesh_stride_z', int64), + ('mesh_stride_y', int64), + ('mesh_stride_x', int64), + ('ID', int64), + ('parent_ID', int64), +]) + +gpu_meta = into_dtype([ + ('state_pointer', uintp), + ('program_pointer', uintp), + ('simulation_pointer', uintp), + ('data_pointer', uintp), +]) + +bank_active = None 
def _particle_bank_dtype(capacity):
    # Structured layout shared by every particle bank: a one-element size
    # counter, a fixed-width tag string, and `capacity` particle records.
    return into_dtype(
        [
            ("size", int64, (1,)),
            ("tag", "U32"),
            ("particle_data", particle_data, (capacity,)),
        ]
    )


def set_bank_active(N: dict):
    """Rebuild the module-level `bank_active` dtype, sized by N['particle_data'].

    Must run before `set_simulation`, which embeds this dtype (it is None
    until then).
    """
    global bank_active
    bank_active = _particle_bank_dtype(N["particle_data"])


bank_census = None


def set_bank_census(N: dict):
    """Rebuild the module-level `bank_census` dtype, sized by N['particle_data']."""
    global bank_census
    bank_census = _particle_bank_dtype(N["particle_data"])


bank_source = None


def set_bank_source(N: dict):
    """Rebuild the module-level `bank_source` dtype, sized by N['particle_data']."""
    global bank_source
    bank_source = _particle_bank_dtype(N["particle_data"])


bank_future = None


def set_bank_future(N: dict):
    """Rebuild the module-level `bank_future` dtype, sized by N['particle_data']."""
    global bank_future
    bank_future = _particle_bank_dtype(N["particle_data"])


simulation = None
('nbody_distributions', nbody_distribution, (N['nbody_distribution'])), + ('N_nbody_distribution', int64), + ('none_distributions', none_distribution, (N['none_distribution'])), + ('N_none_distribution', int64), + ('pmf_distributions', pmf_distribution, (N['pmf_distribution'])), + ('N_pmf_distribution', int64), + ('tabulated_distributions', tabulated_distribution, (N['tabulated_distribution'])), + ('N_tabulated_distribution', int64), + ('tabulated_energy_angle_distributions', tabulated_energy_angle_distribution, (N['tabulated_energy_angle_distribution'])), + ('N_tabulated_energy_angle_distribution', int64), + ('materials', material, (N['material'])), + ('N_material', int64), + ('native_materials', native_material, (N['native_material'])), + ('N_native_material', int64), + ('multigroup_materials', multigroup_material, (N['multigroup_material'])), + ('N_multigroup_material', int64), + ('elements', element, (N['element'])), + ('N_element', int64), + ('electron_reactions', electron_reaction, (N['electron_reaction'])), + ('N_electron_reaction', int64), + ('electron_bremsstrahlung_reactions', electron_bremsstrahlung_reaction, (N['electron_bremsstrahlung_reaction'])), + ('N_electron_bremsstrahlung_reaction', int64), + ('electron_elastic_scattering_reactions', electron_elastic_scattering_reaction, (N['electron_elastic_scattering_reaction'])), + ('N_electron_elastic_scattering_reaction', int64), + ('electron_excitation_reactions', electron_excitation_reaction, (N['electron_excitation_reaction'])), + ('N_electron_excitation_reaction', int64), + ('electron_ionization_reactions', electron_ionization_reaction, (N['electron_ionization_reaction'])), + ('N_electron_ionization_reaction', int64), + ('nuclides', nuclide, (N['nuclide'])), + ('N_nuclide', int64), + ('neutron_reactions', neutron_reaction, (N['neutron_reaction'])), + ('N_neutron_reaction', int64), + ('neutron_capture_reactions', neutron_capture_reaction, (N['neutron_capture_reaction'])), + ('N_neutron_capture_reaction', 
int64), + ('neutron_elastic_scattering_reactions', neutron_elastic_scattering_reaction, (N['neutron_elastic_scattering_reaction'])), + ('N_neutron_elastic_scattering_reaction', int64), + ('neutron_fission_reactions', neutron_fission_reaction, (N['neutron_fission_reaction'])), + ('N_neutron_fission_reaction', int64), + ('neutron_inelastic_scattering_reactions', neutron_inelastic_scattering_reaction, (N['neutron_inelastic_scattering_reaction'])), + ('N_neutron_inelastic_scattering_reaction', int64), + ('sources', source, (N['source'])), + ('N_source', int64), + ('cells', cell, (N['cell'])), + ('N_cell', int64), + ('lattices', lattice, (N['lattice'])), + ('N_lattice', int64), + ('surfaces', surface, (N['surface'])), + ('N_surface', int64), + ('universes', universe, (N['universe'])), + ('N_universe', int64), + ('meshes', mesh, (N['mesh'])), + ('N_mesh', int64), + ('structured_meshes', structured_mesh, (N['structured_mesh'])), + ('N_structured_mesh', int64), + ('uniform_meshes', uniform_mesh, (N['uniform_mesh'])), + ('N_uniform_mesh', int64), + ('tallies', tally, (N['tally'])), + ('N_tally', int64), + ('surface_tallies', surface_tally, (N['surface_tally'])), + ('N_surface_tally', int64), + ('collision_tallies', collision_tally, (N['collision_tally'])), + ('N_collision_tally', int64), + ('tracklength_tallies', tracklength_tally, (N['tracklength_tally'])), + ('N_tracklength_tally', int64), + ('settings', settings), + ('implicit_capture', implicit_capture), + ('weighted_emission', weighted_emission), + ('weight_roulette', weight_roulette), + ('population_control', population_control), + ('gpu_meta', gpu_meta), + ('bank_future', bank_future), + ('bank_source', bank_source), + ('bank_census', bank_census), + ('bank_active', bank_active), + ('idx_work', int64), + ('idx_cycle', int64), + ('idx_census', int64), + ('idx_batch', int64), + ('dd_idx', int64), + ('dd_N_local_source', int64), + ('dd_local_rank', int64), + ('k_eff', float64), + ('k_cycle_offset', int64), + 
@dataclass
class ParticleData(ObjectBase):
    """Phase-space record for a banked particle.

    Mirrors the generated `particle_data` numba dtype: position, time,
    direction cosines, energy (group index and/or continuous), statistical
    weight, species tag, and the particle's RNG seed.

    NOTE(review): @dataclass synthesizes __init__, which does NOT call
    ObjectBase.__init__ — so the base-class bookkeeping (non_numba list,
    registration) never runs for instances constructed directly; confirm
    this is intentional.
    """

    label: str = "particle_data"
    # Position
    x: float = 0.0
    y: float = 0.0
    z: float = 0.0
    # Time
    t: float = 0.0
    # Direction cosines
    ux: float = 0.0
    uy: float = 0.0
    uz: float = 0.0
    # Energy group index; -1 presumably means "unset / continuous-energy
    # mode" — TODO confirm against multigroup transport code.
    g: int = -1
    # Continuous energy
    E: float = 0.0
    # Statistical weight
    w: float = 0.0
    particle_type: int = PARTICLE_NEUTRON
    # Per-particle RNG stream seed (uint64 to match the numba dtype)
    rng_seed: uint64 = uint64(1)


@dataclass
class CollisionData(ObjectBase):
    """Per-collision bookkeeping record (currently only deposited energy)."""

    label: str = "collision_data"
    energy_deposition: float = 0.0
class ObjectBase:
    """Root of the MC/DC input-object hierarchy.

    Responsibilities:
      * optionally register non-singleton objects into the global simulation
        lists (see ``register_object``);
      * maintain ``non_numba``, the per-instance list of attribute names that
        are excluded when the object is lowered to Numba-compatible records;
      * type-check every attribute assignment against the class annotations.
    """

    def __init__(self, register):
        if register and isinstance(self, ObjectNonSingleton):
            register_object(self)

        # Build a PER-INSTANCE bookkeeping list. The copy is essential:
        # subclasses may declare `non_numba` as a class attribute, and the
        # previous `self.non_numba += [...]` mutated that shared class list
        # in place — every instantiation grew it, and all instances ended up
        # aliasing the same object.
        self.non_numba = list(getattr(self, "non_numba", [])) + [
            "non_numba",
            "label",
        ]

    def __setattr__(self, key, value):
        # Validate annotated attributes on every assignment; unannotated
        # attributes pass through unchecked.
        hints = getattr(self.__class__, "__annotations__", {})
        if key in hints and not check_type(value, hints[key], self.__class__, self):
            print_error(f"{key} must be {hints[key]!r}, got {value!r}")
        super().__setattr__(key, value)


class ObjectSingleton(ObjectBase):
    """Base for objects with exactly one instance; never registered into the
    simulation object lists."""

    def __init__(self):
        super().__init__(register=False)


class ObjectNonSingleton(ObjectBase):
    """Base for objects that live in simulation-wide lists and carry an ID."""

    ID: int

    def __init__(self, register=True):
        self.ID = -1  # real ID is assigned by register_object when registered
        super().__init__(register)

        # `ID` is host-side bookkeeping, not lowered data; copy-extend to
        # keep the list instance-private (same rationale as in ObjectBase,
        # and the old `else` branch was unreachable since the base __init__
        # always creates the list).
        self.non_numba = list(getattr(self, "non_numba", [])) + ["ID"]


class ObjectPolymorphic(ObjectNonSingleton):
    """Base for polymorphic objects: `type` tags the concrete variant and
    `child_ID` indexes into that variant's own simulation list."""

    child_ID: int
    type: int

    def __init__(self, type_, register=True):
        self.child_ID = -1  # assigned by register_object
        self.type = type_
        super().__init__(register)

        # Rebind rather than mutate, for the same aliasing reason as above.
        self.non_numba = self.non_numba + ["child_ID"]
+# Helper functions +# ====================================================================================== + + +def register_object(object_): + from mcdc.object_.simulation import simulation + + from mcdc.object_.cell import Region, Cell + from mcdc.object_.universe import Universe, Lattice + from mcdc.object_.data import DataBase + from mcdc.object_.distribution import DistributionBase + from mcdc.object_.element import Element + from mcdc.object_.electron_reaction import ElectronReactionBase + from mcdc.object_.material import MaterialBase + from mcdc.object_.mesh import MeshBase + from mcdc.object_.nuclide import Nuclide + from mcdc.object_.neutron_reaction import NeutronReactionBase + from mcdc.object_.source import Source + from mcdc.object_.surface import Surface + from mcdc.object_.tally import Tally + + object_list = [] + if isinstance(object_, Cell): + object_list = simulation.cells + elif isinstance(object_, DataBase): + object_list = simulation.data + elif isinstance(object_, DistributionBase): + object_list = simulation.distributions + elif isinstance(object_, Lattice): + object_list = simulation.lattices + elif isinstance(object_, MaterialBase): + object_list = simulation.materials + elif isinstance(object_, MeshBase): + object_list = simulation.meshes + elif isinstance(object_, Element): + object_list = simulation.elements + elif isinstance(object_, ElectronReactionBase): + object_list = simulation.electron_reactions + elif isinstance(object_, Nuclide): + object_list = simulation.nuclides + elif isinstance(object_, NeutronReactionBase): + object_list = simulation.neutron_reactions + elif isinstance(object_, Region): + object_list = simulation.regions + elif isinstance(object_, Source): + object_list = simulation.sources + elif isinstance(object_, Surface): + object_list = simulation.surfaces + elif isinstance(object_, Tally): + object_list = simulation.tallies + elif isinstance(object_, Universe): + object_list = simulation.universes + else: + 
print_error(f"Unidentified object list for object {object_}") + + object_.ID = len(object_list) + if isinstance(object_, ObjectPolymorphic): + object_.child_ID = sum([x.type == object_.type for x in object_list]) + object_list.append(object_) + + +# ====================================================================================== +# Type checker +# ====================================================================================== + + +import re +import numpy as np +from typing import get_origin, get_args, Union, Annotated + + +def _name_from_str(s: str) -> str: + s = _strip_prefixes(s) + # strip generic args like "NDArray[float64]" → "NDArray" + s = s.split("[", 1)[0] + return s.split(".")[-1].strip() + + +def _mro_name_match(value, want: str) -> bool: + """Subclass-friendly match without resolving: compare wanted name to any base in MRO.""" + want_name = _name_from_str(want) + return any(base.__name__ == want_name for base in value.__class__.mro()) + + +# ---------- helpers for STRING annotations ---------- +_ANN_RE = re.compile(r"^\s*(?:typing\.)?Annotated\[(.*)\]\s*$") + + +def _split_top_level(s: str, sep: str = ",", brackets: str = "[]()") -> list[str]: + out, buf, depth = [], [], 0 + opens = set(brackets[::2]) + closes = set(brackets[1::2]) + pairs = dict(zip(brackets[1::2], brackets[::2])) + for ch in s: + if ch in opens: + depth += 1 + elif ch in closes: + depth -= 1 + if ch == sep and depth == 0: + out.append("".join(buf).strip()) + buf = [] + else: + buf.append(ch) + if buf: + out.append("".join(buf).strip()) + return out + + +def _strip_prefixes(s: str) -> str: + # normalize common module prefixes used in annotations + return ( + s.replace("typing.", "") + .replace("numpy.typing.", "") + .replace("numpy.", "") + .replace("np.", "") + ) + + +def _parse_annotated_str(hint_str: str): + """ + If hint_str is 'Annotated[ ... ]', return (base_str, meta_list) else None. + meta_list items remain raw strings (no eval). 
+ """ + m = _ANN_RE.match(_strip_prefixes(hint_str)) + if not m: + return None + inner = m.group(1) + parts = _split_top_level(inner, sep=",") + if not parts: + return None + base = parts[0].strip() + meta = [p.strip() for p in parts[1:]] + return base, meta + + +def _shape_tuple_from_str(s: str): + """ + Parse '(3,)', '(None, 3)', '(2,3,4)' → tuple[int|None, ...] or None if not a shape. + """ + s = s.strip() + if not (s.startswith("(") and s.endswith(")")): + return None + body = s[1:-1].strip() + if not body: + return () + items = _split_top_level(body, sep=",") + out = [] + for it in items: + it = it.strip() + if it == "": + continue # allow trailing comma + if it == "None": + out.append(None) + else: + try: + out.append(int(it)) + except ValueError: + return None + return tuple(out) + + +def _is_ndarray_base_str(base_str: str) -> bool: + base_norm = _strip_prefixes(base_str) + return base_norm.startswith("NDArray[") or base_norm.startswith("ndarray[") + + +def _extract_ndarray_dtype_key_from_str(base_str: str) -> str | None: + base_norm = _strip_prefixes(base_str) + if "[" not in base_norm or "]" not in base_norm: + return None + inside = base_norm[base_norm.find("[") + 1 : base_norm.rfind("]")].strip() + return _strip_prefixes(inside) # e.g. 'float' or 'float64' + + +def _dtype_matches(arr: np.ndarray, dtype_key: str | None) -> bool: + if dtype_key is None: + return True + key = dtype_key.lower() + if key == "float": + return np.issubdtype(arr.dtype, np.floating) + if key == "int": + return np.issubdtype(arr.dtype, np.integer) + try: + return arr.dtype == np.dtype(key) # e.g. 
'float64', 'int32' + except TypeError: + return True # unknown key → do not fail hard + + +def _shape_matches(arr: np.ndarray, shape: tuple[int | None, ...]) -> bool: + if arr.ndim != len(shape): + return False + return all(dim is None or dim == s for s, dim in zip(arr.shape, shape)) + + +# ---------- main checker ---------- +def check_type(value, hint, cls, obj=None) -> bool: + """ + Best-effort runtime checker tolerant of *string* annotations (no eval). + Supports: + - typing objects: list[T], set[T], dict[K,V], tuple[...,], Union/|, Annotated + - string 'Annotated[NDArray[float], (shape,)]' (dtype+shape) + - plain string class names (accept subclasses via MRO) + - string unions 'A | B' + """ + # -------- STRING annotations path (no resolution) -------- + if isinstance(hint, str): + h = hint.strip() + + # Handle plain "NDArray[...]" (dtype-only) without Annotated + if _is_ndarray_base_str(h): + if not isinstance(value, np.ndarray): + return False + dtype_key = _extract_ndarray_dtype_key_from_str(h) + return _dtype_matches(value, dtype_key) + + # String Annotated[...] 
+ parsed = _parse_annotated_str(h) + if parsed: + base_str, meta = parsed + + # NDArray with shape metadata + if _is_ndarray_base_str(base_str) and meta: + shape = _shape_tuple_from_str(meta[0]) + dtype_key = _extract_ndarray_dtype_key_from_str(base_str) + if not isinstance(value, np.ndarray): + return False + if shape is not None and not _shape_matches(value, shape): + return False + return _dtype_matches(value, dtype_key) + + # Otherwise treat base as class-like name → accept subclasses via MRO + return _mro_name_match(value, base_str) + + # String union: "A | B" + if "|" in h: + parts = _split_top_level(h, sep="|") + return any(check_type(value, p.strip(), cls) for p in parts) + + # Simple string container: "list[str]" (lightweight support) + if h.startswith("list[") and h.endswith("]"): + inner = _name_from_str(h[5:-1]) + if not isinstance(value, list): + return False + if inner == "str": + return all(isinstance(x, str) for x in value) + if inner in ("float", "float32", "float64"): + return all(isinstance(x, (float, int)) for x in value) + return True # permissive other inners + + # Plain forward-ref name → subclass-friendly check + return _mro_name_match(value, h) + + # -------- Structured typing objects path -------- + origin = get_origin(hint) + + # Annotated[T, meta...] 
(real object) + if origin is Annotated: + base, *meta = get_args(hint) + if isinstance(value, np.ndarray) and meta and isinstance(meta[0], tuple): + expected_shape = meta[0] + base_args = get_args(base) # e.g., NDArray[dtype] + dtype_key = None + if base_args: + dtype_arg = base_args[0] + if dtype_arg is float: + dtype_key = "float" + elif hasattr(dtype_arg, "name"): # np.float64 + dtype_key = dtype_arg.name + expected_shape_list = list(expected_shape) + for i, item in enumerate(expected_shape): + if type(item) == str: + expected_shape_list[i] = getattr(obj, item) + expected_shape = tuple(expected_shape_list) + return _shape_matches(value, expected_shape) and _dtype_matches( + value, dtype_key + ) + return check_type(value, base, cls) + + # NDArray[...] without shape meta + if origin is np.ndarray: + return isinstance(value, np.ndarray) + + # Builtins / classes + if origin is None: + try: + return isinstance(value, hint) + except TypeError: + return True + + # list[T] + if origin is list: + (t,) = get_args(hint) + return isinstance(value, list) and all(check_type(x, t, cls) for x in value) + + # set[T] + if origin is set: + (t,) = get_args(hint) + return isinstance(value, set) and all(check_type(x, t, cls) for x in value) + + # dict[K, V] + if origin is dict: + kt, vt = get_args(hint) + return isinstance(value, dict) and all( + check_type(k, kt, cls) and check_type(v, vt, cls) for k, v in value.items() + ) + + # tuple[T1, T2] or tuple[T, ...] + if origin is tuple: + args = get_args(hint) + if len(args) == 2 and args[1] is Ellipsis: + return isinstance(value, tuple) and all( + check_type(x, args[0], cls) for x in value + ) + return ( + isinstance(value, tuple) + and len(value) == len(args) + and all(check_type(x, t, cls) for x, t in zip(value, args)) + ) + + # Union[...] (incl Optional[T]) + if origin is Union: + return any(check_type(value, t, cls) for t in get_args(hint)) + + # Fallback: ABCs (Iterable, Sequence, etc.) 
+ try: + return isinstance(value, origin) + except TypeError: + return True diff --git a/mcdc/mcdc/object_/cell.py b/mcdc/mcdc/object_/cell.py new file mode 100644 index 000000000..e2885f642 --- /dev/null +++ b/mcdc/mcdc/object_/cell.py @@ -0,0 +1,308 @@ +from __future__ import annotations +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from mcdc.object_.surface import Surface + +#### + +import numpy as np +import sympy + +from numpy import float64 +from numpy.typing import NDArray +from operator import attrgetter +from types import NoneType +from typing import Annotated, Iterable +from sympy.logic.boolalg import Boolean + +#### + +from mcdc.constant import ( + BOOL_AND, + BOOL_NOT, + BOOL_OR, + FILL_LATTICE, + FILL_MATERIAL, + FILL_NONE, + FILL_UNIVERSE, + PI, +) +from mcdc.object_.base import ObjectNonSingleton +from mcdc.object_.material import MaterialBase +from mcdc.object_.simulation import simulation +from mcdc.object_.tally import Tally +from mcdc.object_.universe import Universe, Lattice +from mcdc.print_ import print_error + +# ====================================================================================== +# Region +# ====================================================================================== + + +# Region-making helper that checks if an identical region is already created +def make_region(type_, A, B): + for existing_region in simulation.regions: + if ( + type_ == existing_region.type + and A == existing_region.A + and B == existing_region.B + ): + return existing_region + return Region(type_, A, B) + + +class Region(ObjectNonSingleton): + type: str + A: Surface | Region | NoneType + B: Region | int | NoneType + + def __init__(self, type_, A, B): + super().__init__() + + self.type = type_ + self.A = A + self.B = B + + @classmethod + def make_halfspace(cls, surface, sense): + region = make_region("halfspace", surface, sense) + return region + + def __and__(self, other): + return make_region("intersection", self, other) + + def 
class Cell(ObjectNonSingleton):
    """
    Define a cell from a region and a fill.

    Parameters
    ----------
    region : Region, optional
        The spatial region defining the cell boundaries.
        Constructed using ``+surface`` / ``-surface`` half-space operators.
    fill : Material or MaterialMG or Universe or Lattice, optional
        The material or universe that fills the cell.
    name : str, optional
        User label.
    translation : array_like of float, optional
        Translation vector ``[tx, ty, tz]`` in cm.
    rotation : array_like of float, optional
        Rotation angles ``[rx, ry, rz]`` in degrees.

    See Also
    --------
    mcdc.Surface : Creates surfaces that can be used to define cell regions.
    mcdc.Universe : Groups cells into a universe.
    """

    # Annotations for Numba mode
    label: str = "cell"
    non_numba: list[str] = ["region", "fill", "region_RPN"]
    #
    name: str
    region: Region
    fill: MaterialBase | Universe | Lattice | NoneType
    fill_translated: bool
    fill_rotated: bool
    translation: Annotated[NDArray[float64], (3,)]
    rotation: Annotated[NDArray[float64], (3,)]
    region_RPN_tokens: list[int]
    region_RPN: Boolean
    surfaces: list[Surface]
    tallies: list[Tally]
    #
    fill_type: int
    fill_ID: int

    def __init__(
        self,
        region: Region | NoneType = None,
        fill: MaterialBase | Universe | Lattice | NoneType = None,
        name: str = "",
        # BUGFIX: immutable tuple defaults instead of shared mutable lists
        # (np.array accepts either, so callers are unaffected)
        translation: Iterable[float] = (0.0, 0.0, 0.0),
        rotation: Iterable[float] = (0.0, 0.0, 0.0),
    ):
        super().__init__()

        # Set name (default: auto-generated from the label and ID)
        if name != "":
            self.name = name
        else:
            self.name = f"{self.label}_{self.ID}"

        # Set region (default: the whole space)
        if region is None:
            self.region = make_region("all", None, None)
        else:
            self.region = region

        # Set fill
        self.fill = fill

        # Local coordinate modifier
        self.translation = np.array(translation, dtype=float)
        self.rotation = np.array(rotation, dtype=float)
        self.fill_translated = False
        self.fill_rotated = False
        if (self.translation != 0.0).any():
            self.fill_translated = True
        if (self.rotation != 0.0).any():
            self.fill_rotated = True
        # Convert rotation from degrees to radians
        self.rotation *= PI / 180.0

        # Set region Reverse Polish Notation (RPN)
        if self.region.type != "all":
            self.region_RPN_tokens = generate_RPN_tokens(self.region)
            self.region_RPN = generate_RPN(self.region_RPN_tokens)
        else:
            self.region_RPN_tokens = []
            self.region_RPN = Boolean(True)

        # List bounding surfaces
        self.surfaces = list_surfaces(self.region_RPN_tokens)

        # Cell tallies
        self.tallies = []

        # ==============================================================================
        # Numba attribute manual set up
        # ==============================================================================

        # Numba representation of the cell fill
        # (because a polymorphic fill object is not supported)
        if isinstance(fill, MaterialBase):
            self.fill_type = FILL_MATERIAL
            self.fill_ID = fill.ID
        elif isinstance(fill, Universe):
            self.fill_type = FILL_UNIVERSE
            self.fill_ID = fill.ID
        elif isinstance(fill, Lattice):
            self.fill_type = FILL_LATTICE
            self.fill_ID = fill.ID
        elif fill is None:  # BUGFIX: identity check instead of '== None'
            self.fill_type = FILL_NONE
            self.fill_ID = -1
        else:
            print_error(f"Unsupported cell fill: {fill}")

    def __repr__(self):
        text = "\n"
        text += f"Cell\n"
        text += f" - ID: {self.ID}\n"
        text += f" - Name: {self.name}\n"
        text += f" - {self.region}\n"
        # NOTE(review): Lattice is tested before Universe — presumably Lattice
        # is (or may become) a Universe subclass; confirm the intended order.
        if isinstance(self.fill, MaterialBase):
            text += f" - Fill (material): {self.fill.name}\n"
        elif isinstance(self.fill, Lattice):
            text += f" - Fill (lattice): {self.fill.name}\n"
        elif isinstance(self.fill, Universe):
            text += f" - Fill (universe): {self.fill.name}\n"
        if self.fill_translated:
            text += f" - Translation: {self.translation}\n"
        if self.fill_rotated:
            # Report rotation back in degrees (stored internally in radians)
            text += f" - Rotation: {self.rotation * 180 / PI}\n"
        text += f" - Bounding surfaces: {[x.ID for x in self.surfaces]}\n"
        if len(self.tallies) > 0:
            text += f" - Tallies: {[x.ID for x in self.tallies]}\n"
        return text
RPN: {token}") + + return rpn_tokens + + +def generate_RPN(rpn_tokens): + stack = [] + + for token in rpn_tokens: + if token >= 0: + stack.append(sympy.symbols(f"s{token}")) + else: + if token == BOOL_AND or token == BOOL_OR: + item_1 = stack.pop() + item_2 = stack.pop() + if token == BOOL_AND: + stack.append(item_1 & item_2) + else: + stack.append(item_1 | item_2) + + elif token == BOOL_NOT: + item = stack.pop() + if isinstance(item, Region): + item = sympy.symbols(str(item)[8:]) + + stack.append(~item) + + return sympy.logic.boolalg.simplify_logic(stack[0]) + + +def list_surfaces(rpn_tokens): + surfaces = [] + + for token in rpn_tokens: + if token >= 0: + surface = simulation.surfaces[token] + if surface not in surfaces: + surfaces.append(surface) + + return sorted(surfaces, key=attrgetter("ID")) diff --git a/mcdc/mcdc/object_/data.py b/mcdc/mcdc/object_/data.py new file mode 100644 index 000000000..569659294 --- /dev/null +++ b/mcdc/mcdc/object_/data.py @@ -0,0 +1,113 @@ +from numpy import float64 +from numpy.typing import NDArray + +#### + +from mcdc.constant import ( + DATA_NONE, + DATA_TABLE, + DATA_POLYNOMIAL, + INTERPOLATION_LINEAR, + INTERPOLATION_LOG, +) +from mcdc.object_.base import ObjectPolymorphic +from mcdc.print_ import print_1d_array + +# ====================================================================================== +# Data base class +# ====================================================================================== + + +class DataBase(ObjectPolymorphic): + # Annotations for Numba mode + label: str = "data" + + def __init__(self, type_, register=True): + super().__init__(type_, register) + + def __repr__(self): + text = "\n" + text += f"{decode_type(self.type)}\n" + text += f" - ID: {self.ID}\n" + return text + + +def decode_type(type_): + if type_ == DATA_NONE: + return "Data (None)" + elif type_ == DATA_TABLE: + return "Data (Table)" + elif type_ == DATA_POLYNOMIAL: + return "Data (Polynomial function)" + + +# 
class DataTable(DataBase):
    """Tabulated (x, y) data with a selectable interpolation scheme."""

    # Annotations for Numba mode
    label: str = "table_data"
    #
    x: NDArray[float64]  # independent-variable grid
    y: NDArray[float64]  # dependent values at each grid point
    interpolation: int  # INTERPOLATION_LINEAR or INTERPOLATION_LOG

    def __init__(self, x, y, interpolation=INTERPOLATION_LINEAR):
        super().__init__(DATA_TABLE)
        self.x = x
        self.y = y
        self.interpolation = interpolation

    def __repr__(self):
        parts = [super().__repr__()]
        parts.append(f" - x {print_1d_array(self.x)}\n")
        parts.append(f" - y {print_1d_array(self.y)}\n")
        if self.interpolation == INTERPOLATION_LINEAR:
            parts.append(" - Interpolation: linear\n")
        elif self.interpolation == INTERPOLATION_LOG:
            parts.append(" - Interpolation: log\n")
        return "".join(parts)
class DistributionBase(ObjectPolymorphic):
    """Common base class for all sampling-distribution objects."""

    # Annotations for Numba mode
    label: str = "distribution"

    def __init__(self, type_, register=True):
        # Type tagging and (optional) registration are handled by the base
        super().__init__(type_, register)

    def __repr__(self):
        header = f"{decode_type(self.type)}\n"
        return "\n" + header + f" - ID: {self.ID}\n"
class DistributionPMF(DistributionBase):
    """
    Discrete distribution defined by a probability mass function (PMF).

    Parameters
    ----------
    value : NDArray[float64]
        Discrete outcomes.
    pmf : NDArray[float64]
        Probability mass for each outcome; normalized by cmf_from_pmf.
    """

    # Annotations for Numba mode
    label: str = "pmf_distribution"
    #
    value: NDArray[float64]
    pmf: NDArray[float64]
    cmf: NDArray[float64]

    def __init__(self, value, pmf):
        type_ = DISTRIBUTION_PMF
        super().__init__(type_)

        self.value = value
        # cmf_from_pmf returns the (normalized) pmf and its cumulative form;
        # the raw pmf argument is never stored directly (the previous
        # 'self.pmf = pmf' was a dead store, immediately overwritten).
        self.pmf, self.cmf = cmf_from_pmf(pmf)

    def __repr__(self):
        text = super().__repr__()
        text += f" - value {print_1d_array(self.value)}\n"
        text += f" - pmf {print_1d_array(self.pmf)}\n"
        return text
class DistributionMultiTable(DistributionBase):
    """
    Family of tabulated distributions sharing a common incident grid.

    Parameters
    ----------
    grid : NDArray[float64]
        Incident grid, one entry per table.
    offset : NDArray[int64]
        Start index of each table within the flattened value/pdf arrays.
    value : NDArray[float64]
        Flattened outcome values of all tables.
    pdf : NDArray[float64]
        Flattened PDF values; normalized per table by multi_cdf_from_pdf.
    """

    # Annotations for Numba mode
    label: str = "multi_table_distribution"
    #
    grid: NDArray[float64]
    offset: NDArray[int64]
    value: NDArray[float64]
    pdf: NDArray[float64]
    cdf: NDArray[float64]

    def __init__(self, grid, offset, value, pdf):
        type_ = DISTRIBUTION_MULTITABLE
        super().__init__(type_)

        self.grid = grid
        self.offset = offset
        self.value = value
        # Normalize and accumulate per table; the raw pdf argument is never
        # stored directly (the previous 'self.pdf = pdf' was a dead store).
        self.pdf, self.cdf = multi_cdf_from_pdf(offset, value, pdf)

    def __repr__(self):
        text = super().__repr__()
        text += f" - grid {print_1d_array(self.grid)}\n"
        text += f" - offset {print_1d_array(self.offset)}\n"
        text += f" - value {print_1d_array(self.value)}\n"
        text += f" - pdf {print_1d_array(self.pdf)}\n"
        return text
class DistributionEvaporation(DistributionBase):
    """
    Evaporation energy spectrum defined by an energy-dependent nuclear
    temperature table and a restriction energy.
    """

    # Annotations for Numba mode
    label: str = "evaporation_distribution"
    #
    nuclear_temperature: DataTable  # temperature vs. incident energy [eV]
    restriction_energy: float  # restriction (cutoff) energy [eV]

    def __init__(
        self,
        nuclear_temperature_energy_grid,
        nuclear_temperature_value,
        restriction_energy,
    ):
        super().__init__(DISTRIBUTION_EVAPORATION)
        self.restriction_energy = restriction_energy
        self.nuclear_temperature = DataTable(
            nuclear_temperature_energy_grid, nuclear_temperature_value
        )

    def __repr__(self):
        lines = [
            super().__repr__(),
            f" - Restriction energy: {self.restriction_energy} [eV]\n",
            f" - Nuclear temperature {print_1d_array(self.nuclear_temperature.y)} [eV]\n",
            f" - Nuclear temperature energy grid {print_1d_array(self.nuclear_temperature.x)} [eV]\n",
        ]
        return "".join(lines)
class DistributionKalbachMann(DistributionBase):
    """
    Kalbach-Mann correlated energy-angle distribution.

    Parameters
    ----------
    energy : NDArray[float64]
        Incident-energy grid, one outgoing table per entry.
    offset : NDArray[int64]
        Start index of each outgoing table within the flattened arrays.
    energy_out : NDArray[float64]
        Flattened outgoing-energy values.
    pdf : NDArray[float64]
        Flattened outgoing-energy PDF; normalized by multi_cdf_from_pdf.
    precompound_factor : NDArray[float64]
        Kalbach precompound fraction per outgoing point.
    angular_slope : NDArray[float64]
        Kalbach angular slope per outgoing point.
    """

    # Annotations for Numba mode
    label: str = "kalbach_mann_distribution"
    #
    energy: NDArray[float64]
    offset: NDArray[int64]
    energy_out: NDArray[float64]
    pdf: NDArray[float64]
    cdf: NDArray[float64]
    precompound_factor: NDArray[float64]
    angular_slope: NDArray[float64]

    def __init__(
        self, energy, offset, energy_out, pdf, precompound_factor, angular_slope
    ):
        type_ = DISTRIBUTION_KALBACH_MANN
        super().__init__(type_)

        self.energy = energy
        self.offset = offset
        self.energy_out = energy_out
        self.precompound_factor = precompound_factor
        self.angular_slope = angular_slope

        # Normalize and accumulate per incident-energy table; the raw pdf
        # argument is never stored directly (the previous 'self.pdf = pdf'
        # was a dead store, immediately overwritten).
        self.pdf, self.cdf = multi_cdf_from_pdf(offset, energy_out, pdf)

    def __repr__(self):
        text = super().__repr__()
        text += f" - grid {print_1d_array(self.energy)} [eV]\n"
        text += f" - offset {print_1d_array(self.offset)}\n"
        text += f" - energy {print_1d_array(self.energy_out)} [eV]\n"
        text += f" - energy-pdf {print_1d_array(self.pdf)} [/eV]\n"
        text += f" - precompound factor {print_1d_array(self.precompound_factor)}\n"
        text += f" - angular slope {print_1d_array(self.angular_slope)}\n"
        return text
"cosine_offset" is reserved to describe "cosine" + cosine: NDArray[float64] + cosine_pdf: NDArray[float64] + cosine_cdf: NDArray[float64] + + def __init__( + self, energy, offset, energy_out, pdf, cosine_offset, cosine, cosine_pdf + ): + type_ = DISTRIBUTION_TABULATED_ENERGY_ANGLE + super().__init__(type_) + + self.energy = energy + self.offset = offset + + self.energy_out = energy_out + self.pdf = pdf + self.cosine_offset_ = cosine_offset + + self.cosine = cosine + self.cosine_pdf = cosine_pdf + + self.pdf, self.cdf = multi_cdf_from_pdf(offset, energy_out, pdf) + + self.cosine_cdf = np.zeros_like(self.cosine_pdf) + for i in range(len(offset)): + start = offset[i] + if i + 1 < len(offset): + end = offset[i + 1] + else: + end = len(cosine) + inner_offset = cosine_offset[start:end] + + start = inner_offset[0] + if i + 1 < len(offset): + end = cosine_offset[end] + else: + end = len(cosine) + + inner_offset_local = inner_offset - inner_offset[0] + self.cosine_pdf[start:end], self.cosine_cdf[start:end] = multi_cdf_from_pdf( + inner_offset_local, cosine[start:end], cosine_pdf[start:end] + ) + + def __repr__(self): + text = super().__repr__() + text += f" - grid {print_1d_array(self.energy)} [eV]\n" + text += f" - offset {print_1d_array(self.offset)}\n" + text += f" - energy {print_1d_array(self.energy_out)} [eV]\n" + text += f" - energy-pdf {print_1d_array(self.pdf)} [/eV]\n" + text += f" - cosine-offset {print_1d_array(self.cosine_offset_)}\n" + text += f" - cosine {print_1d_array(self.cosine)}\n" + text += f" - cosine-pdf {print_1d_array(self.cosine_pdf)}\n" + return text + + +# ====================================================================================== +# N-Body +# ====================================================================================== + + +class DistributionNBody(DistributionBase): + # Annotations for Numba mode + label: str = "nbody_distribution" + # + value: NDArray[float64] + pdf: NDArray[float64] + cdf: NDArray[float64] + + def 
__init__(self, value, pdf): + type_ = DISTRIBUTION_N_BODY + super().__init__(type_) + + self.value = value + self.pdf = pdf + + self.pdf, self.cdf = cdf_from_pdf(value, pdf) + + def __repr__(self): + text = super().__repr__() + text += f" - value {print_1d_array(self.value)}\n" + text += f" - pdf {print_1d_array(self.pdf)}\n" + return text diff --git a/mcdc/mcdc/object_/electron_reaction.py b/mcdc/mcdc/object_/electron_reaction.py new file mode 100644 index 000000000..0feeb8fff --- /dev/null +++ b/mcdc/mcdc/object_/electron_reaction.py @@ -0,0 +1,278 @@ +from numpy import float64 +from numpy.typing import NDArray + +#### + +import mcdc.object_.distribution as distribution + +from mcdc.constant import ( + ELECTRON_REACTION_BREMSSTRAHLUNG, + ELECTRON_REACTION_EXCITATION, + ELECTRON_REACTION_ELASTIC_SCATTERING, + ELECTRON_REACTION_IONIZATION, + MU_CUTOFF, + REFERENCE_FRAME_COM, + REFERENCE_FRAME_LAB, +) +from mcdc.object_.base import ObjectPolymorphic +from mcdc.object_.data import DataBase, DataTable +from mcdc.object_.distribution import DistributionBase, DistributionMultiTable +from mcdc.print_ import print_1d_array + +# ====================================================================================== +# Electron reaction base class +# ====================================================================================== + + +class ElectronReactionBase(ObjectPolymorphic): + # Annotations for Numba mode + label: str = "electron_reaction" + # + MT: int + xs: NDArray[float64] + xs_offset_: int # "xs_offset" is reserved for "xs" + reference_frame: int + + def __init__(self, type_, MT, xs, xs_offset, reference_frame): + super().__init__(type_) + self.MT = MT + self.xs = xs + self.xs_offset_ = xs_offset + self.reference_frame = reference_frame + + def __repr__(self): + text = "\n" + text += f"{decode_type(self.type)}\n" + text += f" - ID: {self.ID}\n" + text += f" - MT: {self.MT}\n" + text += f" - XS {print_1d_array(self.xs)} barn\n" + text += f" - Reference frame: 
def decode_type(type_):
    """Human-readable name for an electron reaction type constant."""
    names = {
        ELECTRON_REACTION_IONIZATION: "Electron ionization",
        ELECTRON_REACTION_ELASTIC_SCATTERING: "Electron elastic scattering",
        ELECTRON_REACTION_BREMSSTRAHLUNG: "Electron bremsstrahlung",
        ELECTRON_REACTION_EXCITATION: "Electron excitation",
    }
    # Unknown constants yield None, matching the original if/elif fallthrough
    return names.get(type_)


def decode_reference_frame(type_):
    """Human-readable name for a reference-frame constant."""
    names = {
        REFERENCE_FRAME_LAB: "Laboratory",
        REFERENCE_FRAME_COM: "Center of mass",
    }
    return names.get(type_)
product["energy_offset"][()], + product["value"][()], + product["PDF"][()], + ) + ) + + return cls( + MT, + xs, + xs_offset, + reference_frame, + subshell_xs, + subshell_product, + ) + + def __repr__(self): + text = super().__repr__() + text += f" - Number of subshells: {self.N_subshell}\n" + for i in range(self.N_subshell): + text += f" - Subshell {i+1}\n" + text += f" - XS: DataTable [ID: {self.subshell_xs[i].ID}]\n" + product = self.subshell_product[i] + text += f" - Secondary electron spectrum: {distribution.decode_type(product.type)} [ID: {product.ID}]\n" + return text + + +# ====================================================================================== +# Electron elastic scattering +# ====================================================================================== + + +class ElectronReactionElasticScattering(ElectronReactionBase): + # Annotations for Numba mode + label: str = "electron_elastic_scattering_reaction" + # + mu_cut: float + xs_large: DataBase + mu: DistributionMultiTable + + def __init__( + self, + MT, + xs, + xs_offset, + reference_frame, + xs_large, + mu, + ): + type_ = ELECTRON_REACTION_ELASTIC_SCATTERING + super().__init__(type_, MT, xs, xs_offset, reference_frame) + self.mu_cut = MU_CUTOFF + self.xs_large = xs_large + self.mu = mu + + @classmethod + def from_h5_group(cls, h5_group): + MT, xs, xs_offset, reference_frame = set_basic_properties(h5_group) + + large_angle = h5_group["large_angle"] + xs_large = DataTable(large_angle["xs_energy"][()], large_angle["xs"][()]) + + mu_group = large_angle["scattering_cosine"] + mu = DistributionMultiTable( + mu_group["energy_grid"][()], + mu_group["energy_offset"][()], + mu_group["value"][()], + mu_group["PDF"][()], + ) + + return cls(MT, xs, xs_offset, reference_frame, xs_large, mu) + + def __repr__(self): + text = super().__repr__() + text += f" - Mu cut: {self.mu_cut}\n" + text += f" - Large angle XS: DataTable [ID: {self.xs_large.ID}]\n" + text += f" - Scattering cosine: 
{distribution.decode_type(self.mu.type)} [ID: {self.mu.ID}]\n" + return text + + +# ====================================================================================== +# Electron bremsstrahlung +# ====================================================================================== + + +class ElectronReactionBremsstrahlung(ElectronReactionBase): + # Annotations for Numba mode + label: str = "electron_bremsstrahlung_reaction" + # + eloss: DataBase + + def __init__(self, MT, xs, xs_offset, reference_frame, eloss): + type_ = ELECTRON_REACTION_BREMSSTRAHLUNG + super().__init__(type_, MT, xs, xs_offset, reference_frame) + self.eloss = eloss + + @classmethod + def from_h5_group(cls, h5_group): + MT, xs, xs_offset, reference_frame = set_basic_properties(h5_group) + + base = h5_group["energy_loss"] + eloss = DataTable(base["energy"][()], base["value"][()]) + + return cls(MT, xs, xs_offset, reference_frame, eloss) + + def __repr__(self): + text = super().__repr__() + text += f" - Energy loss: DataTable [ID: {self.eloss.ID}]\n" + return text + + +# ====================================================================================== +# Electron excitation +# ====================================================================================== + + +class ElectronReactionExcitation(ElectronReactionBase): + # Annotations for Numba mode + label: str = "electron_excitation_reaction" + # + eloss: DataBase + + def __init__(self, MT, xs, xs_offset, reference_frame, eloss): + type_ = ELECTRON_REACTION_EXCITATION + super().__init__(type_, MT, xs, xs_offset, reference_frame) + self.eloss = eloss + + @classmethod + def from_h5_group(cls, h5_group): + MT, xs, xs_offset, reference_frame = set_basic_properties(h5_group) + + base = h5_group["energy_loss"] + eloss = DataTable(base["energy"][()], base["value"][()]) + + return cls(MT, xs, xs_offset, reference_frame, eloss) + + def __repr__(self): + text = super().__repr__() + text += f" - Energy loss: DataTable [ID: {self.eloss.ID}]\n" 
+ return text + + +# ====================================================================================== +# Helper functions +# ====================================================================================== + + +def set_basic_properties(h5_group): + MT = h5_group.attrs["MT"][()] + xs = h5_group["xs"][()] + xs_offset = h5_group["xs"].attrs["offset"] + reference_frame = h5_group["reference_frame"][()].decode("utf-8") + if reference_frame == "LAB": + reference_frame = REFERENCE_FRAME_LAB + elif reference_frame == "COM": + reference_frame = REFERENCE_FRAME_COM + return MT, xs, xs_offset, reference_frame diff --git a/mcdc/mcdc/object_/element.py b/mcdc/mcdc/object_/element.py new file mode 100644 index 000000000..acc1270a0 --- /dev/null +++ b/mcdc/mcdc/object_/element.py @@ -0,0 +1,160 @@ +import h5py +import numpy as np +import os + +from numpy import float64 +from numpy.typing import NDArray + +#### + +from mcdc.object_.base import ObjectNonSingleton +from mcdc.object_.electron_reaction import ( + ElectronReactionBremsstrahlung, + ElectronReactionElasticScattering, + ElectronReactionExcitation, + ElectronReactionIonization, +) + + +class Element(ObjectNonSingleton): + # Annotations for Numba mode + label: str = "element" + # + name: str + atomic_weight_ratio: float + atomic_number: int + # + electron_xs_energy_grid: NDArray[float64] + electron_total_xs: NDArray[float64] + electron_ionization_xs: NDArray[float64] + electron_elastic_xs: NDArray[float64] + electron_excitation_xs: NDArray[float64] + electron_bremsstrahlung_xs: NDArray[float64] + # + electron_ionization_reactions: list[ElectronReactionIonization] + electron_elastic_scattering_reactions: list[ElectronReactionElasticScattering] + electron_excitation_reactions: list[ElectronReactionExcitation] + electron_bremsstrahlung_reactions: list[ElectronReactionBremsstrahlung] + # + electron_ionization_subshell_binding_energy: NDArray[float64] + + def __init__(self, element_name: str): + super().__init__() + 
        self.name = element_name

        # Basic properties
        # Library directory comes from the MCDC_LIB environment variable.
        dir_name = os.getenv("MCDC_LIB")
        file_name = f"{element_name}.h5"
        file = h5py.File(f"{dir_name}/{file_name}", "r")
        self.atomic_weight_ratio = float(file["atomic_weight_ratio"][()])
        self.atomic_number = int(file["atomic_number"][()])
        file.close()

    def set_electron_data(self):
        """Load electron reaction XS and distributions from the library file."""
        element_name = self.name

        # Load data library
        dir_name = os.getenv("MCDC_LIB")
        file_name = f"{element_name}.h5"
        file = h5py.File(f"{dir_name}/{file_name}", "r")

        # The reactions
        rx_names = [
            "elastic_scattering",
            "excitation",
            "bremsstrahlung",
            "ionization",
        ]

        # The reaction MTs (subgroup names starting with "MT"; missing
        # reaction types get an empty list)
        MTs = {}
        for name in rx_names:
            if name not in file["electron_reactions"]:
                MTs[name] = []
                continue

            MTs[name] = [
                x for x in file[f"electron_reactions/{name}"] if x.startswith("MT")
            ]

        # ==========================================================================
        # Reaction XS
        # ==========================================================================

        self.electron_xs_energy_grid = file["electron_reactions/xs_energy_grid"][()]
        self.electron_total_xs = np.zeros_like(self.electron_xs_energy_grid)
        self.electron_elastic_xs = np.zeros_like(self.electron_xs_energy_grid)
        self.electron_excitation_xs = np.zeros_like(self.electron_xs_energy_grid)
        self.electron_bremsstrahlung_xs = np.zeros_like(self.electron_xs_energy_grid)
        self.electron_ionization_xs = np.zeros_like(self.electron_xs_energy_grid)

        # Container order must match rx_names order
        xs_containers = [
            self.electron_elastic_xs,
            self.electron_excitation_xs,
            self.electron_bremsstrahlung_xs,
            self.electron_ionization_xs,
        ]
        for xs_container, rx_name in list(zip(xs_containers, rx_names)):
            for MT in MTs[rx_name]:
                xs = file[f"electron_reactions/{rx_name}/{MT}/xs"]
                # Each XS table may start partway up the union energy grid
                xs_container[xs.attrs["offset"] :] += xs[()]

        self.electron_total_xs = (
            self.electron_elastic_xs
            + self.electron_excitation_xs
            + self.electron_bremsstrahlung_xs
            + self.electron_ionization_xs
        )

        # ==========================================================================
        # The reactions
        # ==========================================================================

        self.electron_elastic_scattering_reactions = []
        self.electron_excitation_reactions = []
        self.electron_bremsstrahlung_reactions = []
        self.electron_ionization_reactions = []

        # Container and class order must match rx_names order
        rx_containers = [
            self.electron_elastic_scattering_reactions,
            self.electron_excitation_reactions,
            self.electron_bremsstrahlung_reactions,
            self.electron_ionization_reactions,
        ]
        rx_classes = [
            ElectronReactionElasticScattering,
            ElectronReactionExcitation,
            ElectronReactionBremsstrahlung,
            ElectronReactionIonization,
        ]
        for rx_container, rx_name, rx_class in list(
            zip(rx_containers, rx_names, rx_classes)
        ):
            for MT in MTs[rx_name]:
                h5_group = file[f"electron_reactions/{rx_name}/{MT}"]
                rx_container.append(rx_class.from_h5_group(h5_group))

        # ==========================================================================
        # Ionization element attributes
        # ==========================================================================

        # Subshell binding energies are taken from the first ionization MT only
        binding_energy = []
        if len(MTs["ionization"]) > 0:
            h5_group = file[f"electron_reactions/ionization/{MTs['ionization'][0]}"]
            for name in h5_group["subshells"]:
                subshell = h5_group[f"subshells/{name}"]
                binding_energy.append(float(subshell["binding_energy"][()]))

        self.electron_ionization_subshell_binding_energy = np.asarray(binding_energy)

        file.close()

    def __repr__(self):
        text = "\n"
        text += f"Element\n"
        text += f" - ID: {self.ID}\n"
        text += f" - Name: {self.name}\n"
        text += f" - Atomic number: {self.atomic_number}\n"
        text += f" - Atomic weight ratio: {self.atomic_weight_ratio}\n"
        return text
diff --git a/mcdc/mcdc/object_/gpu_tools.py b/mcdc/mcdc/object_/gpu_tools.py
new file mode 100644
index 000000000..b4d4965a3
--- /dev/null
+++ b/mcdc/mcdc/object_/gpu_tools.py
from dataclasses import dataclass
from numpy
import uintp + +#### + +from mcdc.object_.base import ObjectSingleton + + +@dataclass +class GPUMeta(ObjectSingleton): + # Annotations for Numba mode + label: str = "gpu_meta" + # + state_pointer: uintp = uintp(0) + program_pointer: uintp = uintp(0) + simulation_pointer: uintp = uintp(0) + data_pointer: uintp = uintp(0) + + # Note that the uintp is manually overriden in code_factory. diff --git a/mcdc/mcdc/object_/material.py b/mcdc/mcdc/object_/material.py new file mode 100644 index 000000000..c2b2fbfbe --- /dev/null +++ b/mcdc/mcdc/object_/material.py @@ -0,0 +1,541 @@ +import numpy as np +import os + +from numpy import float64 +from numpy.typing import NDArray +from types import NoneType +from typing import Annotated + +#### + +from mcdc.constant import MATERIAL, MATERIAL_MG +from mcdc.object_.base import ObjectPolymorphic +from mcdc.object_.element import Element +from mcdc.object_.nuclide import Nuclide +from mcdc.object_.simulation import simulation +from mcdc.object_.util import ISOTOPIC_ABUNDANCE +from mcdc.print_ import print_1d_array, print_error + +# ====================================================================================== +# Material base class +# ====================================================================================== + + +class MaterialBase(ObjectPolymorphic): + # Annotations for Numba mode + label: str = "material" + # + name: str + fissionable: bool + + def __init__(self, type_, name): + super().__init__(type_) + + # Set name + if name != "": + self.name = name + else: + self.name = f"{self.label}_{self.child_ID}" + + self.fissionable = False + + def __repr__(self): + text = "\n" + text += f"{decode_type(self.type)}\n" + text += f" - ID: {self.ID}\n" + text += f" - Name: {self.name}\n" + text += f" - Fissionable: {self.fissionable}\n" + return text + + +def decode_type(type_): + if type_ == MATERIAL: + return "Material" + elif type_ == MATERIAL_MG: + return "Multigroup material" + + +# 
====================================================================================== +# Native material +# ====================================================================================== + + +class Material(MaterialBase): + """ + Define a continuous-energy material from a nuclide composition. + + Parameters + ---------- + name : str, optional + User label. + nuclide_composition : dict + Dictionary mapping nuclide names (str) to atom densities (float). + element_composition : dict + Dictionary mapping element names (str) to atom densities (float). + temperature : float, optional + Temperature in Kelvin (default 293.6 K). + + Returns + ------- + Material + The material object. + + Notes + ----- + Requires the ``MCDC_LIB`` environment variable to point to the nuclear + data library directory. + + See Also + -------- + mcdc.MaterialMG : Creates a multigroup material. + """ + + # Annotations for Numba mode + label: str = "native_material" + non_numba: list[str] = ["nuclide_composition", "element_composition"] + # + nuclide_composition: dict[Nuclide, float] + element_composition: dict[Element, float] + # + nuclides: list[Nuclide] + elements: list[Element] + nuclide_densities: NDArray[float64] + element_densities: NDArray[float64] + + def __init__( + self, + name: str = "", + nuclide_composition: dict[str, float] = {}, + element_composition: dict[str, float] = {}, + temperature: float = 293.6, + ): + type_ = MATERIAL + super().__init__(type_, name) + + # Temperature + self.temperature = temperature + + # Dictionary connecting nuclides to respective densities + self.nuclide_composition = {} + + # Dictionary connecting elements to respective densities + self.element_composition = {} + + # Numba representation of nuclide_composition + self.nuclides = [] + self.nuclide_densities = np.zeros(len(nuclide_composition)) + + # Numba representation of element_composition + self.elements = [] + self.element_densities = np.zeros(len(element_composition)) + + # Check if 
library directory is set + lib_dir = os.getenv("MCDC_LIB") + if lib_dir is None: + print_error("Environment variable MCDC_LIB is not set") + + # Check that only one composition is supplied + if len(nuclide_composition) > 0 and len(element_composition) > 0: + print_error( + "Cannot specify both nuclide_composition and element_composition" + ) + + if len(nuclide_composition) == 0 and len(element_composition) == 0: + print_error( + "Must specify either nuclide_composition or element_composition" + ) + + # Loop over the items in the elemental composition + for i, (key, value) in enumerate(element_composition.items()): + element_name = key + element_density = value + + # Check if element is already created + found = False + for element in simulation.elements: + if element.name == element_name: + found = True + break + + # Create the element object if needed + if not found: + element = Element(element_name) + + # Register the element composition + self.elements.append(element) + self.element_densities[i] = element_density + self.element_composition[element] = element_density + + # Loop over the items in the nuclide composition + for i, (key, value) in enumerate(nuclide_composition.items()): + nuclide_name = key + nuclide_density = value + + # Get supported temperature + nearest_temperature = min(TEMPERATURES, key=lambda x: abs(x - temperature)) + + # Check if nuclide-temperature is available in the library + file_name = f"{nuclide_name}-{nearest_temperature}K.h5" + if not file_name in os.listdir(lib_dir): + print_error( + f"Nuclide {nuclide_name} at temperature {nearest_temperature} K is not available in the library" + ) + + # Check if nuclide is already created + found = False + for nuclide in simulation.nuclides: + if ( + nuclide.name == nuclide_name + and nearest_temperature == nuclide.temperature + ): + found = True + break + + # Create the nuclide to objects if needed + if not found: + nuclide = Nuclide(nuclide_name, nearest_temperature) + + # Register the nuclide 
composition + self.nuclides.append(nuclide) + self.nuclide_densities[i] = nuclide_density + self.nuclide_composition[nuclide] = nuclide_density + + # Promote nuclide flags to material + if nuclide.fissionable: + self.fissionable = True + + def __repr__(self): + text = super().__repr__() + text += f" - Temperature: {self.temperature} K\n" + if len(self.nuclide_composition) > 0: + text += f" - Nuclide composition [atoms/barn-cm]\n" + for nuclide in self.nuclide_composition.keys(): + text += ( + f" - {nuclide.name:<5} | {self.nuclide_composition[nuclide]}\n" + ) + if len(self.element_composition) > 0: + text += f" - Element composition [atoms/barn-cm]\n" + for element in self.element_composition.keys(): + text += ( + f" - {element.name:<5} | {self.element_composition[element]}\n" + ) + return text + + +# Currently supported temperatures +TEMPERATURES = [0.1, 233.15, 273.15, 293.6, 600.0, 900.0, 1200.0, 2500.0] + + +# ====================================================================================== +# Multigroup material +# ====================================================================================== + + +class MaterialMG(MaterialBase): + """ + Define a multigroup material. + + Cross-section arrays are provided as NumPy arrays of length ``G`` (number + of energy groups). Scatter and fission matrices are ``(G, G)``. + + Parameters + ---------- + name : str, optional + User label. + capture : ndarray, optional + Capture cross section for each group. + scatter : ndarray, optional + Scattering matrix ``(G, G)``. + fission : ndarray, optional + Fission cross section for each group. + nu_s : ndarray, optional + Average scattering multiplicity. + nu_p : ndarray, optional + Average prompt fission neutron yield. + nu_d : ndarray, optional + Average delayed fission neutron yield. + chi_p : ndarray, optional + Prompt fission spectrum. + chi_d : ndarray, optional + Delayed fission spectrum. + speed : ndarray, optional + Neutron speeds for each group (cm/s). 
+ decay_rate : ndarray, optional + Delayed neutron precursor decay rates (1/s). + + Returns + ------- + MaterialMG + The multigroup material object. + + See Also + -------- + mcdc.Material : Creates a continuous-energy material. + """ + + # Annotations for Numba mode + label: str = "multigroup_material" + # + G: int + J: int + mgxs_speed: Annotated[NDArray[float64], ("G",)] + mgxs_decay_rate: Annotated[NDArray[float64], ("J",)] + mgxs_capture: Annotated[NDArray[float64], ("G",)] + mgxs_scatter: Annotated[NDArray[float64], ("G",)] + mgxs_fission: Annotated[NDArray[float64], ("G",)] + mgxs_total: Annotated[NDArray[float64], ("G",)] + mgxs_nu_s: Annotated[NDArray[float64], ("G",)] + mgxs_nu_p: Annotated[NDArray[float64], ("G",)] + mgxs_nu_d: Annotated[NDArray[float64], ("G", "J")] + mgxs_nu_d_total: Annotated[NDArray[float64], ("G",)] + mgxs_nu_f: Annotated[NDArray[float64], ("G",)] + mgxs_chi_s: Annotated[NDArray[float64], ("G", "G")] + mgxs_chi_p: Annotated[NDArray[float64], ("G", "G")] + mgxs_chi_d: Annotated[NDArray[float64], ("J", "G")] + + def __init__( + self, + name: str = "", + capture: NDArray[float64] | NoneType = None, + scatter: NDArray[float64] | NoneType = None, + fission: NDArray[float64] | NoneType = None, + nu_s: NDArray[float64] | NoneType = None, + nu_p: NDArray[float64] | NoneType = None, + nu_d: NDArray[float64] | NoneType = None, + chi_p: NDArray[float64] | NoneType = None, + chi_d: NDArray[float64] | NoneType = None, + speed: NDArray[float64] | NoneType = None, + decay_rate: NDArray[float64] | NoneType = None, + ): + type_ = MATERIAL_MG + super().__init__(type_, name) + + # Energy group size + if capture is not None: + G = len(capture) + elif scatter is not None: + G = len(scatter) + elif fission is not None: + G = len(fission) + else: + print_error("Need to supply capture, scatter, or fission for MaterialMG") + self.G = G + + # Delayed group size + J = 0 + if nu_d is not None: + J = len(nu_d) + self.J = J + + # Allocate the attributes + 
self.mgxs_speed = np.ones(G) + self.mgxs_decay_rate = np.ones(J) * np.inf + self.mgxs_capture = np.zeros(G) + self.mgxs_scatter = np.zeros(G) + self.mgxs_fission = np.zeros(G) + self.mgxs_total = np.zeros(G) + self.mgxs_nu_s = np.ones(G) + self.mgxs_nu_p = np.zeros(G) + self.mgxs_nu_d = np.zeros([G, J]) + self.mgxs_nu_d_total = np.zeros([G]) + self.mgxs_nu_f = np.zeros(G) + self.mgxs_chi_s = np.zeros([G, G]) + self.mgxs_chi_p = np.zeros([G, G]) + self.mgxs_chi_d = np.zeros([J, G]) + + # Speed (vector of size G) + if speed is not None: + self.mgxs_speed = speed + + # Decay constant (vector of size J) + if decay_rate is not None: + self.mgxs_decay_rate = decay_rate + + # Cross-sections (vector of size G) + if capture is not None: + self.mgxs_capture = capture + if scatter is not None: + self.mgxs_scatter = np.sum(scatter, 0) + if fission is not None: + self.mgxs_fission = fission + self.fissionable = True + self.mgxs_total = self.mgxs_capture + self.mgxs_scatter + self.mgxs_fission + + # Scattering multiplication (vector of size G) + if nu_s is not None: + self.mgxs_nu_s = nu_s + + # Check if nu_p or nu_d is not provided, give fission + if fission is not None: + if nu_p is None and nu_d is None: + print_error("Need to supply nu_p or nu_d for fissionable MaterialMG") + + # Prompt fission production (vector of size G) + if nu_p is not None: + self.mgxs_nu_p = nu_p + + # Delayed fission production (matrix of size GxJ) + if nu_d is not None: + # Transpose: [dg, gin] -> [gin, dg] + self.mgxs_nu_d = np.swapaxes(nu_d, 0, 1)[:, :] + self.mgxs_nu_d_total = np.sum(self.mgxs_nu_d, axis=1) + + # Total fission production (vector of size G) + self.mgxs_nu_f = np.zeros_like(self.mgxs_nu_p) + self.mgxs_nu_f += self.mgxs_nu_p + for j in range(J): + self.mgxs_nu_f += self.mgxs_nu_d[:, j] + + # Scattering spectrum (matrix of size GxG) + if scatter is not None: + # Transpose: [gout, gin] -> [gin, gout] + self.mgxs_chi_s = np.swapaxes(scatter, 0, 1)[:, :] + for g in range(G): + if 
self.mgxs_scatter[g] > 0.0: + self.mgxs_chi_s[g, :] /= self.mgxs_scatter[g] + + # Prompt fission spectrum (matrix of size GxG) + if nu_p is not None: + if G == 1: + self.mgxs_chi_p[:, :] = np.array([[1.0]]) + elif chi_p is None: + print_error("Need to supply chi_p if nu_p is provided and G > 1") + else: + # Convert 1D spectrum to 2D + if chi_p.ndim == 1: + tmp = np.zeros((G, G)) + for g in range(G): + tmp[:, g] = chi_p + chi_p = tmp + # Transpose: [gout, gin] -> [gin, gout] + self.mgxs_chi_p[:, :] = np.swapaxes(chi_p, 0, 1)[:, :] + # Normalize + for g in range(G): + if np.sum(self.mgxs_chi_p[g, :]) > 0.0: + self.mgxs_chi_p[g, :] /= np.sum(self.mgxs_chi_p[g, :]) + + # Delayed fission spectrum (matrix of size JxG) + if nu_d is not None: + if G == 1: + self.mgxs_chi_d = np.ones([J, G]) + else: + if chi_d is None: + print_error("Need to supply chi_d if nu_d is provided and G > 1") + # Transpose: [gout, dg] -> [dg, gout] + self.mgxs_chi_d = np.swapaxes(chi_d, 0, 1)[:, :] + # Normalize + for dg in range(J): + if np.sum(self.mgxs_chi_d[dg, :]) > 0.0: + self.mgxs_chi_d[dg, :] /= np.sum(self.mgxs_chi_d[dg, :]) + + def __repr__(self): + text = super().__repr__() + text += f" - Multigroup data\n" + text += f" - G: {self.G}\n" + text += f" - J: {self.J}\n" + text += f" - Sigma_c {print_1d_array(self.mgxs_capture)}\n" + text += f" - Sigma_s {print_1d_array(self.mgxs_scatter)}\n" + text += f" - Sigma_f {print_1d_array(self.mgxs_fission)}\n" + text += f" - nu_s {print_1d_array(self.mgxs_nu_s)}\n" + text += f" - nu_p {print_1d_array(self.mgxs_nu_p)}\n" + text += f" - nu_d {print_1d_array(self.mgxs_nu_d.flatten())}\n" + text += f" - chi_s {print_1d_array(self.mgxs_chi_s.flatten())}\n" + text += f" - chi_fp {print_1d_array(self.mgxs_chi_p.flatten())}\n" + text += f" - chi_fd {print_1d_array(self.mgxs_chi_d.flatten())}\n" + text += f" - speed {print_1d_array(self.mgxs_speed)}\n" + text += f" - lambda {print_1d_array(self.mgxs_decay_rate)}\n" + return text + + +def 
set_nuclides_from_elements(material): + material.nuclides = [] + material.nuclide_composition = {} + nuclide_densities = [] + + # Get supported temperature + nearest_temperature = min(TEMPERATURES, key=lambda x: abs(x - material.temperature)) + + for element, element_density in material.element_composition.items(): + # To make sure that the abundance is normalized + norm = 0.0 + for abundance in ISOTOPIC_ABUNDANCE[element.name].values(): + norm += abundance + + # Loop over the nuclide composition + for nuclide_name, abundance in ISOTOPIC_ABUNDANCE[element.name].items(): + # Check if nuclide is already created + found = False + for nuclide in simulation.nuclides: + if ( + nuclide.name == nuclide_name + and nearest_temperature == nuclide.temperature + ): + found = True + break + + # Create the nuclide object if needed + if not found: + nuclide = Nuclide(nuclide_name, nearest_temperature) + + # Calculate nuclide density + nuclide_density = element_density * abundance / norm + + # Register the nuclide composition + material.nuclides.append(nuclide) + nuclide_densities.append(nuclide_density) + material.nuclide_composition[nuclide] = nuclide_density + + material.nuclide_densities = np.array(nuclide_densities) + + +def set_elements_from_nuclides(material): + material.elements = [] + material.element_composition = {} + + # Get the list of the element names + element_names = [] + for nuclide in material.nuclides: + element_name = nuclide.name[:2] + if element_name[1].isdigit(): + element_name = element_name[0] + if element_name not in element_names: + element_names.append(element_name) + element_densities = np.zeros(len(element_names)) + + # Iterate over all named elements + for i, element_name in enumerate(element_names): + # Check if element is already created + found = False + for element in simulation.elements: + if element.name == element_name: + found = True + break + + # Create the element object if needed + if not found: + element = Element(element_name) + + 
material.elements.append(element) + + # Iterate over all nuclides to get the total density + density = 0.0 + for nuclide, nuclide_density in material.nuclide_composition.items(): + # Skip if non-isotope + if nuclide.name[: len(element_name)] != element_name: + continue + + # Accumulate density + density += nuclide_density + + element_densities[i] = density + material.element_composition[element] = density + + material.element_densities = element_densities + + +def update_fissionable_from_nuclides(material): + material.fissionable = False + for nuclide in material.nuclides: + if nuclide.fissionable: + material.fissionable = True + break diff --git a/mcdc/mcdc/object_/mesh.py b/mcdc/mcdc/object_/mesh.py new file mode 100644 index 000000000..4c915f43e --- /dev/null +++ b/mcdc/mcdc/object_/mesh.py @@ -0,0 +1,198 @@ +from typing import Iterable +import numpy as np + +from numpy import float64 +from numpy.typing import NDArray + +#### + +from mcdc.constant import INF, MESH_STRUCTURED, MESH_UNIFORM +from mcdc.object_.base import ObjectPolymorphic +from mcdc.print_ import print_1d_array + +# ====================================================================================== +# Mesh base class +# ====================================================================================== + + +class MeshBase(ObjectPolymorphic): + # Annotations for Numba mode + label: str = "mesh" + # + name: str + N_bin: int + Nx: int + Ny: int + Nz: int + + def __init__(self, type_, name): + super().__init__(type_) + + # Set name + if name != "": + self.name = name + else: + self.name = f"{self.label}_{self.child_ID}" + + self.N_bin = 0 + + def __repr__(self): + text = "\n" + text += f"{decode_type(self.type)}\n" + text += f" - ID: {self.ID}\n" + text += f" - Name: {self.name}\n" + text += f" - # of bins: {self.N_bin}\n" + return text + + +def decode_type(type_): + if type_ == MESH_UNIFORM: + return "Uniform mesh" + elif type_ == MESH_STRUCTURED: + return "Structured mesh" + + +# 
====================================================================================== +# Uniform mesh +# ====================================================================================== + + +class MeshUniform(MeshBase): + """ + Define a uniform rectilinear mesh. + + Each axis is specified as ``(origin, width, N_bins)``. + + Parameters + ---------- + name : str, optional + User label. + x : tuple of (float, float, int), optional + ``(x0, dx, Nx)`` — origin, bin width, and number of bins along x. + y : tuple of (float, float, int), optional + ``(y0, dy, Ny)`` — origin, bin width, and number of bins along y. + z : tuple of (float, float, int), optional + ``(z0, dz, Nz)`` — origin, bin width, and number of bins along z. + + Returns + ------- + MeshUniform + The uniform mesh object. + + See Also + -------- + mcdc.MeshStructured : Creates a mesh with arbitrary bin edges. + mcdc.TallyMesh : Creates a tally on a mesh. + """ + + # Annotations for Numba mode + label: str = "uniform_mesh" + # + x0: float + dx: float + Nx: int + y0: float + dy: float + Ny: int + z0: float + dz: float + Nz: int + + def __init__( + self, + name: str = "", + x: tuple[float, float, int] = (-INF, 2 * INF, 1), + y: tuple[float, float, int] = (-INF, 2 * INF, 1), + z: tuple[float, float, int] = (-INF, 2 * INF, 1), + ): + type_ = MESH_UNIFORM + super().__init__(type_, name) + + # Set the grid + self.x0 = x[0] + self.dx = x[1] + self.Nx = x[2] + self.y0 = y[0] + self.dy = y[1] + self.Ny = y[2] + self.z0 = z[0] + self.dz = z[1] + self.Nz = z[2] + + self.N_bin = self.Nx * self.Ny * self.Nz + + def __repr__(self): + text = super().__repr__() + text += f" - Grid specification\n" + text += f" - (x0, dx, Nx): ({self.x0}, {self.dx}, {self.Nx}) [cm]\n" + text += f" - (y0, dy, Ny): ({self.y0}, {self.dy}, {self.Ny}) [cm]\n" + text += f" - (z0, dz, Nz): ({self.z0}, {self.dz}, {self.Nz}) [cm]\n" + return text + + +# ====================================================================================== +# 
Structured mesh +# ====================================================================================== + + +class MeshStructured(MeshBase): + """ + Define a structured rectilinear mesh with arbitrary bin edges. + + Parameters + ---------- + name : str, optional + User label. + x : array_like of float, optional + Bin edges along x (cm). + y : array_like of float, optional + Bin edges along y (cm). + z : array_like of float, optional + Bin edges along z (cm). + + Returns + ------- + MeshStructured + The structured mesh object. + + See Also + -------- + mcdc.MeshUniform : Creates a uniform mesh. + mcdc.TallyMesh : Creates a tally on a mesh. + """ + + # Annotations for Numba mode + label: str = "structured_mesh" + # + x: NDArray[float64] + y: NDArray[float64] + z: NDArray[float64] + + def __init__( + self, + name: str = "", + x: Iterable[float] = [-INF, INF], + y: Iterable[float] = [-INF, INF], + z: Iterable[float] = [-INF, INF], + ): + type_ = MESH_STRUCTURED + super().__init__(type_, name) + + # Set the grid + self.x = np.array(x) + self.y = np.array(y) + self.z = np.array(z) + + self.Nx = len(self.x) - 1 + self.Ny = len(self.y) - 1 + self.Nz = len(self.z) - 1 + + self.N_bin = self.Nx * self.Ny * self.Nz + + def __repr__(self): + text = super().__repr__() + text += f" - Grid specification\n" + text += f" - x {print_1d_array(self.x)} cm\n" + text += f" - y {print_1d_array(self.y)} cm\n" + text += f" - z {print_1d_array(self.z)} cm\n" + return text diff --git a/mcdc/mcdc/object_/neutron_reaction.py b/mcdc/mcdc/object_/neutron_reaction.py new file mode 100644 index 000000000..b730798e5 --- /dev/null +++ b/mcdc/mcdc/object_/neutron_reaction.py @@ -0,0 +1,406 @@ +from typing import Annotated +from numpy import float64 +from numpy.typing import NDArray + +#### + +import mcdc.object_.distribution as distribution + +from mcdc.constant import ( + ANGLE_ISOTROPIC, + ANGLE_ENERGY_CORRELATED, + ANGLE_DISTRIBUTED, + INTERPOLATION_LINEAR, + INTERPOLATION_LOG, + 
    NEUTRON_REACTION_CAPTURE,
    NEUTRON_REACTION_ELASTIC_SCATTERING,
    NEUTRON_REACTION_FISSION,
    NEUTRON_REACTION_INELASTIC_SCATTERING,
    REFERENCE_FRAME_COM,
    REFERENCE_FRAME_LAB,
)
from mcdc.object_.base import ObjectPolymorphic
from mcdc.object_.distribution import (
    DistributionBase,
    DistributionMultiTable,
    DistributionLevelScattering,
    DistributionEvaporation,
    DistributionMaxwellian,
    DistributionKalbachMann,
    DistributionTabulatedEnergyAngle,
    DistributionNBody,
)
from mcdc.object_.simulation import simulation
from mcdc.print_ import print_1d_array, print_error

# ======================================================================================
# Neutron reaction base class
# ======================================================================================


class NeutronReactionBase(ObjectPolymorphic):
    """Common state for all neutron reactions (MT, XS table, frame, Q-value)."""

    # Annotations for Numba mode
    label: str = "neutron_reaction"
    #
    MT: int
    xs: NDArray[float64]
    xs_offset_: int  # "xs_offset" is reserved for "xs"
    reference_frame: int
    q_value: float64

    def __init__(self, type_, MT, xs, xs_offset, reference_frame, q_value):
        super().__init__(type_)
        self.MT = MT
        self.xs = xs
        # Index on the union energy grid at which this reaction's XS starts
        self.xs_offset_ = xs_offset
        self.reference_frame = reference_frame
        self.q_value = q_value

    def __repr__(self):
        text = "\n"
        text += f"{decode_type(self.type)}\n"
        text += f" - ID: {self.ID}\n"
        text += f" - MT: {self.MT}\n"
        text += f" - XS {print_1d_array(self.xs)} barn\n"
        text += f" - Reference frame: {decode_reference_frame(self.reference_frame)}\n"
        text += f" - Q-value: {self.q_value}\n"
        return text


def decode_type(type_):
    """Return a display name for a NEUTRON_REACTION_* type constant."""
    if type_ == NEUTRON_REACTION_ELASTIC_SCATTERING:
        return "Neutron elastic scattering"
    elif type_ == NEUTRON_REACTION_CAPTURE:
        return "Neutron capture"
    elif type_ == NEUTRON_REACTION_INELASTIC_SCATTERING:
        return "Neutron inelastic scattering"
    elif type_ == NEUTRON_REACTION_FISSION:
        return "Neutron fission"


def decode_reference_frame(type_):
    """Return a display name for a REFERENCE_FRAME_* constant."""
    if type_ == REFERENCE_FRAME_LAB:
        return "Laboratory"
    elif type_ == REFERENCE_FRAME_COM:
        return "Center of mass"


# ======================================================================================
# Neutron elastic scattering
# ======================================================================================


class NeutronReactionElasticScattering(NeutronReactionBase):
    """Neutron elastic scattering with a tabulated scattering-cosine distribution."""

    # Annotations for Numba mode
    label: str = "neutron_elastic_scattering_reaction"
    #
    mu_table: DistributionMultiTable

    def __init__(self, MT, xs, xs_offset, reference_frame, mu):
        type_ = NEUTRON_REACTION_ELASTIC_SCATTERING
        # Elastic scattering has no Q-value (fixed at 0.0)
        super().__init__(type_, MT, xs, xs_offset, reference_frame, 0.0)
        self.mu_table = mu

    @classmethod
    def from_h5_group(cls, h5_group):
        """Build the reaction from its HDF5 group; the file's Q-value is ignored."""
        MT, xs, xs_offset, reference_frame, _ = set_basic_properties(h5_group)
        _, mu = set_angular_distribution(h5_group["angular_cosine_distribution"])
        return cls(MT, xs, xs_offset, reference_frame, mu)

    def __repr__(self):
        text = super().__repr__()
        text += f" - Scattering cosine: {distribution.decode_type(self.mu_table.type)} [ID: {self.mu_table.ID}]\n"
        return text


# ======================================================================================
# Neutron capture
# ======================================================================================


class NeutronReactionCapture(NeutronReactionBase):
    """Neutron capture; no secondary neutron data beyond the base properties."""

    # Annotations for Numba mode
    label: str = "neutron_capture_reaction"

    def __init__(self, MT, xs, xs_offset, reference_frame, q_value):
        type_ = NEUTRON_REACTION_CAPTURE
        super().__init__(type_, MT, xs, xs_offset, reference_frame, q_value)

    @classmethod
    def from_h5_group(cls, h5_group):
        """Build the reaction from its HDF5 group."""
        MT, xs, xs_offset, reference_frame, q_value = set_basic_properties(h5_group)
        return cls(MT, xs, xs_offset, reference_frame, q_value)


# ======================================================================================
# Neutron inelastic
scattering +# ====================================================================================== + + +class NeutronReactionInelasticScattering(NeutronReactionBase): + # Annotations for Numba mode + label: str = "neutron_inelastic_scattering_reaction" + # + multiplicity: int + angle_type: int + mu: DistributionBase + N_spectrum_probability_bin: int + N_spectrum: int + spectrum_probability_grid: NDArray[float64] + spectrum_probability: Annotated[ + NDArray[float64], ("N_spectrum_probability_bin", "N_spectrum") + ] + energy_spectra: list[DistributionBase] + + def __init__( + self, + MT, + xs, + xs_offset, + reference_frame, + q_value, + multiplicity, + angle_type, + mu, + spectrum_probability_grid, + spectrum_probability, + energy_spectra, + ): + type_ = NEUTRON_REACTION_INELASTIC_SCATTERING + super().__init__(type_, MT, xs, xs_offset, reference_frame, q_value) + + self.multiplicity = multiplicity + self.angle_type = angle_type + self.mu = mu + self.N_spectrum_probability_bin = len(spectrum_probability_grid) - 1 + self.N_spectrum = len(energy_spectra) + self.spectrum_probability_grid = spectrum_probability_grid + self.spectrum_probability = spectrum_probability + self.energy_spectra = energy_spectra + + @classmethod + def from_h5_group(cls, h5_group): + MT, xs, xs_offset, reference_frame, q_value = set_basic_properties(h5_group) + multiplicity = int(h5_group["multiplicity"][()]) + + angle_type, mu = set_angular_distribution( + h5_group["angular_cosine_distribution"] + ) + + # Energy spectra + spectrum_probability_grid = ( + h5_group[f"spectrum_probability_grid"][()] * 1e6 + ) # MeV to eV + spectrum_probability = h5_group[f"spectrum_probability"][()] + energy_spectra = [] + spectrum_names = [x for x in h5_group if x.startswith("energy_spectrum-")] + for spectrum_name in spectrum_names: + energy_spectra.append(set_energy_distribution(h5_group[f"{spectrum_name}"])) + + return cls( + MT, + xs, + xs_offset, + reference_frame, + q_value, + multiplicity, + angle_type, + 
mu, + spectrum_probability_grid, + spectrum_probability, + energy_spectra, + ) + + def __repr__(self): + text = super().__repr__() + if self.angle_type == ANGLE_ISOTROPIC: + text += f" - Scattering cosine: Isotropic\n" + elif self.angle_type == ANGLE_ENERGY_CORRELATED: + text += f" - Scattering cosine: Energy-correlated\n" + else: + text += f" - Scattering cosine: {distribution.decode_type(self.mu.type)} [ID: {self.mu.ID}]\n" + text += f" - Energy spectra\n" + text += f" - Probability energy grid {print_1d_array(self.spectrum_probability_grid)}\n" + for i in range(len(self.energy_spectra)): + text += f" - Spectrum {i+1}: {distribution.decode_type(self.energy_spectra[i])} [{print_1d_array(self.spectrum_probability[:,i])}] [ID: {self.energy_spectra[i].ID}]\n" + return text + + +# ====================================================================================== +# Neutron fission +# ====================================================================================== + + +class NeutronReactionFission(NeutronReactionBase): + # Annotations for Numba mode + label: str = "neutron_fission_reaction" + # + angle_type: int + mu: DistributionBase + spectrum: DistributionBase + + def __init__( + self, + MT, + xs, + xs_offset, + reference_frame, + q_value, + angle_type, + mu, + spectrum, + ): + type_ = NEUTRON_REACTION_FISSION + super().__init__(type_, MT, xs, xs_offset, reference_frame, q_value) + self.angle_type = angle_type + self.mu = mu + self.spectrum = spectrum + + @classmethod + def from_h5_group(cls, h5_group): + MT, xs, xs_offset, reference_frame, q_value = set_basic_properties(h5_group) + + # Prompt angular distribution + angle_type, mu = set_angular_distribution( + h5_group["angular_cosine_distribution"] + ) + + # Prompt spectrum + spectrum_names = [x for x in h5_group if x.startswith("energy_spectrum-")] + if len(spectrum_names) > 1: + print_error("Unsupported multi-distribution prompt fission spectrum") + spectrum = 
set_energy_distribution(h5_group[f"energy_spectrum-1"]) + + return cls( + MT, xs, xs_offset, reference_frame, q_value, angle_type, mu, spectrum + ) + + def __repr__(self): + text = super().__repr__() + text += f" - Prompt neutron\n" + if self.angle_type == ANGLE_ISOTROPIC: + text += f" - Emission cosine: Isotropic\n" + elif self.angle_type == ANGLE_ENERGY_CORRELATED: + text += f" - Emission cosine: Energy-correlated\n" + else: + text += f" - Emission cosine: {distribution.decode_type(self.mu.type)} [ID: {self.mu.ID}]\n" + text += f" - Energy spectrum: {distribution.decode_type(self.spectrum)} [ID: {self.spectrum.ID}]\n" + + return text + + +# ====================================================================================== +# Helper functions +# ====================================================================================== + + +def set_basic_properties(h5_group): + MT = h5_group.attrs["MT"][()] + xs = h5_group["xs"][()] + xs_offset = h5_group["xs"].attrs["offset"] + reference_frame = h5_group["reference_frame"][()].decode("utf-8") + if reference_frame == "LAB": + reference_frame = REFERENCE_FRAME_LAB + elif reference_frame == "COM": + reference_frame = REFERENCE_FRAME_COM + q_value = h5_group["Q-value"][()] + return MT, xs, xs_offset, reference_frame, q_value + + +def set_angular_distribution(h5_group): + mu_type = h5_group.attrs["type"] + if mu_type == "isotropic": + angle_type = ANGLE_ISOTROPIC + mu = simulation.distributions[0] + elif mu_type == "energy-correlated": + angle_type = ANGLE_ENERGY_CORRELATED + mu = simulation.distributions[0] + else: + angle_type = ANGLE_DISTRIBUTED + grid = h5_group[f"energy"][()] * 1e6 # MeV to eV + offset = h5_group[f"offset"][()] + value = h5_group[f"value"][()] + pdf = h5_group[f"pdf"][()] + mu = DistributionMultiTable(grid, offset, value, pdf) + + return angle_type, mu + + +def set_energy_distribution(h5_group): + spectrum_type = h5_group.attrs["type"] + + if spectrum_type == "tabulated": + grid = 
h5_group[f"energy"][()] * 1e6 # MeV to eV + offset = h5_group[f"offset"][()] + value = h5_group[f"value"][()] * 1e6 # MeV to eV + pdf = h5_group[f"pdf"][()] / 1e6 # /MeV to /eV + energy_spectrum = DistributionMultiTable(grid, offset, value, pdf) + + elif spectrum_type == "level-scattering": + C1 = h5_group["C1"][()] * 1e6 # MeV to eV + C2 = h5_group["C2"][()] + + energy_spectrum = DistributionLevelScattering(C1, C2) + + elif spectrum_type == "evaporation": + energy = h5_group[f"temperature_energy_grid"][()] * 1e6 # MeV to eV + temperature = h5_group[f"temperature"][()] * 1e6 # MeV to eV + restriction_energy = h5_group[f"restriction_energy"][()] * 1e6 # MeV to eV + + energy_spectrum = DistributionEvaporation( + energy, temperature, restriction_energy + ) + + elif spectrum_type == "maxwellian": + energy = h5_group[f"temperature_energy_grid"][()] * 1e6 # MeV to eV + temperature = h5_group[f"temperature"][()] * 1e6 # MeV to eV + restriction_energy = h5_group[f"restriction_energy"][()] * 1e6 # MeV to eV + interpolation = h5_group[f"temperature_interpolation"][()].decode("utf-8") + if interpolation == "linear": + interpolation = INTERPOLATION_LINEAR + elif interpolation == "log": + interpolation = INTERPOLATION_LOG + + energy_spectrum = DistributionMaxwellian( + energy, temperature, restriction_energy, interpolation + ) + + elif spectrum_type == "kalbach-mann": + energy = h5_group[f"energy"][()] * 1e6 # MeV to eV + offset = h5_group[f"offset"][()] + + energy_out = h5_group[f"energy_out"][()] * 1e6 # MeV to eV + pdf = h5_group[f"pdf"][()] / 1e6 # /MeV to /eV + + precompound_factor = h5_group[f"precompound_factor"][()] + angular_slope = h5_group[f"angular_slope"][()] + + energy_spectrum = DistributionKalbachMann( + energy, offset, energy_out, pdf, precompound_factor, angular_slope + ) + + elif spectrum_type == "energy-angle-tabulated": + energy = h5_group[f"energy"][()] * 1e6 # MeV to eV + offset = h5_group[f"offset"][()] + + energy_out = h5_group[f"energy_out"][()] * 1e6 
# MeV to eV + pdf = h5_group[f"pdf"][()] / 1e6 # /MeV to /eV + cosine_offset = h5_group[f"cosine_offset"][()] + + cosine = h5_group[f"cosine"][()] + cosine_pdf = h5_group[f"cosine_pdf"][()] + + energy_spectrum = DistributionTabulatedEnergyAngle( + energy, offset, energy_out, pdf, cosine_offset, cosine, cosine_pdf + ) + + elif spectrum_type == "N-body": + value = h5_group["value"][()] * 1e6 # MeV to eV + pdf = h5_group["pdf"][()] / 1e6 # /MeV to /eV + + energy_spectrum = DistributionNBody(value, pdf) + + else: + print_error(f"Unsupported energy spectrum of type {spectrum_type}") + + return energy_spectrum diff --git a/mcdc/mcdc/object_/nuclide.py b/mcdc/mcdc/object_/nuclide.py new file mode 100644 index 000000000..db4b2f196 --- /dev/null +++ b/mcdc/mcdc/object_/nuclide.py @@ -0,0 +1,265 @@ +import h5py +import numpy as np +import os + +from numpy import float64 +from numpy.typing import NDArray + +#### + +from mcdc.object_.base import ObjectNonSingleton +from mcdc.object_.data import DataBase, DataPolynomial, DataTable +from mcdc.object_.distribution import DistributionBase +from mcdc.object_.neutron_reaction import ( + NeutronReactionCapture, + NeutronReactionElasticScattering, + NeutronReactionFission, + NeutronReactionInelasticScattering, + set_energy_distribution, +) +from mcdc.object_.simulation import simulation +from mcdc.print_ import print_1d_array, print_error + +# ====================================================================================== +# Nuclide +# ====================================================================================== + + +class Nuclide(ObjectNonSingleton): + # Annotations for Numba mode + label: str = "nuclide" + # + name: str + temperature: float + atomic_number: int + atomic_weight_ratio: float + fissionable: bool + excitation_level: int + # + neutron_xs_energy_grid: NDArray[float64] + neutron_total_xs: NDArray[float64] + neutron_elastic_xs: NDArray[float64] + neutron_capture_xs: NDArray[float64] + neutron_inelastic_xs: 
NDArray[float64] + neutron_fission_xs: NDArray[float64] + # + neutron_elastic_scattering_reactions: list[NeutronReactionElasticScattering] + neutron_capture_reactions: list[NeutronReactionCapture] + neutron_inelastic_scattering_reactions: list[NeutronReactionInelasticScattering] + neutron_fission_reactions: list[NeutronReactionFission] + # + neutron_fission_prompt_multiplicity: DataBase + neutron_fission_delayed_multiplicity: DataBase + N_neutron_fission_delayed_precursor: int + neutron_fission_delayed_fractions: NDArray[float64] + neutron_fission_delayed_decay_rates: NDArray[float64] + neutron_fission_delayed_spectra: list[DistributionBase] + + def __init__(self, nuclide_name, temperature): + super().__init__() + + self.name = nuclide_name + self.temperature = temperature + + # Basic properties + dir_name = os.getenv("MCDC_LIB") + file_name = f"{nuclide_name}-{temperature}K.h5" + file = h5py.File(f"{dir_name}/{file_name}", "r") + self.atomic_number = int(file["atomic_number"][()]) + self.atomic_weight_ratio = file["atomic_weight_ratio"][()] + self.fissionable = bool(file["fissionable"][()]) + self.excitation_level = int(file["excitation_level"][()]) + file.close() + + def set_neutron_data(self): + nuclide_name = self.name + temperature = self.temperature + + # Load data library + dir_name = os.getenv("MCDC_LIB") + file_name = f"{nuclide_name}-{temperature}K.h5" + file = h5py.File(f"{dir_name}/{file_name}", "r") + + # The reactions + rx_names = [ + "elastic_scattering", + "capture", + "inelastic_scattering", + "fission", + ] + + # The reaction MTs + MTs = {} + for name in rx_names: + if name not in file["neutron_reactions"]: + MTs[name] = [] + continue + + MTs[name] = [ + x for x in file[f"neutron_reactions/{name}"] if x.startswith("MT") + ] + + # ========================================================================== + # Reaction XS + # ========================================================================== + + # Energy grid + xs_energy = 
file["neutron_reactions/xs_energy_grid"][()] * 1e6 # MeV to eV + self.neutron_xs_energy_grid = xs_energy + + # The total XS + self.neutron_total_xs = np.zeros_like(self.neutron_xs_energy_grid) + self.neutron_elastic_xs = np.zeros_like(self.neutron_xs_energy_grid) + self.neutron_capture_xs = np.zeros_like(self.neutron_xs_energy_grid) + self.neutron_inelastic_xs = np.zeros_like(self.neutron_xs_energy_grid) + self.neutron_fission_xs = np.zeros_like(self.neutron_xs_energy_grid) + + xs_containers = [ + self.neutron_elastic_xs, + self.neutron_capture_xs, + self.neutron_inelastic_xs, + self.neutron_fission_xs, + ] + for xs_container, rx_name in list(zip(xs_containers, rx_names)): + for MT in MTs[rx_name]: + xs = file[f"neutron_reactions/{rx_name}/{MT}/xs"] + xs_container[xs.attrs["offset"] :] += xs[()] + + self.neutron_total_xs = ( + self.neutron_elastic_xs + + self.neutron_capture_xs + + self.neutron_inelastic_xs + + self.neutron_fission_xs + ) + + # ========================================================================== + # The reactions + # ========================================================================== + + self.neutron_elastic_scattering_reactions = [] + self.neutron_capture_reactions = [] + self.neutron_inelastic_scattering_reactions = [] + self.neutron_fission_reactions = [] + + rx_containers = [ + self.neutron_elastic_scattering_reactions, + self.neutron_capture_reactions, + self.neutron_inelastic_scattering_reactions, + self.neutron_fission_reactions, + ] + rx_classes = [ + NeutronReactionElasticScattering, + NeutronReactionCapture, + NeutronReactionInelasticScattering, + NeutronReactionFission, + ] + for rx_container, rx_name, rx_class in list( + zip(rx_containers, rx_names, rx_classes) + ): + for MT in MTs[rx_name]: + h5_group = file[f"neutron_reactions/{rx_name}/{MT}"] + reaction = rx_class.from_h5_group(h5_group) + rx_container.append(reaction) + + # ============================================================================== + # Fission 
nuclide attributes + # ============================================================================== + + if not self.fissionable: + self.neutron_fission_prompt_multiplicity = simulation.data[0] + self.neutron_fission_delayed_multiplicity = simulation.data[0] + self.N_neutron_fission_delayed_precursor = 0 + self.neutron_fission_delayed_fractions = np.zeros(0) + self.neutron_fission_delayed_decay_rates = np.zeros(0) + self.neutron_fission_delayed_spectra = [] + else: + fission_group = file["neutron_reactions/fission"] + + # Multiplicities + self.neutron_fission_prompt_multiplicity = set_fission_multiplicity( + fission_group["prompt_multiplicity"] + ) + self.neutron_fission_delayed_multiplicity = set_fission_multiplicity( + fission_group["delayed_multiplicity"] + ) + + # Delayed fractions and decay rates + self.neutron_fission_delayed_fractions = fission_group[ + "delayed_neutron_precursors/fractions" + ][()] + self.neutron_fission_delayed_decay_rates = fission_group[ + "delayed_neutron_precursors/decay_rates" + ][()] + self.N_neutron_fission_delayed_precursor = len( + self.neutron_fission_delayed_fractions + ) + + # Delayed spectra + self.neutron_fission_delayed_spectra = [] + spectrum_names = [ + x + for x in fission_group["delayed_neutron_precursors"] + if x.startswith("energy_spectrum-") + ] + for spectrum_name in spectrum_names: + self.neutron_fission_delayed_spectra.append( + set_energy_distribution( + fission_group[f"delayed_neutron_precursors/{spectrum_name}"] + ) + ) + + file.close() + + def __repr__(self): + text = "\n" + text += f"Nuclide\n" + text += f" - ID: {self.ID}\n" + text += f" - Name: {self.name}\n" + text += f" - Atomic number: {self.atomic_number}\n" + text += f" - Atomic weight ratio: {self.atomic_weight_ratio}\n" + text += f" - Reaction MTs\n" + text += f" - Elastic scattering: {[int(x.MT) for x in self.neutron_elastic_scattering_reactions]}\n" + text += ( + f" - Capture: {[int(x.MT) for x in self.neutron_capture_reactions]}\n" + ) + text += 
f" - Inelastic scattering: {[int(x.MT) for x in self.neutron_inelastic_scattering_reactions]}\n" + if self.fissionable: + text += f" - Fission: {[int(x.MT) for x in self.neutron_fission_reactions]}\n" + text += f" - Reaction cross-sections (eV, barns)\n" + text += f" - Energy grid {print_1d_array(self.neutron_xs_energy_grid)}\n" + text += f" - Total {print_1d_array(self.neutron_total_xs)}\n" + text += f" - Elastic scattering {print_1d_array(self.neutron_elastic_xs)}\n" + text += f" - Capture {print_1d_array(self.neutron_capture_xs)}\n" + text += ( + f" - Inelastic scattering {print_1d_array(self.neutron_inelastic_xs)}\n" + ) + if self.fissionable: + text += f" - Fission {print_1d_array(self.neutron_fission_xs)}\n" + return text + + +# ====================================================================================== +# Helper functions +# ====================================================================================== + + +def set_fission_multiplicity(h5_group): + multiplicity_type = h5_group.attrs["type"] + + if multiplicity_type == "tabulated": + x = h5_group["energy"][()] * 1e6 # MeV to eV + y = h5_group["value"][()] + multiplicity = DataTable(x, y) + + elif multiplicity_type == "polynomial": + coefficient = h5_group["coefficient"][()] + + # MeV-based to eV-based + for l in range(len(coefficient)): + coefficient[l] /= 1e6**l + + multiplicity = DataPolynomial(coefficient) + else: + print_error(f"Unsupported multiplicity of type {multiplicity_type}") + + return multiplicity diff --git a/mcdc/mcdc/object_/particle.py b/mcdc/mcdc/object_/particle.py new file mode 100644 index 000000000..3d9fa43f5 --- /dev/null +++ b/mcdc/mcdc/object_/particle.py @@ -0,0 +1,58 @@ +import numpy as np + +from dataclasses import dataclass, field +from typing import Annotated +from numpy import int64, uint64 +from numpy.typing import NDArray + +#### + +from mcdc.constant import PARTICLE_NEUTRON +from mcdc.object_.base import ObjectBase, ObjectSingleton + + +@dataclass +class 
ParticleData(ObjectBase): + label: str = "particle_data" + x: float = 0.0 + y: float = 0.0 + z: float = 0.0 + t: float = 0.0 + ux: float = 0.0 + uy: float = 0.0 + uz: float = 0.0 + g: int = -1 + E: float = 0.0 + w: float = 0.0 + particle_type: int = PARTICLE_NEUTRON + rng_seed: uint64 = uint64(1) + + +@dataclass +class CollisionData(ObjectBase): + label: str = "collision_data" + energy_deposition: float = 0.0 + + +@dataclass +class Particle(ParticleData): + label: str = "particle" + cell_ID: int = -1 + material_ID: int = -1 + surface_ID: int = -1 + alive: bool = False + fresh: bool = False + event: int = -1 + + +class ParticleBank(ObjectSingleton): + label: str = "particle_bank" + non_numba: list[str] = ["particles"] + particles: list[ParticleData] = [] + size: Annotated[NDArray[int64], (1,)] + tag: str = "" + + def __init__(self, tag): + super().__init__() + self.tag = tag + self.size = np.zeros(1, dtype=int64) diff --git a/mcdc/mcdc/object_/settings.py b/mcdc/mcdc/object_/settings.py new file mode 100644 index 000000000..60a28d896 --- /dev/null +++ b/mcdc/mcdc/object_/settings.py @@ -0,0 +1,165 @@ +from typing import List +import h5py +from h5py._hl.dataset import sel +import numpy as np + +from dataclasses import dataclass, field +from numpy.typing import NDArray + +#### + +from mcdc.constant import * +from mcdc.object_.base import ObjectSingleton +from mcdc.object_.util import is_sorted +from mcdc.print_ import print_error + +# ====================================================================================== +# Settings +# ====================================================================================== + + +@dataclass +class Settings(ObjectSingleton): + # Annotations for Numba mode + label: str = "settings" + + # Basic + N_particle: int = 0 + N_batch: int = 1 + rng_seed: int = 1 + + # k-eigenvalue + N_inactive: int = 0 + N_active: int = 0 + N_cycle: int = 0 + k_init: float = 1.0 + use_gyration_radius: bool = False + gyration_radius_type: int = 
GYRATION_RADIUS_ALL + + # Particle source + use_source_file: bool = False + source_file_name: str = "" + + # Misc. + time_boundary: float = np.inf + output_name: str = "output" + use_progress_bar: bool = True + + # Time census + N_census: int = 1 + census_time: NDArray[np.float64] = field(default_factory=lambda: np.array([np.inf])) + use_census_based_tally: bool = False + census_tally_frequency: int = 0 + + # Particle bank-related + save_particle: bool = False + active_bank_buffer: int = 100 + census_bank_buffer_ratio: float = 2.0 + source_bank_buffer_ratio: float = 2.0 + future_bank_buffer_ratio: float = 1.5 + + # Multi-particle options + neutron_transport: bool = True + electron_transport: bool = False + proton_transport: bool = False + + # Neutron transport modes + neutron_multigroup_mode: bool = False + neutron_eigenvalue_mode: bool = False + + # GPU mode + gpu_strategy: int = GPU_STRATEGY_ASYNC + gpu_async_type: int = GPU_ASYNC_SIMPLE + gpu_storage: int = GPU_STORAGE_SEPARATE + + def __post_init__(self): + super().__init__() + + def set_time_census(self, time, tally_frequency=None): + # Make sure that the time grid points are sorted + if not is_sorted(time): + print_error("Time census: Time grid points have to be sorted.") + + # Make sure that the starting point is larger than zero + if time[0] <= 0.0: + print_error("Time census: First census time should be larger than zero.") + + # Add the default, final census-at-infinity + time = np.append(time, np.inf) + + # Set the time census parameters + self.census_time = time + self.N_census = len(self.census_time) + + # Set the census-based tallying + if tally_frequency is not None and tally_frequency > 0: + # Flag to reset all tallies' time grids (done in main.py) + self.use_census_based_tally = True + self.census_tally_frequency = tally_frequency + + def set_eigenmode( + self, + N_inactive=0, + N_active=0, + k_init=1.0, + gyration_radius=None, + save_particle=False, + ): + # Update setting self + self.N_inactive = 
N_inactive + self.N_active = N_active + self.N_cycle = self.N_inactive + self.N_active + self.neutron_eigenvalue_mode = True + self.k_init = k_init + self.save_particle = save_particle + + # Gyration radius setup + if gyration_radius is not None: + self.use_gyration_radius = True + if gyration_radius == "all": + self.gyration_radius_type = GYRATION_RADIUS_ALL + elif gyration_radius == "infinite-x": + self.gyration_radius_type = GYRATION_RADIUS_INFINITE_X + elif gyration_radius == "infinite-y": + self.gyration_radius_type = GYRATION_RADIUS_INFINITE_Y + elif gyration_radius == "infinite-z": + self.gyration_radius_type = GYRATION_RADIUS_INFINITE_Z + elif gyration_radius == "only-x": + self.gyration_radius_type = GYRATION_RADIUS_ONLY_X + elif gyration_radius == "only-y": + self.gyration_radius_type = GYRATION_RADIUS_ONLY_Y + elif gyration_radius == "only-z": + self.gyration_radius_type = GYRATION_RADIUS_ONLY_Z + else: + print_error("Unknown gyration radius type") + + # Allocate cycle-wise quantities + from mcdc.object_.simulation import simulation + + simulation.k_cycle = np.zeros(self.N_cycle) + simulation.gyration_radius = np.zeros(self.N_cycle) + + def set_source_file(self, source_file_name): + self.use_source_file = True + self.source_file_name = source_file_name + + # Set number of particles + with h5py.File(source_file_name, "r") as f: + self.N_particle = int(f["particles_size"][()]) + + def set_transported_particles(self, transported_particles: List[str]): + # Reset the flags + self.neutron_transport = False + self.electron_transport = False + self.proton_transport = False + + # Set flags + for particle in transported_particles: + if particle == "neutron": + self.neutron_transport = True + elif particle == "electron": + self.electron_transport = True + elif particle == "proton": + self.proton_transport = True + else: + print_error(r"Unsupported particle types: {particle}") diff --git a/mcdc/mcdc/object_/simulation.py b/mcdc/mcdc/object_/simulation.py new file 
mode 100644 index 000000000..fa5b5f9bd --- /dev/null +++ b/mcdc/mcdc/object_/simulation.py @@ -0,0 +1,239 @@ +from __future__ import annotations +from typing import TYPE_CHECKING, Annotated + +from mcdc.object_.technique import ( + ImplicitCapture, + PopulationControl, + WeightRoulette, + WeightedEmission, +) + +if TYPE_CHECKING: + from mcdc.object_.cell import Cell, Region + from mcdc.object_.element import Element + from mcdc.object_.electron_reaction import ElectronReactionBase + from mcdc.object_.material import MaterialBase + from mcdc.object_.nuclide import Nuclide + from mcdc.object_.neutron_reaction import NeutronReactionBase + from mcdc.object_.source import Source + from mcdc.object_.surface import Surface + from mcdc.object_.tally import Tally + +#### + +import numpy as np + +from mpi4py import MPI +from numpy import float64, int64 +from numpy.typing import NDArray + +#### + +from mcdc.object_.base import ObjectSingleton +from mcdc.object_.data import DataBase, DataNone +from mcdc.object_.distribution import DistributionBase, DistributionNone +from mcdc.object_.gpu_tools import GPUMeta +from mcdc.object_.mesh import MeshBase, MeshUniform +from mcdc.object_.particle import ParticleBank +from mcdc.object_.settings import Settings +from mcdc.object_.universe import Universe, Lattice + +# ====================================================================================== +# Simulation +# ====================================================================================== + + +class Simulation(ObjectSingleton): + # Annotations for Numba mode + label: str = "simulation" + non_numba: list[str] = [ + "regions", + "bank_active", + "bank_census", + "bank_source", + "bank_future", + ] + + # Physics + data: list[DataBase] + distributions: list[DistributionBase] + materials: list[MaterialBase] + elements: list[Element] + electron_reactions: list[ElectronReactionBase] + nuclides: list[Nuclide] + neutron_reactions: list[NeutronReactionBase] + sources: list[Source] 
+ + # Geometry + cells: list[Cell] + lattices: list[Lattice] + regions: list[Region] + surfaces: list[Surface] + universes: list[Universe] + meshes: list[MeshBase] + + # Tallies + tallies: list[Tally] + + # Settings + settings: Settings + + # Techniques + implicit_capture: ImplicitCapture + weighted_emission: WeightedEmission + weight_roulette: WeightRoulette + population_control: PopulationControl + + # Particle banks + bank_active: ParticleBank + bank_census: ParticleBank + bank_source: ParticleBank + bank_future: ParticleBank + + # Simulation parameters + idx_work: int + idx_cycle: int + idx_census: int + idx_batch: int + dd_idx: int + dd_N_local_source: int + dd_local_rank: int + k_eff: float + k_cycle: NDArray[float64] + k_avg: float + k_sdv: float + n_avg: float + n_sdv: float + n_max: float + C_avg: float + C_sdv: float + C_max: float + k_avg_running: float + k_sdv_running: float + gyration_radius: NDArray[float64] + cycle_active: bool + eigenvalue_tally_nuSigmaF: Annotated[NDArray[float64], (1,)] + eigenvalue_tally_n: Annotated[NDArray[float64], (1,)] + eigenvalue_tally_C: Annotated[NDArray[float64], (1,)] + mpi_size: int + mpi_rank: int + mpi_master: bool + mpi_work_start: int + mpi_work_size: int + mpi_work_size_total: int + mpi_work_iter: Annotated[NDArray[int64], (1,)] + runtime_total: float + runtime_preparation: float + runtime_simulation: float + runtime_output: float + runtime_bank_management: float + + # GPU metadata + gpu_meta: GPUMeta + source_seed: int + + def __init__(self): + super().__init__() + + # ============================================================================== + # Simulation objects + # ============================================================================== + + # Physics + self.data = [DataNone()] + self.distributions = [DistributionNone()] + self.materials = [] + self.elements = [] + self.electron_reactions = [] + self.nuclides = [] + self.neutron_reactions = [] + self.sources = [] + + # Geometry + self.cells = [] + 
self.lattices = [] + self.regions = [] + self.surfaces = [] + self.universes = [Universe("Root Universe", root=True)] + self.meshes = [] + + # Tallies + self.tallies = [] + + # Settings + self.settings = Settings() + + # Techniques + self.implicit_capture = ImplicitCapture() + self.weighted_emission = WeightedEmission() + self.weight_roulette = WeightRoulette() + self.population_control = PopulationControl() + + # ============================================================================== + # Particle banks + # ============================================================================== + + self.bank_active = ParticleBank(tag="active") + self.bank_census = ParticleBank(tag="census") + self.bank_source = ParticleBank(tag="source") + self.bank_future = ParticleBank(tag="future") + + # ============================================================================== + # Simulation parameters + # ============================================================================== + + # Simulation indices + self.idx_work = 0 + self.idx_cycle = 0 + self.idx_census = 0 + self.idx_batch = 0 + + # Domain decomposition + self.dd_idx = 0 + self.dd_N_local_source = 0 + self.dd_local_rank = 0 + + # Eigenvalue simulation + self.k_eff = 0.0 + self.k_cycle = np.ones(1) + self.k_avg = 0.0 + self.k_sdv = 0.0 + self.n_avg = 0.0 # Neutron density + self.n_sdv = 0.0 + self.n_max = 0.0 + self.C_avg = 0.0 # Precursor density + self.C_sdv = 0.0 + self.C_max = 0.0 + self.k_avg_running = 0.0 + self.k_sdv_running = 0.0 + self.gyration_radius = np.zeros(1) + self.cycle_active = False + self.eigenvalue_tally_nuSigmaF = np.zeros(1) + self.eigenvalue_tally_n = np.zeros(1) + self.eigenvalue_tally_C = np.zeros(1) + + # MPI parameters + self.mpi_size = MPI.COMM_WORLD.Get_size() + self.mpi_rank = MPI.COMM_WORLD.Get_rank() + self.mpi_master = self.mpi_rank == 0 + self.mpi_work_start = 0 + self.mpi_work_size = 0 + self.mpi_work_size_total = 0 + self.mpi_work_iter = np.zeros(1, dtype=int64) + + # Runtime 
records + self.runtime_total = 0.0 + self.runtime_preparation = 0.0 + self.runtime_simulation = 0.0 + self.runtime_output = 0.0 + self.runtime_bank_management = 0.0 + + # GPU metadata + self.gpu_meta = GPUMeta() + self.source_seed = 0 + + def set_root_universe(self, cells=[]): + self.universes[0].cells = cells + + +simulation = Simulation() diff --git a/mcdc/mcdc/object_/source.py b/mcdc/mcdc/object_/source.py new file mode 100644 index 000000000..9d8c642e6 --- /dev/null +++ b/mcdc/mcdc/object_/source.py @@ -0,0 +1,333 @@ +import numpy as np + +from numpy import float64, int64 +from numpy.typing import NDArray +from types import NoneType +from typing import Annotated, Iterable + +#### + +import mcdc.object_.distribution as distribution + +from mcdc.constant import PARTICLE_NEUTRON, PARTICLE_ELECTRON, PARTICLE_PROTON, INF, PI +from mcdc.object_.base import ObjectNonSingleton +from mcdc.object_.distribution import DistributionTabulated, DistributionPMF +from mcdc.object_.simulation import simulation +from mcdc.object_.util import move_object +from mcdc.print_ import print_error + + +def decode_particle_type(type_): + if type_ == PARTICLE_NEUTRON: + return "Neutron" + elif type_ == PARTICLE_ELECTRON: + return "Electron" + elif type_ == PARTICLE_PROTON: + return "Proton" + + +# ====================================================================================== +# Source +# ====================================================================================== + + +class Source(ObjectNonSingleton): + """ + Define a particle source. + + Parameters + ---------- + name : str, optional + User label. + position : array_like of float, optional + Point-source position ``[x, y, z]`` in cm. + x : array_like of float, optional + Source extent along x: ``[x_min, x_max]`` in cm. + y : array_like of float, optional + Source extent along y: ``[y_min, y_max]`` in cm. + z : array_like of float, optional + Source extent along z: ``[z_min, z_max]`` in cm. 
+ direction : array_like of float, optional + Mono-directional source direction ``[ux, uy, uz]``. + white_direction : array_like of float, optional + White (cosine-weighted) boundary source normal direction. + isotropic : bool, optional + If True, source emits isotropically. + polar_cosine : array_like of float, optional + Polar cosine bounds ``[mu_min, mu_max]``. + azimuthal : array_like of float, optional + Azimuthal angle bounds ``[azi_min, azi_max]``. + energy : float or ndarray, optional + Source energy in eV (mono-energetic) or a tabulated PDF. + energy_group : int or ndarray, optional + Energy group index (mono-group) or a PMF array. + time : float or array_like of float, optional + Emission time (s) or time range ``[t_min, t_max]``. + probability : float, optional + Relative source probability weight. + + Returns + ------- + Source + The source object. + """ + + # Annotations for Numba mode + label: str = "source" + # + name: str + # Position + point_source: bool + point: Annotated[NDArray[float64], (3,)] + x: Annotated[NDArray[float64], (2,)] + y: Annotated[NDArray[float64], (2,)] + z: Annotated[NDArray[float64], (2,)] + # Direction + isotropic_direction: bool + mono_direction: bool + white_direction: bool + direction: Annotated[NDArray[float64], (3,)] + polar_cosine: Annotated[NDArray[float64], (2,)] + azimuthal: Annotated[NDArray[float64], (2,)] + # Energy + mono_energetic: bool + energy_group: int + energy: float + energy_group_pmf: DistributionPMF + energy_pdf: DistributionTabulated + # Time + discrete_time: bool + time: float + time_range: Annotated[NDArray[float64], (2,)] + # + particle_type: int + probability: float + moving: bool + N_move: int + N_move_grid: int + move_velocities: Annotated[NDArray[float64], ("N_move", 3)] + move_durations: Annotated[NDArray[float64], ("N_move",)] + move_time_grid: Annotated[NDArray[float64], ("N_move_grid",)] + move_translations: Annotated[NDArray[float64], ("N_move_grid", 3)] + + def __init__( + self, + name: str 
= "", + position: Iterable[float] | NoneType = None, + x: Iterable[float] | NoneType = None, + y: Iterable[float] | NoneType = None, + z: Iterable[float] | NoneType = None, + # + direction: Iterable[float] | NoneType = None, + white_direction: Iterable[float] | NoneType = None, + isotropic: bool | NoneType = None, + polar_cosine: Iterable[float] | NoneType = None, + azimuthal: Iterable[float] | NoneType = None, + # + energy: float | NDArray[float64] | NoneType = None, + energy_group: int | NDArray[int64] | NoneType = None, + # + time: float | Iterable[float] = 0.0, + # + particle_type: str = "neutron", + # + probability: float = 1.0, + ): + + super().__init__() + + # Set name + if name != "": + self.name = name + else: + self.name = f"{self.label}_{self.ID}" + + # ============================================================================== + # Default attributes + # Point source at origin, isotropic, mono-energetic at 1 MeV or at group 0, + # time = 0, neutron + # ============================================================================== + + # Position + self.point_source = True + self.point = np.zeros(3) + self.x = np.array([0.0, 0.0]) + self.y = np.array([0.0, 0.0]) + self.z = np.array([0.0, 0.0]) + + # Direction + self.isotropic_direction = True + self.mono_direction = False + self.white_direction = False + self.direction = np.array([0.0, 0.0, 1.0]) + self.polar_cosine = np.array([-1.0, 1.0]) + self.azimuthal = np.array([0.0, 2.0 * PI]) + + # Energy + self.mono_energetic = True + self.energy_group = 0 + self.energy = 1.0e6 + self.energy_group_pmf = DistributionPMF(np.array([0.0]), np.array([1.0])) + self.energy_pdf = DistributionTabulated( + np.array([1.0e6 - 1.0, 1.0e6 + 1.0]), np.array([1.0, 1.0]) + ) + + # Time + self.discrete_time = True + self.time = 0.0 + self.time_range = np.array([0.0, 0.0]) + + # Particle type + self.particle_type = PARTICLE_NEUTRON + + # Probability + self.probability = probability + + # 
============================================================================== + # Assignment + # ============================================================================== + + # Position + if position is not None: + self.point = np.array(position) + else: + self.point_source = False + if x is not None: + self.x = np.array(x) + if y is not None: + self.y = np.array(y) + if z is not None: + self.z = np.array(z) + + # Direction + if isotropic is not None and isotropic: + pass + elif direction is not None: + self.isotropic_direction = False + self.direction = np.array(direction) + if polar_cosine is not None or azimuthal is not None: + self.mono_direction = False + if polar_cosine is not None: + self.polar_cosine = np.array(polar_cosine) + if azimuthal is not None: + self.azimuthal = np.array(azimuthal) + else: + self.mono_direction = True + elif white_direction is not None: + self.isotropic_direction = False + self.white_direction = True + self.direction = np.array(white_direction) + # Normalize direction + self.direction /= np.linalg.norm(self.direction) + + # Energy + if energy_group is not None: + if type(energy_group) == int: + self.energy_group = energy_group + else: + self.mono_energetic = False + self.energy_group_pmf = DistributionPMF( + energy_group[0], energy_group[1] + ) + elif energy is not None: + if type(energy) == float: + self.energy = energy + else: + self.mono_energetic = False + self.energy_pdf = DistributionTabulated(energy[0], energy[1]) + + # Time + if type(time) == float: + self.time = time + else: + self.discrete_time = False + self.time_range = np.array(time) + + # Particle type + if particle_type == "neutron": + self.particle_type = PARTICLE_NEUTRON + elif particle_type == "electron": + self.particle_type = PARTICLE_ELECTRON + elif particle_type == "proton": + self.particle_type = PARTICLE_PROTON + else: + print_error(rf"Unsupported particle types: {particle_type}") + + # Moving source parameters + self.moving = False + self.N_move = 1 + 
self.N_move_grid = 2 + self.move_velocities = np.zeros((1, 3)) + self.move_durations = np.array([INF]) + self.move_time_grid = np.array([0.0, INF]) + self.move_translations = np.zeros((2, 3)) + + def __repr__(self): + text = "\n" + text += f"Source\n" + text += f" - ID: {self.ID}\n" + text += f" - Name: {self.name}\n" + text += f" - Particle: {decode_particle_type(self.particle_type)}\n" + text += f" - Probability: {self.probability * 100}%\n" + if self.point_source: + text += f" - Position [x, y, z]: {self.point} cm\n" + else: + text += f" - Position\n" + text += f" - x: {self.x} cm\n" + text += f" - y: {self.y} cm\n" + text += f" - z: {self.z} cm\n" + if self.isotropic_direction: + text += f" - Direction: Isotropic\n" + elif self.mono_direction: + text += f" - Direction [ux, uy, yz]: {self.direction}\n" + elif self.white_direction: + text += f" - Isotropic halfspace: {self.direction}\n" + if simulation.materials[0].label == "multigroup_material": + if self.mono_energetic: + text += f" - Energy group: {self.energy_group} \n" + else: + text += f" - Energy group: {distribution.decode_type(self.energy_group_pmf.type)} [ID: {self.energy_group_pmf.ID}]\n" + else: + if self.mono_energetic: + text += f" - Energy: {self.energy} eV\n" + else: + text += f" - Energy: {distribution.decode_type(self.energy_pdf)} [ID: {self.energy_pdf.ID}]\n" + if self.discrete_time: + text += f" - Time: {self.time} s\n" + else: + text += f" - Time: {self.time_range} s\n" + + return text + + # ================================================================================== + # Source moving + # ================================================================================== + + def move(self, velocities, durations): + """ + Define piecewise-constant motion for the source. + + Appends a final static segment (zero velocity, infinite duration) so that + the motion covers the whole simulation time. 
+ + Parameters + ---------- + velocities : array_like, shape (N, 3) or list + Per-segment velocity vectors [cm/s]. + durations : array_like, shape (N,) or list + Per-segment durations [s]. + + Notes + ----- + - Internally converts lists to arrays and constructs + ``move_time_grid`` and cumulative ``move_translations``. + - Sets ``moving=True`` and ``N_move = len(durations) + 1``. + + Examples + -------- + >>> src = mcdc.Source(z=[-0.1, 0.1], isotropic=True, energy=0, time=[0.0, 1.0]) + >>> src.move(velocities=[[0,0,1.0]], durations=[0.5]) # 0.5 s upward, then static + >>> s.N_move + 2 + """ + move_object(self, velocities, durations) diff --git a/mcdc/mcdc/object_/surface.py b/mcdc/mcdc/object_/surface.py new file mode 100644 index 000000000..bbdeaa1d1 --- /dev/null +++ b/mcdc/mcdc/object_/surface.py @@ -0,0 +1,975 @@ +from typing import Annotated, Iterable +import numpy as np + +from numpy import float64 +from numpy.typing import NDArray + +#### + +from mcdc.constant import ( + BC_NONE, + BC_REFLECTIVE, + BC_VACUUM, + INF, + SURFACE_CYLINDER_X, + SURFACE_CYLINDER_Y, + SURFACE_CYLINDER_Z, + SURFACE_CYLINDER, + SURFACE_PLANE_X, + SURFACE_PLANE_Y, + SURFACE_PLANE_Z, + SURFACE_PLANE, + SURFACE_SPHERE, + SURFACE_QUADRIC, + SURFACE_CONE_X, + SURFACE_CONE_Y, + SURFACE_CONE_Z, + SURFACE_TORUS_Z, +) +from mcdc.object_.base import ObjectNonSingleton +from mcdc.object_.cell import Region +from mcdc.object_.tally import TallySurface +from mcdc.object_.util import move_object + +# ====================================================================================== +# Surface +# ====================================================================================== + + +class Surface(ObjectNonSingleton): + """ + Geometric surface primitive with optional boundary condition and motion. + + Surfaces are registered non-singletons and receive a stable ``ID``. Factory + constructors (:meth:`PlaneX`, :meth:`CylinderZ`, etc.) set the quadric + coefficients (A..J) and linearity flag. 
Motion segments can be defined with + :meth:`move`. + + Parameters + ---------- + type\\_ : int + One of ``SURFACE_*`` constants (e.g., ``SURFACE_PLANE_X``). + name : str + Optional label for reporting. + boundary_condition : str + Boundary behavior at the surface (``"none"``, ``"vacuum"``, or ``"reflective"``). + + Attributes + ---------- + ID : int + Index in the global registry (assigned on construction). + type\\_ : int + Surface type code (``SURFACE_*``). + name : str + User label. + boundary_condition : int + One of ``BC_NONE``, ``BC_VACUUM``, ``BC_REFLECTIVE``. + A,B,C,D,E,F,G,H,I,J : float + Quadric coefficients defining the implicit surface. + linear : bool + True for linear (plane) surfaces; False for general quadrics. + nx, ny, nz : float + Outward normal components for linear planes. + moving : bool + True if :meth:`move` has been called. + N_move : int + Number of motion segments plus the final static segment. + move_velocities : (N_move, 3) ndarray + Per-segment velocity vectors. + move_durations : (N_move,) ndarray + Per-segment durations (s). + move_time_grid : (N_move+1,) ndarray + Cumulative time breakpoints. + move_translations : (N_move+1, 3) ndarray + Cumulative translations at each breakpoint. + + See Also + -------- + Region + Use unary ``+`` / ``-`` to form half-spaces: ``+surface`` or ``-surface``. + decode_type + Human-readable surface type. + decode_BC_type + Human-readable boundary condition name. 
+ """ + + # Annotations for Numba mode + label: str = "surface" + # + type: int + name: str + boundary_condition: int + A: float + B: float + C: float + D: float + E: float + F: float + G: float + H: float + I: float + J: float + R: float + r: float + linear: bool + nx: float + ny: float + nz: float + moving: bool + N_move: int + N_move_grid: int + move_velocities: Annotated[NDArray[float64], ("N_move", 3)] + move_durations: Annotated[NDArray[float64], ("N_move",)] + move_time_grid: Annotated[NDArray[float64], ("N_move_grid",)] + move_translations: Annotated[NDArray[float64], ("N_move_grid", 3)] + tallies: list[TallySurface] + + def __init__(self, type_, name, boundary_condition): + super().__init__() + + # Type and name + self.type = type_ + if name != "": + self.name = name + else: + self.name = f"{self.label}_{self.ID}" + + # Boundary condition + if boundary_condition == "none": + self.boundary_condition = BC_NONE + elif boundary_condition == "vacuum": + self.boundary_condition = BC_VACUUM + elif boundary_condition == "reflective": + self.boundary_condition = BC_REFLECTIVE + + # Quadric surface coefficients + self.A = 0.0 + self.B = 0.0 + self.C = 0.0 + self.D = 0.0 + self.E = 0.0 + self.F = 0.0 + self.G = 0.0 + self.H = 0.0 + self.I = 0.0 + self.J = 0.0 + + # Torus surface parameters + self.R = 0.0 + self.r = 0.0 + + # Helpers + self.linear = True + + # Surface normal direction (if linear) + self.nx = 0.0 + self.ny = 0.0 + self.nz = 0.0 + + # Moving surface parameters + self.moving = False + self.N_move = 1 + self.N_move_grid = 2 + self.move_velocities = np.zeros((1, 3)) + self.move_durations = np.array([INF]) + self.move_time_grid = np.array([0.0, INF]) + self.move_translations = np.zeros((2, 3)) + + # Surface tallies + self.tallies = [] + + def __repr__(self): + """ + Return a human-readable description including type-specific parameters. + + Returns + ------- + str + Multi-line formatted string with ID, name, BC, and geometry details. 
+ """ + text = "\n" + text += f"{decode_type(self.type)}\n" + text += f" - ID: {self.ID}\n" + text += f" - Name: {self.name}\n" + text += f" - Boundary condition: {decode_BC_type(self.boundary_condition)}\n" + + # ============================================================================== + # Type-based repr + # ============================================================================== + + if self.type == SURFACE_PLANE_X: + text += f" - x0: {-self.J} cm\n" + elif self.type == SURFACE_PLANE_Y: + text += f" - y0: {-self.J} cm\n" + elif self.type == SURFACE_PLANE_Z: + text += f" - z0: {-self.J} cm\n" + elif self.type == SURFACE_PLANE: + text += f" - Coeffs.: {self.G}, {self.H}, {self.I}, {self.J}\n" + text += f" - Normal: ({self.nx}, {self.ny}, {self.nz})\n" + elif self.type == SURFACE_CYLINDER_X: + y = -0.5 * self.H + z = -0.5 * self.I + r = (y**2 + z**2 - self.J) ** 0.5 + text += f" - Center (y, z): ({y}, {z}) cm\n" + text += f" - Radius: {r} cm\n" + elif self.type == SURFACE_CYLINDER_Y: + x = -0.5 * self.G + z = -0.5 * self.I + r = (x**2 + z**2 - self.J) ** 0.5 + text += f" - Center (x, z): ({x}, {z}) cm\n" + text += f" - Radius: {r} cm\n" + elif self.type == SURFACE_CYLINDER_Z: + x = -0.5 * self.G + y = -0.5 * self.H + r = (x**2 + y**2 - self.J) ** 0.5 + text += f" - Center (x, y): ({x}, {y}) cm\n" + text += f" - Radius: {r} cm\n" + elif self.type == SURFACE_SPHERE: + x = -0.5 * self.G + y = -0.5 * self.H + z = -0.5 * self.I + r = (x**2 + y**2 + z**2 - self.J) ** 0.5 + text += f" - Center (x, y, z): ({x}, {y}, {z}) cm\n" + text += f" - Radius: {r} cm\n" + elif self.type == SURFACE_CYLINDER: + text += f" - Coeffs.: {self.A}, {self.B}, {self.C},\n" + text += f" {self.D}, {self.E}, {self.F},\n" + text += f" {self.G}, {self.H}, {self.I}, {self.J}\n" + elif self.type == SURFACE_CONE_X: + t_sq = -self.A + y0 = -0.5 * self.H + z0 = -0.5 * self.I + x0 = 0.0 if t_sq == 0.0 else 0.5 * self.G / t_sq + text += f" - Apex (x, y, z): ({x0}, {y0}, {z0}) cm\n" + text += f" 
- tan^2(theta): {t_sq}\n" + elif self.type == SURFACE_CONE_Y: + t_sq = -self.B + x0 = -0.5 * self.G + z0 = -0.5 * self.I + y0 = 0.0 if t_sq == 0.0 else 0.5 * self.H / t_sq + text += f" - Apex (x, y, z): ({x0}, {y0}, {z0}) cm\n" + text += f" - tan^2(theta): {t_sq}\n" + elif self.type == SURFACE_CONE_Z: + t_sq = -self.C + x0 = -0.5 * self.G + y0 = -0.5 * self.H + z0 = 0.0 if t_sq == 0.0 else 0.5 * self.I / t_sq + text += f" - Apex (x, y, z): ({x0}, {y0}, {z0}) cm\n" + text += f" - tan^2(theta): {t_sq}\n" + elif self.type == SURFACE_QUADRIC: + text += f" - Coeffs.: {self.A}, {self.B}, {self.C},\n" + text += f" {self.D}, {self.E}, {self.F},\n" + text += f" {self.G}, {self.H}, {self.I}, {self.J}\n" + + if len(self.tallies) > 0: + text += f" - Tallies: {[x.ID for x in self.tallies]}\n" + + return text + + # ================================================================================== + # Type-based creation methods + # ================================================================================== + + @classmethod + def PlaneX(cls, name: str = "", x: float = 0.0, boundary_condition: str = "none"): + """ + Create a plane perpendicular to +x at x = constant. + + Parameters + ---------- + name : str, optional + User label. + x : float, default 0.0 + Plane location (cm). + boundary_condition : str, optional + Boundary type (``"none"``, ``"vacuum"``, or ``"reflective"``). + + Returns + ------- + Surface + Linear plane with normal ``(+1, 0, 0)``. + """ + type_ = SURFACE_PLANE_X + surface = cls(type_, name, boundary_condition) + + surface.linear = True + surface.G = 1.0 + surface.J = -x + surface.nx = 1.0 + + return surface + + @classmethod + def PlaneY(cls, name: str = "", y: float = 0.0, boundary_condition: str = "none"): + """ + Create a plane perpendicular to +y at y = constant. + + Parameters + ---------- + name : str, optional + User label. + y : float, default 0.0 + Plane location (cm). 
+ boundary_condition : str, optional + Boundary type (``"none"``, ``"vacuum"``, or ``"reflective"``). + + Returns + ------- + Surface + Linear plane with normal ``(0, +1, 0)``. + """ + type_ = SURFACE_PLANE_Y + surface = cls(type_, name, boundary_condition) + + surface.linear = True + surface.H = 1.0 + surface.J = -y + surface.ny = 1.0 + + return surface + + @classmethod + def PlaneZ(cls, name: str = "", z: float = 0.0, boundary_condition: str = "none"): + """ + Create a plane perpendicular to +z at z = constant. + + Parameters + ---------- + name : str, optional + User label. + z : float, default 0.0 + Plane location (cm). + boundary_condition : str, optional + Boundary type (``"none"``, ``"vacuum"``, or ``"reflective"``). + + Returns + ------- + Surface + Linear plane with normal ``(0, 0, +1)``. + """ + type_ = SURFACE_PLANE_Z + surface = cls(type_, name, boundary_condition) + + surface.linear = True + surface.I = 1.0 + surface.J = -z + surface.nz = 1.0 + + return surface + + @classmethod + def Plane( + cls, + name: str = "", + A: float = 0.0, + B: float = 0.0, + C: float = 0.0, + D: float = 0.0, + boundary_condition: str = "none", + ): + """ + Create a general plane defined by A x + B y + C z + D = 0. + + The normal is normalized to unit length and stored in ``(nx, ny, nz)``. + + Parameters + ---------- + name : str, optional + User label. + A, B, C, D : float + Plane coefficients. + boundary_condition : str, optional + Boundary type (``"none"``, ``"vacuum"``, or ``"reflective"``). + + Returns + ------- + Surface + Linear plane with normalized normal vector. 
+ """ + type_ = SURFACE_PLANE + surface = cls(type_, name, boundary_condition) + + surface.linear = True + + # Normalize + norm = (A**2 + B**2 + C**2) ** 0.5 + A /= norm + B /= norm + C /= norm + D /= norm + + # Coefficients + surface.G = A + surface.H = B + surface.I = C + surface.J = D + + # Surface normal direction + surface.nx = A + surface.ny = B + surface.nz = C + return surface + + @classmethod + def CylinderX( + cls, + name: str = "", + center: Iterable[float] = [0.0, 0.0], + radius: float = 0.0, + boundary_condition: str = "none", + ): + """ + Create an infinite cylinder aligned with the x-axis. + + Parameters + ---------- + name : str, optional + User label. + center : (2,) array_like of float, default (0, 0) + Cylinder center in (y, z) (cm). + radius : float, default 1.0 + Cylinder radius (cm). + boundary_condition : str, optional + Boundary type (``"none"``, ``"vacuum"``, or ``"reflective"``). + + Returns + ------- + Surface + Quadratic cylinder surface. + """ + type_ = SURFACE_CYLINDER_X + surface = cls(type_, name, boundary_condition) + + surface.linear = False + + # Center and radius + y, z = center + r = radius + + # Coefficients + surface.B = 1.0 + surface.C = 1.0 + surface.H = -2.0 * y + surface.I = -2.0 * z + surface.J = y**2 + z**2 - r**2 + return surface + + @classmethod + def CylinderY( + cls, + name: str = "", + center: Iterable[float] = [0.0, 0.0], + radius: float = 0.0, + boundary_condition: str = "none", + ): + """ + Create an infinite cylinder aligned with the y-axis. + + Parameters + ---------- + name : str, optional + User label. + center : (2,) array_like of float + Cylinder center in (x, z) (cm). + radius : float + Cylinder radius (cm). + boundary_condition : str, optional + Boundary type (``"none"``, ``"vacuum"``, or ``"reflective"``). + + Returns + ------- + Surface + Quadratic cylinder surface. 
+ """ + type_ = SURFACE_CYLINDER_Y + surface = cls(type_, name, boundary_condition) + + surface.linear = False + + # Center and radius + x, z = center + r = radius + + # Coefficients + surface.A = 1.0 + surface.C = 1.0 + surface.G = -2.0 * x + surface.I = -2.0 * z + surface.J = x**2 + z**2 - r**2 + return surface + + @classmethod + def CylinderZ( + cls, + name: str = "", + center: Iterable[float] = [0.0, 0.0], + radius: float = 0.0, + boundary_condition: str = "none", + ): + """ + Create an infinite cylinder aligned with the z-axis. + + Parameters + ---------- + name : str, optional + User label. + center : (2,) array_like of float + Cylinder center in (x, y) (cm). + radius : float + Cylinder radius (cm). + boundary_condition : str, optional + Boundary type (``"none"``, ``"vacuum"``, or ``"reflective"``). + + Returns + ------- + Surface + Quadratic cylinder surface. + """ + type_ = SURFACE_CYLINDER_Z + surface = cls(type_, name, boundary_condition) + surface.linear = False + + # Center and radius + x, y = center + r = radius + + # Coefficients + surface.A = 1.0 + surface.B = 1.0 + surface.G = -2.0 * x + surface.H = -2.0 * y + surface.J = x**2 + y**2 - r**2 + + return surface + + @classmethod + def Cylinder( + cls, + name: str = "", + radius: float = 0.0, + axis: Iterable[float] = [0.0, 0.0, 1.0], + point: Iterable[float] = [0.0, 0.0, 0.0], + boundary_condition: str = "none", + ): + """ + Create a general infinite cylinder with an arbitrary axis. + + Parameters + ---------- + name : str, optional + radius : float + Cylinder radius (cm). + axis : (3,) array_like of float + Direction vector of the cylinder axis (normalized automatically). + point : (3,) array_like of float + A point on the cylinder axis (cm). + boundary_condition : {"none","vacuum","reflective"}, optional + + Returns + ------- + Surface + General cylinder surface. 
+ """ + type_ = SURFACE_CYLINDER + surface = cls(type_, name, boundary_condition) + surface.linear = False + + # Axis and point + ax, ay, az = axis + norm = (ax**2 + ay**2 + az**2) ** 0.5 + dx, dy, dz = ax / norm, ay / norm, az / norm + px, py, pz = point + r = radius + + # Coefficients + surface.A = 1.0 - dx**2 + surface.B = 1.0 - dy**2 + surface.C = 1.0 - dz**2 + surface.D = -2.0 * dx * dy + surface.E = -2.0 * dx * dz + surface.F = -2.0 * dy * dz + Qpx = (1.0 - dx**2) * px - dx * dy * py - dx * dz * pz + Qpy = -dx * dy * px + (1.0 - dy**2) * py - dy * dz * pz + Qpz = -dx * dz * px - dy * dz * py + (1.0 - dz**2) * pz + surface.G = -2.0 * Qpx + surface.H = -2.0 * Qpy + surface.I = -2.0 * Qpz + pdotd = px * dx + py * dy + pz * dz + surface.J = px**2 + py**2 + pz**2 - pdotd**2 - r**2 + + return surface + + @classmethod + def Sphere( + cls, + name: str = "", + center: Iterable[float] = [0.0, 0.0, 0.0], + radius: float = 0.0, + boundary_condition: str = "none", + ): + """ + Create a sphere. + + Parameters + ---------- + name : str, optional + User label. + center : (3,) array_like of float + Sphere center (x, y, z) in cm. + radius : float + Radius (cm). + boundary_condition : str, optional + Boundary type (``"none"``, ``"vacuum"``, or ``"reflective"``). + + Returns + ------- + Surface + Quadratic spherical surface. + """ + type_ = SURFACE_SPHERE + surface = cls(type_, name, boundary_condition) + + surface.linear = False + + # Center and radius + x, y, z = center + r = radius + + # Coefficients + surface.A = 1.0 + surface.B = 1.0 + surface.C = 1.0 + surface.G = -2.0 * x + surface.H = -2.0 * y + surface.I = -2.0 * z + surface.J = x**2 + y**2 + z**2 - r**2 + return surface + + @classmethod + def ConeX( + cls, + name: str = "", + apex: Iterable[float] = [0.0, 0.0, 0.0], + t_sq: float = 1.0, + boundary_condition: str = "none", + ): + """ + Create an infinite cone with axis along the x-axis. 
+ + Equation: (y - y0)^2 + (z - z0)^2 - t_sq * (x - x0)^2 = 0 + + Parameters + ---------- + name : str, optional + apex : (3,) array_like of float + Cone apex (x0, y0, z0) in cm. + t_sq : float + Squared tangent of the half-angle: t_sq = tan^2(theta). + For a 45-degree half-angle use t_sq = 1.0. + boundary_condition : {"none","vacuum","reflective"}, optional + + Returns + ------- + Surface + Cone-X surface. + """ + type_ = SURFACE_CONE_X + surface = cls(type_, name, boundary_condition) + surface.linear = False + + x0, y0, z0 = apex + + surface.A = -t_sq + surface.B = 1.0 + surface.C = 1.0 + surface.G = 2.0 * t_sq * x0 + surface.H = -2.0 * y0 + surface.I = -2.0 * z0 + surface.J = y0**2 + z0**2 - t_sq * x0**2 + + return surface + + @classmethod + def ConeY( + cls, + name: str = "", + apex: Iterable[float] = [0.0, 0.0, 0.0], + t_sq: float = 1.0, + boundary_condition: str = "none", + ): + """ + Create an infinite cone with axis along the y-axis. + + Equation: (x - x0)^2 + (z - z0)^2 - t_sq * (y - y0)^2 = 0 + + Parameters + ---------- + name : str, optional + apex : (3,) array_like of float + Cone apex (x0, y0, z0) in cm. + t_sq : float + Squared tangent of the half-angle: t_sq = tan^2(theta). + boundary_condition : {"none","vacuum","reflective"}, optional + + Returns + ------- + Surface + Cone-Y surface. + """ + type_ = SURFACE_CONE_Y + surface = cls(type_, name, boundary_condition) + surface.linear = False + + x0, y0, z0 = apex + + surface.A = 1.0 + surface.B = -t_sq + surface.C = 1.0 + surface.G = -2.0 * x0 + surface.H = 2.0 * t_sq * y0 + surface.I = -2.0 * z0 + surface.J = x0**2 + z0**2 - t_sq * y0**2 + + return surface + + @classmethod + def ConeZ( + cls, + name: str = "", + apex: Iterable[float] = [0.0, 0.0, 0.0], + t_sq: float = 1.0, + boundary_condition: str = "none", + ): + """ + Create an infinite cone with axis along the z-axis. 
    @classmethod
    def Quadric(
        cls,
        name: str = "",
        A: float = 0.0,
        B: float = 0.0,
        C: float = 0.0,
        D: float = 0.0,
        E: float = 0.0,
        F: float = 0.0,
        G: float = 0.0,
        H: float = 0.0,
        I: float = 0.0,
        J: float = 0.0,
        boundary_condition: str = "none",
    ):
        """
        Create a general quadric:
        A x^2 + B y^2 + C z^2 + D xy + E yz + F zx + G x + H y + I z + J = 0

        Parameters
        ----------
        name : str, optional
            User label.
        A,B,C,D,E,F,G,H,I,J : float
            Quadric coefficients.
        boundary_condition : str, optional
            Boundary type (``"none"``, ``"vacuum"``, or ``"reflective"``).

        Returns
        -------
        Surface
            General quadratic surface.
        """
        type_ = SURFACE_QUADRIC
        surface = cls(type_, name, boundary_condition)

        surface.linear = False

        # Coefficients are stored verbatim; no normalization is applied.
        surface.A = A
        surface.B = B
        surface.C = C
        surface.D = D
        surface.E = E
        surface.F = F
        surface.G = G
        surface.H = H
        surface.I = I
        surface.J = J
        return surface

    @classmethod
    def TorusZ(
        cls,
        name: str = "",
        A: float = 0.0,
        B: float = 0.0,
        C: float = 0.0,
        R: float = 0.0,
        r: float = 0.0,
        boundary_condition: str = "none",
    ):
        """
        Create a torus on the x-y plane, radially symmetric around the z-axis:
        f(x, y, z) = ( sqrt[(x - A)^2 + (y - B)^2] - R )^2 + (z - C)^2 - r^2

        Parameters
        ----------
        name : str, optional
            User label.
        A,B,C,R,r : float
            A, B, C are displacement values for the torus in the x, y, z
            directions, respectively.
            R is the radius around which a circle is revolved about the axis
            of revolution (parallel with the z-axis).
            r is the radius of the circle that is being revolved.
        boundary_condition : str, optional
            Boundary type (``"none"``, ``"vacuum"``, or ``"reflective"``).

        Returns
        -------
        Surface
            Torus surface.
        """
        type_ = SURFACE_TORUS_Z
        surface = cls(type_, name, boundary_condition)

        surface.linear = False

        # Torus parameters are kept separate from the quadric coefficients:
        # A, B, C here are center displacements, not quadric terms.
        surface.A = A
        surface.B = B
        surface.C = C
        surface.R = R
        surface.r = r

        return surface

    # ==================================================================================
    # Region building
    # ==================================================================================

    def __pos__(self):
        """
        Half-space on the **outward** side of the surface.

        Returns
        -------
        Region
            Region representing ``n · r + J >= 0`` (sign convention per type).
        """
        return Region.make_halfspace(self, +1)
+ """ + return Region.make_halfspace(self, -1) + + # ================================================================================== + # Surface moving + # ================================================================================== + + def move(self, velocities, durations): + """ + Define piecewise-constant motion for the surface. + + Appends a final static segment (zero velocity, infinite duration) so that + the motion covers the whole simulation time. + + Parameters + ---------- + velocities : array_like, shape (N, 3) or list + Per-segment velocity vectors [cm/s]. + durations : array_like, shape (N,) or list + Per-segment durations [s]. + + Notes + ----- + - Internally converts lists to arrays and constructs + ``move_time_grid`` and cumulative ``move_translations``. + - Sets ``moving=True`` and ``N_move = len(durations) + 1``. + + Examples + -------- + >>> s = Surface.PlaneZ(z=0.0) + >>> s.move(velocities=[[0,0,1.0]], durations=[0.5]) # 0.5 s upward, then static + >>> s.N_move + 2 + """ + move_object(self, velocities, durations) + + +# ====================================================================================== +# Type decoder +# ====================================================================================== + + +def decode_type(type_): + if type_ == SURFACE_PLANE_X: + return "Plane-X surface" + elif type_ == SURFACE_PLANE_Y: + return "Plane-Y surface" + elif type_ == SURFACE_PLANE_Z: + return "Plane-Z surface" + elif type_ == SURFACE_PLANE: + return "Plane surface" + elif type_ == SURFACE_CYLINDER_X: + return "Infinite cylinder-X surface" + elif type_ == SURFACE_CYLINDER_Y: + return "Infinite cylinder-Y surface" + elif type_ == SURFACE_CYLINDER_Z: + return "Infinite cylinder-Z surface" + elif type_ == SURFACE_CYLINDER: + return "General cylinder surface" + elif type_ == SURFACE_SPHERE: + return "Sphere surface" + elif type_ == SURFACE_QUADRIC: + return "Quadric surface" + elif type_ == SURFACE_CONE_X: + return "Infinite cone-X surface" 
+ elif type_ == SURFACE_CONE_Y: + return "Infinite cone-Y surface" + elif type_ == SURFACE_CONE_Z: + return "Infinite cone-Z surface" + elif type_ == SURFACE_TORUS_Z: + return "Torus-Z surface" + + +def decode_BC_type(type_): + if type_ == BC_NONE: + return "None" + elif type_ == BC_VACUUM: + return "Vacuum" + elif type_ == BC_REFLECTIVE: + return "Reflective" diff --git a/mcdc/mcdc/object_/tally.py b/mcdc/mcdc/object_/tally.py new file mode 100644 index 000000000..f55632bbd --- /dev/null +++ b/mcdc/mcdc/object_/tally.py @@ -0,0 +1,545 @@ +from __future__ import annotations +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from mcdc.object_.cell import Cell + from mcdc.object_.surface import Surface + +#### + +import numpy as np +import operator + +from functools import reduce +from numpy import float64 +from numpy.typing import NDArray +from typing import Annotated, Iterable +from types import NoneType + +#### + +import mcdc.object_.mesh as mesh_module + +from mcdc.constant import ( + INF, + MESH_STRUCTURED, + MESH_UNIFORM, + PI, + SCORE_FLUX, + SCORE_DENSITY, + SCORE_COLLISION, + SCORE_CAPTURE, + SCORE_FISSION, + SCORE_NET_CURRENT, + SCORE_ENERGY_DEPOSITION, + SPATIAL_FILTER_CELL, + SPATIAL_FILTER_MESH, + SPATIAL_FILTER_NONE, + TALLY_SURFACE, + TALLY_COLLISION, + TALLY_TRACKLENGTH, +) +from mcdc.object_.mesh import MeshBase, MeshStructured, MeshUniform +from mcdc.object_.base import ObjectPolymorphic +from mcdc.object_.simulation import simulation +from mcdc.print_ import print_1d_array, print_error + +SURFACE_SCORES = set(["net-current"]) +TRACKLENGTH_SCORES = set(["flux", "density", "collision", "capture", "fission"]) +COLLISION_SCORES = set(["energy_deposition"]) + + +class Tally(ObjectPolymorphic): + """ + Define a tally. 
+ """ + + # Annotations for Numba mode + label: str = "tally" + # + name: str + scores: list[int] + # + filter_direction: bool + filter_energy: bool + filter_time: bool + mu: NDArray[float64] + azi: NDArray[float64] + polar_reference: Annotated[NDArray[float64], (3,)] + energy: NDArray[float64] + time: NDArray[float64] + # + bin: NDArray[float64] + bin_sum: NDArray[float64] + bin_sum_square: NDArray[float64] + bin_shape: list[int] + # + stride_mu: int + stride_azi: int + stride_energy: int + stride_time: int + + def __new__( + cls, + name: str = "", + scores: list[str] = ["flux"], + surface: Surface | NoneType = None, + cell: Cell | NoneType = None, + mesh: MeshBase | NoneType = None, + mu: Iterable[float] | NoneType = None, + azi: Iterable[float] | NoneType = None, + polar_reference: Iterable[float] | NoneType = None, + energy: Iterable[float] | str | NoneType = None, + time: Iterable[float] | NoneType = None, + ) -> TallySurface | TallyTracklength | TallyCollision: + # Determine type and create the tally self based on the provided + # spatial filters and scores + + # Surface tally + if surface is not None: + for score in scores: + if not score in SURFACE_SCORES: + print_error( + f"Scoring '{score}' with surface tally is not supported. " + f"Supported surface tally scores: {SURFACE_SCORES}." + ) + return super().__new__(TallySurface) + + # Collision tally + if set(scores) <= COLLISION_SCORES: + return super().__new__(TallyCollision) + + # Tracklength tally + if set(scores) <= TRACKLENGTH_SCORES: + return super().__new__(TallyTracklength) + + # Error: Missing surface for surface score + for score in scores: + if score in SURFACE_SCORES and surface is None: + print_error(f"Scoring '{score}' needs a surface tally.") + + # Error: Unsupported score combination + print_error( + "Cannot mix tracklength scores with collision ones." 
+ f"\n Tracklength scores: {TRACKLENGTH_SCORES}" + f"\n Collision scores: {COLLISION_SCORES}" + ) + + def __init__( + self, + name: str = "", + scores: list[str] = ["flux"], + surface: Surface | NoneType = None, + cell: Cell | NoneType = None, + mesh: MeshBase | NoneType = None, + mu: Iterable[float] | NoneType = None, + azi: Iterable[float] | NoneType = None, + polar_reference: Iterable[float] | NoneType = None, + energy: Iterable[float] | str | NoneType = None, + time: Iterable[float] | NoneType = None, + spatial_shape: tuple[int] | NoneType = None, + ): + # Set name + if name != "": + self.name = name + else: + self.name = f"{self.label}_{self.child_ID}" + + # Set scores + self.scores = [] + for score in scores: + if score == "flux": + self.scores.append(SCORE_FLUX) + elif score == "density": + self.scores.append(SCORE_DENSITY) + elif score == "collision": + self.scores.append(SCORE_COLLISION) + elif score == "capture": + self.scores.append(SCORE_CAPTURE) + elif score == "fission": + self.scores.append(SCORE_FISSION) + elif score == "net-current": + self.scores.append(SCORE_NET_CURRENT) + elif score == "energy_deposition": + self.scores.append(SCORE_ENERGY_DEPOSITION) + else: + print_error(f"Unknown tally score: {score}") + + # Phase-space filters + self.mu = np.array([-1.0, 1.0]) + self.azi = np.array([-PI, PI]) + self.polar_reference = np.array([0.0, 0.0, 1.0]) + self.energy = np.array([-1.0, INF]) + self.time = np.array([0.0, INF]) + self.filter_direction = False + self.filter_energy = False + self.filter_time = False + if mu is not None: + self.mu = np.array(mu) + self.filter_direction = True + if azi is not None: + self.azi = np.array(azi) + self.filter_direction = True + if polar_reference is not None: + polar_reference = np.array(polar_reference) + self.polar_reference /= polar_reference / np.linalg.norm(polar_reference) + if energy is not None: + if type(energy) == str and energy == "all_groups": + G = simulation.materials[0].G + self.energy = 
np.linspace(0, G, G + 1) - 0.5 + else: + self.energy = np.array(energy) + self.filter_energy = True + if time is not None: + self.time = np.array(time) + self.filter_time = True + + # Determine bin shape + N_mu = len(self.mu) - 1 + N_azi = len(self.azi) - 1 + N_energy = len(self.energy) - 1 + N_time = len(self.time) - 1 + N_score = len(self.scores) + # + if spatial_shape is None: + shape = (N_mu, N_azi, N_energy, N_time, N_score) + else: + shape = (N_mu, N_azi, N_energy, N_time) + spatial_shape + (N_score,) + + # Set bins and strides + self._set_bin_shape_and_strides(shape) + + def _set_bin_shape_and_strides(self, shape): + # Set bins + self.bin_shape = list(shape) + + # Set strides + self.stride_time = reduce(operator.mul, shape[4:]) + self.stride_energy = reduce(operator.mul, shape[3:]) + self.stride_azi = reduce(operator.mul, shape[2:]) + self.stride_mu = reduce(operator.mul, shape[1:]) + + def _use_census_based_tally(self, frequency): + first_census = simulation.settings.census_time[0] + self.time = np.linspace(0.0, first_census, frequency + 1) + + N_mu = len(self.mu) - 1 + N_azi = len(self.azi) - 1 + N_energy = len(self.energy) - 1 + N_score = len(self.scores) + + spatial_shape = None + if len(self.bin_shape) > 5: + spatial_shape = tuple(self.bin_shape[4:-1]) + + if spatial_shape is None: + shape = (N_mu, N_azi, N_energy, frequency, N_score) + else: + shape = (N_mu, N_azi, N_energy, frequency) + spatial_shape + (N_score,) + + self._set_bin_shape_and_strides(shape) + + def _phasespace_filter_text(self): + text = "" + text += f" - Scores: {[decode_score_type(x) for x in self.scores]}\n" + if self.filter_time or self.filter_energy or self.filter_direction: + text += f" - Phase-space filters\n" + if self.filter_time: + text += f" - Time {print_1d_array(self.time)} s\n" + if self.filter_energy: + text += f" - Energy {print_1d_array(self.energy)} eV\n" + if self.filter_direction: + text += f" - Direction\n" + text += f" - Polar reference: {self.polar_reference}\n" + 
text += f" - Polar cosine {print_1d_array(self.mu)}\n" + text += f" - Azimuthal angle {print_1d_array(self.azi)}\n" + return text + + def __repr__(self): + text = "\n" + text += f"{decode_type(self.type)}\n" + text += f" - ID: {self.ID}\n" + text += f" - Name: {self.name}\n" + return text + + +def decode_type(type_): + if type_ == TALLY_TRACKLENGTH: + return "Tracklength tally" + elif type_ == TALLY_SURFACE: + return "Surface tally" + elif type_ == TALLY_COLLISION: + return "Collision tally" + + +def decode_score_type(type_, lower_case=False): + if type_ == SCORE_FLUX: + return "Flux" if not lower_case else "flux" + elif type_ == SCORE_DENSITY: + return "Density" if not lower_case else "density" + elif type_ == SCORE_COLLISION: + return "Collision" if not lower_case else "collision" + elif type_ == SCORE_CAPTURE: + return "Capture" if not lower_case else "capture" + elif type_ == SCORE_FISSION: + return "Fission" if not lower_case else "fission" + elif type_ == SCORE_NET_CURRENT: + return "Net current" if not lower_case else "net-current" + elif type_ == SCORE_ENERGY_DEPOSITION: + return "Energy deposition" if not lower_case else "energy_deposition" + + +# ====================================================================================== +# Surface tally +# ====================================================================================== + + +class TallySurface(Tally): + # Annotations for Numba mode + label: str = "surface_tally" + # + surface: Surface + + def __init__( + self, + surface: Surface, + name: str = "", + scores: list[str] = ["flux"], + mu: Iterable[float] | NoneType = None, + azi: Iterable[float] | NoneType = None, + polar_reference: Iterable[float] | NoneType = None, + energy: Iterable[float] | str | NoneType = None, + time: Iterable[float] | NoneType = None, + ): + type_ = TALLY_SURFACE + super(Tally, self).__init__(type_) + super().__init__( + name, + scores, + mu=mu, + azi=azi, + polar_reference=polar_reference, + energy=energy, + 
class TallyCollision(Tally):
    """
    Tally scored with the collision estimator.

    Currently restricted to exactly scores=["energy_deposition"] with a mesh
    spatial filter (enforced in __init__ below).
    """

    # Annotations for Numba mode; spatial_filter holds a Python object and is
    # excluded from the Numba record.
    label: str = "collision_tally"
    non_numba: list[str] = ["spatial_filter"]

    # Spatial filter: a Cell, a mesh, or None (no filter)
    spatial_filter: Cell | MeshBase | NoneType
    spatial_filter_type: int
    spatial_filter_ID: int
    spatial_filter_subtype: int

    # Flattened-bin strides for mesh-filtered scoring (-1 when no mesh filter)
    mesh_stride_z: int
    mesh_stride_y: int
    mesh_stride_x: int

    def __init__(
        self,
        cell: Cell | NoneType = None,
        mesh: MeshBase | NoneType = None,
        name: str = "",
        scores: list[str] = ["energy_deposition"],
        mu: Iterable[float] | NoneType = None,
        azi: Iterable[float] | NoneType = None,
        polar_reference: Iterable[float] | NoneType = None,
        energy: Iterable[float] | str | NoneType = None,
        time: Iterable[float] | NoneType = None,
    ):
        type_ = TALLY_COLLISION
        # Mesh filter appends (Nx, Ny, Nz) to the phase-space bin shape
        spatial_shape = None
        if mesh is not None:
            spatial_shape = (mesh.Nx, mesh.Ny, mesh.Nz)

        # Two-step init: ObjectPolymorphic registration, then shared Tally setup
        super(Tally, self).__init__(type_)
        super().__init__(
            name,
            scores,
            mu=mu,
            azi=azi,
            polar_reference=polar_reference,
            energy=energy,
            time=time,
            spatial_shape=spatial_shape,
        )

        # Only the single score 'energy_deposition' is supported for now
        if len(self.scores) != 1 or SCORE_ENERGY_DEPOSITION not in self.scores:
            print_error(
                "Collision tally currently supports only scores=['energy_deposition']."
            )

        # Support check
        if SCORE_ENERGY_DEPOSITION in self.scores and mesh is None:
            print_error(
                "Score 'energy_deposition' is currently only supported with a mesh spatial filter."
            )

        # ==============================================================================
        # Set spatial filter
        # ==============================================================================

        # Default: no filter
        self.spatial_filter = None
        self.spatial_filter_type = SPATIAL_FILTER_NONE
        self.spatial_filter_subtype = -1
        self.spatial_filter_ID = -1
        self.mesh_stride_z = -1
        self.mesh_stride_y = -1
        self.mesh_stride_x = -1

        # Cell filter
        if cell is not None:
            self.spatial_filter = cell
            self.spatial_filter_type = SPATIAL_FILTER_CELL
            self.spatial_filter_ID = cell.ID

            # Attach tally to the cell
            cell.tallies.append(self)

        # Mesh filter
        # NOTE(review): plain `if` (not `elif` as in TallyTracklength): when
        # both cell and mesh are given the mesh silently overrides the cell
        # filter (though the cell still keeps the tally attached) — confirm.
        if mesh is not None:
            self.spatial_filter = mesh
            self.spatial_filter_type = SPATIAL_FILTER_MESH
            if isinstance(mesh, MeshStructured):
                self.spatial_filter_subtype = MESH_STRUCTURED
            elif isinstance(mesh, MeshUniform):
                self.spatial_filter_subtype = MESH_UNIFORM
            self.spatial_filter_ID = mesh.ID

            # Set the strides (z fastest after score, then y, then x)
            N_score = len(self.scores)
            self.mesh_stride_z = N_score
            self.mesh_stride_y = N_score * mesh.Nz
            self.mesh_stride_x = N_score * mesh.Nz * mesh.Ny

    def __repr__(self):
        # Base summary, then the spatial filter, then phase-space filters
        text = super().__repr__()
        if self.spatial_filter_type == SPATIAL_FILTER_CELL:
            text += f" - Cell: {self.spatial_filter.name}\n"
        elif self.spatial_filter_type == SPATIAL_FILTER_MESH:
            text += f" - Mesh: {mesh_module.decode_type(self.spatial_filter.type)} (ID {self.spatial_filter.ID})\n"
        text += super()._phasespace_filter_text()
        text += f" - Bin shape [mu, azi, energy, time, score]: {self.bin_shape} \n"
        return text
class TallyTracklength(Tally):
    """
    Tally scored with the track-length estimator (the default estimator for
    flux-like scores; see TRACKLENGTH_SCORES).
    """

    # Annotations for Numba mode
    label: str = "tracklength_tally"
    non_numba: list[str] = ["spatial_filter"]
    #
    # Spatial filter: a Cell, a mesh, or None (no filter)
    spatial_filter: Cell | MeshBase | NoneType
    spatial_filter_type: int
    spatial_filter_ID: int
    spatial_filter_subtype: int
    #
    # Flattened-bin strides for mesh-filtered scoring (-1 when no mesh filter)
    mesh_stride_z: int
    mesh_stride_y: int
    mesh_stride_x: int

    def __init__(
        self,
        cell: Cell | NoneType = None,
        mesh: MeshBase | NoneType = None,
        name: str = "",
        scores: list[str] = ["flux"],
        mu: Iterable[float] | NoneType = None,
        azi: Iterable[float] | NoneType = None,
        polar_reference: Iterable[float] | NoneType = None,
        energy: Iterable[float] | str | NoneType = None,
        time: Iterable[float] | NoneType = None,
    ):
        type_ = TALLY_TRACKLENGTH
        # Mesh filter appends (Nx, Ny, Nz) to the phase-space bin shape
        spatial_shape = None
        if mesh is not None:
            spatial_shape = (mesh.Nx, mesh.Ny, mesh.Nz)

        # Two-step init: ObjectPolymorphic registration, then shared Tally setup
        super(Tally, self).__init__(type_)
        super().__init__(
            name,
            scores,
            mu=mu,
            azi=azi,
            polar_reference=polar_reference,
            energy=energy,
            time=time,
            spatial_shape=spatial_shape,
        )

        # ==============================================================================
        # Set spatial filter
        # ==============================================================================

        # Default: no filter
        self.spatial_filter = None
        self.spatial_filter_type = SPATIAL_FILTER_NONE
        self.spatial_filter_subtype = -1
        self.spatial_filter_ID = -1
        self.mesh_stride_z = -1
        self.mesh_stride_y = -1
        self.mesh_stride_x = -1

        # Cell filter (takes precedence over a mesh filter if both are given)
        if cell is not None:
            self.spatial_filter = cell
            self.spatial_filter_type = SPATIAL_FILTER_CELL
            self.spatial_filter_ID = cell.ID

            # Attach tally to the cell
            cell.tallies.append(self)

        # Mesh filter
        elif mesh is not None:
            self.spatial_filter = mesh
            self.spatial_filter_type = SPATIAL_FILTER_MESH
            if isinstance(mesh, MeshStructured):
                self.spatial_filter_subtype = MESH_STRUCTURED
            elif isinstance(mesh, MeshUniform):
                self.spatial_filter_subtype = MESH_UNIFORM
            self.spatial_filter_ID = mesh.ID

            # Set the strides (z fastest after score, then y, then x)
            N_score = len(self.scores)
            self.mesh_stride_z = N_score
            self.mesh_stride_y = N_score * mesh.Nz
            self.mesh_stride_x = N_score * mesh.Nz * mesh.Ny

    def __repr__(self):
        # Base summary, then the spatial filter, then phase-space filters
        text = super().__repr__()
        if self.spatial_filter_type == SPATIAL_FILTER_CELL:
            text += f" - Cell: {self.spatial_filter.name}\n"
        elif self.spatial_filter_type == SPATIAL_FILTER_MESH:
            text += f" - Mesh: {mesh_module.decode_type(self.spatial_filter.type)} (ID {self.spatial_filter.ID})\n"
        text += super()._phasespace_filter_text()
        text += f" - Bin shape [mu, azi, energy, time, score]: {self.bin_shape} \n"
        return text
# ======================================================================================
# Weight roulette
# ======================================================================================


class WeightRoulette(ObjectSingleton):
    """Singleton holding the weight-roulette technique parameters."""

    # Annotations for Numba mode
    label: str = "weight_roulette"

    weight_threshold: float
    weight_target: float

    def __init__(self):
        # With threshold 0.0 no particle weight ever triggers the roulette,
        # so the technique is effectively off until configured via __call__.
        self.weight_threshold = 0.0
        self.weight_target = 1.0

    def __call__(self, weight_threshold: float = 0.0, weight_target: float = 1.0):
        """Configure the roulette; the trigger threshold must not exceed the
        survivor weight target."""
        if weight_threshold > weight_target:
            print_error(
                "For weight roulette, weight threshold has to be smaller than the target"
            )
        self.weight_threshold = weight_threshold
        self.weight_target = weight_target


# ======================================================================================
# Population control
# ======================================================================================


class PopulationControl(ObjectSingleton):
    """Singleton on/off switch for population control."""

    # Annotations for Numba mode
    label: str = "population_control"
    active: bool

    def __init__(self):
        # Off by default
        self.active = False

    def __call__(self, active: bool = True):
        """Enable (default) or disable population control."""
        self.active = active
Define a list of cells as a universe. + + Parameters + ---------- + name : str, optional + User label. + cells : list of Cell + List of cells that comprise the universe. + root : bool, optional + Flag to set as the root universe (ID = 0). + + Returns + ------- + Universe + The universe object. + + See Also + -------- + mcdc.Cell : Creates a cell that can be used to define a universe. + """ + + # Annotations for Numba mode + label: str = "universe" + # + name: str + cells: list[Cell] + + def __init__(self, name: str = "", cells: list[Cell] = [], root: bool = False): + # Custom treatment for root universe + if root: + super().__init__(register=False) + self.ID = 0 + else: + super().__init__() + + # Set name + if name != "": + self.name = name + else: + self.name = f"{self.label}_{self.ID}" + + self.cells = cells + + def __repr__(self): + text = "\n" + text += f"Universe\n" + if self.ID == 0: + text += f" - ID: {self.ID} (root)\n" + else: + text += f" - ID: {self.ID}\n" + text += f" - Name: {self.name}\n" + text += f"Cells: {[x.ID for x in self.cells]}" + return text + + +# ====================================================================================== +# Lattice +# ====================================================================================== + + +class Lattice(ObjectNonSingleton): + """ + Define a regular lattice of universes. + + Parameters + ---------- + name : str, optional + User label. + x : tuple of (float, float, int), optional + Lattice specification along x: ``(x0, dx, Nx)``. + y : tuple of (float, float, int), optional + Lattice specification along y: ``(y0, dy, Ny)``. + z : tuple of (float, float, int), optional + Lattice specification along z: ``(z0, dz, Nz)``. + universes : list of Universe + Array of universes filling each lattice cell. + + Returns + ------- + Lattice + The lattice object. + + See Also + -------- + mcdc.Universe : Creates a universe to place in a lattice. 
+ """ + + # Annotations for Numba mode + label: str = "lattice" + # + name: str + x0: float + dx: float + Nx: int + y0: float + dy: float + Ny: int + z0: float + dz: float + Nz: int + universe_IDs: Annotated[NDArray[int64], ("Nx", "Ny", "Nz")] + + def __init__( + self, + name: str = "", + x: tuple[float, float, int] | NoneType = None, + y: tuple[float, float, int] | NoneType = None, + z: tuple[float, float, int] | NoneType = None, + universes: list[Universe] = None, + ): + super().__init__() + + # Set name + if name != "": + self.name = name + else: + self.name = f"{self.label}_{self.ID}" + + # Default uniform grids + self.x0 = -INF + self.dx = 2 * INF + self.Nx = 1 + self.y0 = -INF + self.dy = 2 * INF + self.Ny = 1 + self.z0 = -INF + self.dz = 2 * INF + self.Nz = 1 + self.t0 = 0.0 # Placeholder time grid is needed to use mesh indexing function + self.dt = INF + self.Nt = 1 + + # Set the grid + if x is not None: + self.x0 = x[0] + self.dx = x[1] + self.Nx = x[2] + if y is not None: + self.y0 = y[0] + self.dy = y[1] + self.Ny = y[2] + if z is not None: + self.z0 = z[0] + self.dz = z[1] + self.Nz = z[2] + + # Set universe IDs + get_ID = np.vectorize(lambda obj: obj.ID) + universe_IDs = get_ID(universes) + ax_expand = [] + if x is None: + ax_expand.append(2) + if y is None: + ax_expand.append(1) + if z is None: + ax_expand.append(0) + for ax in ax_expand: + universe_IDs = np.expand_dims(universe_IDs, axis=ax) + + # Change indexing structure: [z(flip), y(flip), x] --> [x, y, z] + universe_IDs = np.transpose(universe_IDs) + universe_IDs = np.flip(universe_IDs, axis=1) + universe_IDs = np.flip(universe_IDs, axis=2) + self.universe_IDs = np.array(universe_IDs) + + def __repr__(self): + text = "\n" + text += f"Lattice\n" + text += f" - ID: {self.ID}\n" + text += f" - Name: {self.name}\n" + text += f" - (x0, dx, Nx): ({self.x0}, {self.dx}, {self.Nx})\n" + text += f" - (y0, dy, Ny): ({self.y0}, {self.dy}, {self.Ny})\n" + text += f" - (z0, dz, Nz): ({self.z0}, {self.dz}, 
{self.Nz})\n" + text += f"Universes: {set([x.ID for x in list(flatten(self.universes))])}" + return text diff --git a/mcdc/mcdc/object_/util.py b/mcdc/mcdc/object_/util.py new file mode 100644 index 000000000..19243753d --- /dev/null +++ b/mcdc/mcdc/object_/util.py @@ -0,0 +1,560 @@ +import numpy as np + +from mcdc.constant import INF + + +def cmf_from_pmf(pmf): + cmf = np.zeros(len(pmf) + 1) + + # Build CMF incrementally + total = 0.0 + for idx in range(len(pmf)): + total += pmf[idx] + cmf[idx + 1] = total + + # Normalize this segment so CDF ends at 1 + norm = cmf[-1] + pmf /= norm + cmf /= norm + + return pmf, cmf + + +def cdf_from_pdf(value, pdf): + cdf = np.zeros_like(pdf) + + # Build CDF incrementally with trapezoidal integration + for idx in range(len(pdf) - 1): + cdf[idx + 1] = ( + cdf[idx] + (pdf[idx] + pdf[idx + 1]) * (value[idx + 1] - value[idx]) * 0.5 + ) + + # Normalize this segment so CDF ends at 1 + norm = cdf[-1] + pdf /= norm + cdf /= norm + + return pdf, cdf + + +def multi_cdf_from_pdf(offset, value, pdf): + cdf = np.zeros_like(pdf) + + for i in range(len(offset)): + start = offset[i] + end = offset[i + 1] if i < len(offset) - 1 else len(pdf) + + # Build CDF incrementally with trapezoidal integration + for idx in range(start, end - 1): + cdf[idx + 1] = ( + cdf[idx] + + (pdf[idx] + pdf[idx + 1]) * (value[idx + 1] - value[idx]) * 0.5 + ) + + # Normalize this segment so CDF ends at 1 + norm = cdf[end - 1] + pdf[start:end] /= norm + cdf[start:end] /= norm + + return pdf, cdf + + +def is_sorted(a): + return np.all(a[:-1] <= a[1:]) + + +# Natural isotopic abundance data from +# https://www.nndc.bnl.gov/walletcards/search.html +ISOTOPIC_ABUNDANCE = { + "H": { + "H1": (99.972 + 99.999) / 2, + "H2": (0.001 + 0.028) / 2, + }, + "He": { + "He3": 0.0002, + "He4": 99.9998, + }, + "Li": { + "Li6": (1.9 + 7.8) / 2, + "Li7": (92.2 + 98.1) / 2, + }, + "Be": { + "Be9": 100.0, + }, + "B": { + "B10": (18.9 + 20.4) / 2, + "B11": (79.6 + 81.1) / 2, + }, + "C": { + 
"C12": (98.84 + 99.04) / 2, + "C13": (0.96 + 1.16) / 2, + }, + "N": { + "N14": (99.578 + 99.663) / 2, + "N15": (0.337 + 0.422) / 2, + }, + "O": { + "O16": (99.738 + 99.776) / 2, + "O17": (0.0367 + 0.04) / 2, + "O18": (0.187 + 0.222) / 2, + }, + "F": { + "F19": 100.0, + }, + "Ne": { + "Ne20": 90.48, + "Ne21": 0.27, + "Ne22": 9.25, + }, + "Na": { + "Na23": 100.0, + }, + "Mg": { + "Mg24": (78.88 + 79.05) / 2, + "Mg25": (9.988 + 10.034) / 2, + "Mg26": (10.96 + 11.09) / 2, + }, + "Al": { + "Al27": 100.0, + }, + "Si": { + "Si28": (92.191 + 92.318) / 2, + "Si29": (4.645 + 4.699) / 2, + "Si30": (3.037 + 3.11) / 2, + }, + "P": { + "P31": 100.0, + }, + "S": { + "S32": (94.41 + 95.29) / 2, + "S33": (0.729 + 0.797) / 2, + "S34": (3.96 + 4.77) / 2, + "S36": (0.0129 + 0.0187) / 2, + }, + "Cl": { + "Cl35": (75.5 + 76.1) / 2, + "Cl37": (23.9 + 24.5) / 2, + }, + "Ar": { + "Ar36": 0.3336, + "Ar38": 0.0629, + "Ar40": 99.6035, + }, + "K": { + "K39": 93.2581, + "K40": 0.0117, + "K41": 6.7302, + }, + "Ca": { + "Ca40": 96.941, + "Ca42": 0.647, + "Ca43": 0.135, + "Ca44": 2.086, + "Ca46": 0.004, + "Ca48": 0.187, + }, + "Sc": { + "Sc45": 100.0, + }, + "Ti": { + "Ti46": 8.25, + "Ti47": 7.44, + "Ti48": 73.72, + "Ti49": 5.41, + "Ti50": 5.18, + }, + "V": { + "V50": 0.25, + "V51": 99.75, + }, + "Cr": { + "Cr50": 4.345, + "Cr52": 83.789, + "Cr53": 9.501, + "Cr54": 2.365, + }, + "Mn": { + "Mn55": 100.0, + }, + "Fe": { + "Fe54": 5.845, + "Fe56": 91.754, + "Fe57": 2.119, + "Fe58": 0.282, + }, + "Co": { + "Co59": 100.0, + }, + "Ni": { + "Ni58": 68.0769, + "Ni60": 26.2231, + "Ni61": 1.1399, + "Ni62": 3.6345, + "Ni64": 0.9256, + }, + "Cu": { + "Cu63": 69.15, + "Cu65": 30.85, + }, + "Zn": { + "Zn64": 49.17, + "Zn66": 27.73, + "Zn67": 4.04, + "Zn68": 18.45, + "Zn70": 0.61, + }, + "Ga": { + "Ga69": 60.108, + "Ga71": 39.892, + }, + "Ge": { + "Ge70": 20.52, + "Ge72": 27.45, + "Ge73": 7.76, + "Ge74": 36.52, + "Ge76": 7.75, + }, + "As": { + "As75": 100.0, + }, + "Se": { + "Se74": 0.86, + "Se76": 9.23, + 
"Se77": 7.6, + "Se78": 23.69, + "Se80": 49.8, + "Se82": 8.82, + }, + "Br": { + "Br79": (50.5 + 50.8) / 2, + "Br81": (49.2 + 49.5) / 2, + }, + "Kr": { + "Kr78": 0.355, + "Kr80": 2.286, + "Kr82": 11.593, + "Kr83": 11.5, + "Kr84": 56.987, + "Kr86": 17.279, + }, + "Rb": { + "Rb85": 72.17, + "Rb87": 27.83, + }, + "Sr": { + "Sr84": 0.56, + "Sr86": 9.86, + "Sr87": 7.0, + "Sr88": 82.58, + }, + "Y": { + "Y89": 100.0, + }, + "Zr": { + "Zr90": 51.45, + "Zr91": 11.22, + "Zr92": 17.15, + "Zr94": 17.38, + "Zr96": 2.8, + }, + "Nb": { + "Nb93": 100.0, + }, + "Mo": { + "Mo92": 14.649, + "Mo94": 9.187, + "Mo95": 15.873, + "Mo96": 16.673, + "Mo97": 9.582, + "Mo98": 24.292, + "Mo100": 9.744, + }, + "Ru": { + "Ru96": 5.54, + "Ru98": 1.87, + "Ru99": 12.76, + "Ru100": 12.6, + "Ru101": 17.06, + "Ru102": 31.55, + "Ru104": 18.62, + }, + "Rh": { + "Rh103": 100.0, + }, + "Pd": { + "Pd102": 1.02, + "Pd104": 11.14, + "Pd105": 22.33, + "Pd106": 27.33, + "Pd108": 26.46, + "Pd110": 11.72, + }, + "Ag": { + "Ag107": 51.839, + "Ag109": 48.161, + }, + "Cd": { + "Cd106": 1.245, + "Cd108": 0.888, + "Cd110": 12.47, + "Cd111": 12.795, + "Cd112": 24.109, + "Cd113": 12.227, + "Cd114": 28.754, + "Cd116": 7.512, + }, + "In": { + "In113": 4.281, + "In115": 95.719, + }, + "Sn": { + "Sn112": 0.97, + "Sn114": 0.66, + "Sn115": 0.34, + "Sn116": 14.54, + "Sn117": 7.68, + "Sn118": 24.22, + "Sn119": 8.59, + "Sn120": 32.58, + "Sn122": 4.63, + "Sn124": 5.79, + }, + "Sb": { + "Sb121": 57.21, + "Sb123": 42.79, + }, + "Te": { + "Te120": 0.09, + "Te122": 2.55, + "Te123": 0.89, + "Te124": 4.74, + "Te125": 7.07, + "Te126": 8.84, + "Te128": 31.74, + "Te130": 34.08, + }, + "I": { + "I127": 100.0, + }, + "Xe": { + "Xe124": 0.095, + "Xe126": 0.089, + "Xe128": 1.91, + "Xe129": 26.4, + "Xe130": 4.071, + "Xe131": 21.232, + "Xe132": 26.909, + "Xe134": 10.436, + "Xe136": 8.857, + }, + "Cs": { + "Cs133": 100.0, + }, + "Ba": { + "Ba130": 0.11, + "Ba132": 0.1, + "Ba134": 2.42, + "Ba135": 6.59, + "Ba136": 7.85, + "Ba137": 11.23, + 
"Ba138": 71.7, + }, + "La": { + "La138": 0.08881, + "La139": 99.91119, + }, + "Ce": { + "Ce136": 0.186, + "Ce138": 0.251, + "Ce140": 88.449, + "Ce142": 11.114, + }, + "Pr": { + "Pr141": 100.0, + }, + "Nd": { + "Nd142": 27.153, + "Nd143": 12.173, + "Nd144": 23.798, + "Nd145": 8.293, + "Nd146": 17.189, + "Nd148": 5.756, + "Nd150": 5.638, + }, + "Sm": { + "Sm144": 3.08, + "Sm147": 15.0, + "Sm149": 13.82, + "Sm150": 7.37, + "Sm152": 26.74, + "Sm154": 22.74, + }, + "Eu": { + "Eu151": 47.81, + "Eu153": 52.19, + }, + "Gd": { + "Gd152": 0.2, + "Gd154": 2.18, + "Gd155": 14.8, + "Gd156": 20.47, + "Gd157": 15.65, + "Gd158": 24.84, + "Gd160": 21.86, + }, + "Tb": { + "Tb159": 100.0, + }, + "Dy": { + "Dy156": 0.056, + "Dy158": 0.095, + "Dy160": 2.329, + "Dy161": 18.889, + "Dy162": 25.475, + "Dy163": 24.896, + "Dy164": 28.26, + }, + "Ho": { + "Ho165": 100.0, + }, + "Er": { + "Er162": 0.139, + "Er164": 1.601, + "Er166": 33.503, + "Er167": 22.869, + "Er168": 26.978, + "Er170": 14.91, + }, + "Tm": { + "Tm169": 100.0, + }, + "Yb": { + "Yb168": 0.123, + "Yb170": 2.982, + "Yb171": 14.086, + "Yb172": 21.686, + "Yb173": 16.103, + "Yb174": 32.025, + "Yb176": 12.995, + }, + "Lu": { + "Lu175": 97.401, + "Lu176": 2.599, + }, + "Hf": { + "Hf174": 0.16, + "Hf176": 5.26, + "Hf177": 18.6, + "Hf178": 27.28, + "Hf179": 13.62, + "Hf180": 35.08, + }, + "Ta": { + "Ta180": 0.01201, + "Ta181": 99.98799, + }, + "W": { + "W180": 0.12, + "W182": 26.5, + "W183": 14.31, + "W184": 30.64, + "W186": 28.43, + }, + "Re": { + "Re185": 37.4, + "Re187": 62.6, + }, + "Os": { + "Os184": 0.02, + "Os186": 1.59, + "Os187": 1.97, + "Os188": 13.24, + "Os189": 16.15, + "Os190": 26.26, + "Os192": 40.78, + }, + "Ir": { + "Ir191": 37.3, + "Ir193": 62.7, + }, + "Pt": { + "Pt190": 0.012, + "Pt192": 0.782, + "Pt194": 32.864, + "Pt195": 33.77, + "Pt196": 25.21, + "Pt198": 7.356, + }, + "Au": { + "Au197": 100.0, + }, + "Hg": { + "Hg196": 0.15, + "Hg198": 10.04, + "Hg199": 16.94, + "Hg200": 23.14, + "Hg201": 13.17, + "Hg202": 
29.74, + "Hg204": 6.82, + }, + "Tl": { + "Tl203": (29.44 + 29.59) / 2, + "Tl205": (70.41 + 70.56) / 2, + }, + "Pb": { + "Pb204": 1.4, + "Pb206": 24.1, + "Pb207": 22.1, + "Pb208": 52.4, + }, + "Bi": { + "Bi209": 100.0, + }, + "Th": { + "Th230": 0.02, + "Th232": 99.98, + }, + "Pa": { + "Pa231": 100.0, + }, + "U": { + "U234": 0.0054, + "U235": 0.7204, + "U238": 99.2742, + }, +} + + +def move_object(object_, velocities, durations): + object_.moving = True + object_.N_move = len(durations) + 1 + object_.N_move_grid = len(durations) + 2 + + if isinstance(velocities, np.ndarray): + velocities = velocities.tolist() + durations = durations.tolist() + + # Add the statics for the rest of the simulation + move_velocities = velocities + move_velocities.append([0.0, 0.0, 0.0]) + object_.move_velocities = np.array(move_velocities) + # + move_durations = durations + move_durations.append(INF) + object_.move_durations = np.array(move_durations) + + # Set time grid and translations + object_.move_time_grid = np.zeros(object_.N_move_grid) + object_.move_translations = np.zeros((object_.N_move_grid, 3)) + for n in range(object_.N_move): + t_start = object_.move_time_grid[n] + object_.move_time_grid[n + 1] = t_start + object_.move_durations[n] + + trans_start = object_.move_translations[n] + object_.move_translations[n + 1] = ( + trans_start + object_.move_velocities[n] * object_.move_durations[n] + ) + + +def subtype_size(main_list, subtype: str): + return len([x for x in main_list if x.label == subtype]) diff --git a/mcdc/mcdc/output.py b/mcdc/mcdc/output.py new file mode 100644 index 000000000..24530c461 --- /dev/null +++ b/mcdc/mcdc/output.py @@ -0,0 +1,336 @@ +import h5py +import importlib.metadata +import numpy as np + +#### + +import mcdc.mcdc_get as mcdc_get +import mcdc.print_ as print_module + +from mcdc.constant import ( + MESH_UNIFORM, + MESH_STRUCTURED, + SPATIAL_FILTER_MESH, +) + +# ====================================================================================== +# 
def generate_output(mcdc, data):
    """
    Write the main HDF5 output file (master rank only): code version,
    settings, tallies, eigenvalue results, and optionally the particle bank.
    """
    from mcdc import simulation

    # Only the MPI master writes output
    if not mcdc["mpi_master"]:
        return

    settings = mcdc["settings"]

    # Header
    if settings["use_progress_bar"]:
        print_module.print_msg("")
        print_module.print_msg(" Generating output HDF5 files...")

    # Create the file
    file = h5py.File(settings["output_name"] + ".h5", "w")

    # Version
    file["version"] = importlib.metadata.version("mcdc")

    # Settings
    create_object_dataset(file, "settings", simulation.settings)

    # No need to output tally if time census-based tally is used (those are
    # written per census window by generate_census_based_tally).
    # BUG FIX: close the file before returning — this path leaked the handle.
    if mcdc["settings"]["use_census_based_tally"]:
        file.close()
        return

    # Tallies
    create_tally_dataset(file, mcdc, data)

    # Eigenvalues
    if mcdc["settings"]["neutron_eigenvalue_mode"]:
        N_cycle = mcdc["settings"]["N_cycle"]
        file.create_dataset(
            "k_cycle", data=mcdc_get.simulation.k_cycle_chunk(0, N_cycle, mcdc, data)
        )
        file.create_dataset("k_mean", data=mcdc["k_avg_running"])
        file.create_dataset("k_sdev", data=mcdc["k_sdv_running"])
        file.create_dataset("global_tally/neutron/mean", data=mcdc["n_avg"])
        file.create_dataset("global_tally/neutron/sdev", data=mcdc["n_sdv"])
        file.create_dataset("global_tally/neutron/max", data=mcdc["n_max"])
        file.create_dataset("global_tally/precursor/mean", data=mcdc["C_avg"])
        file.create_dataset("global_tally/precursor/sdev", data=mcdc["C_sdv"])
        file.create_dataset("global_tally/precursor/max", data=mcdc["C_max"])
        if mcdc["settings"]["use_gyration_radius"]:
            file.create_dataset(
                "gyration_radius",
                data=mcdc_get.simulation.gyration_radius_chunk(0, N_cycle, mcdc, data),
            )

    # Save particle?
    if mcdc["settings"]["save_particle"]:
        # Gather source bank
        # TODO: Parallel HDF5 and mitigation of large data passing
        # NOTE(review): only the master reaches this collective gather (other
        # ranks returned at the top of this function) — confirm every rank
        # calls generate_output, otherwise this deadlocks under MPI.
        from mpi4py import MPI  # BUG FIX: MPI was referenced but never imported

        N = mcdc["bank_source"]["size"][0]
        neutrons = MPI.COMM_WORLD.gather(mcdc["bank_source"]["particles"][:N])

        neutrons = np.concatenate(neutrons[:])

        # BUG FIX: previously re-opened the same path (via the misspelled
        # mcdc["setting"] key) in append mode as `f` while `file` was still
        # open, then wrote through `file` anyway. Write directly to the
        # already-open output file.
        file.create_dataset("particles", data=neutrons[:])
        file.create_dataset("particles_size", data=len(neutrons[:]))

    # Close the file
    file.close()


# ======================================================================================
# Input objects
# ======================================================================================


def create_object_dataset(file, group_name, object_):
    """Dump every public, non-callable attribute of `object_` as a dataset
    under `group_name`."""
    for name in [
        x
        for x in dir(object_)
        if (not x.startswith("__") and not callable(getattr(object_, x)))
    ]:
        file[f"{group_name}/{name}"] = getattr(object_, name)


# ======================================================================================
# Runtimes
# ======================================================================================


def create_runtime_datasets(mcdc):
    """Append runtime measurements to the main output file and, when
    requested, also write them to a standalone runtime file."""
    import h5py
    import mcdc.config as config

    if not mcdc["mpi_master"]:
        return

    base_name = mcdc["settings"]["output_name"]

    main_output = h5py.File(f"{base_name}.h5", "a")
    create_runtime_dataset(main_output, mcdc)
    main_output.close()

    if config.args.runtime_output:
        # BUG FIX: this previously opened f"{base_name}.h5" with mode "w",
        # truncating the main output that was just appended to. Write the
        # standalone runtime data to its own file instead.
        # NOTE(review): confirm the runtime file naming convention downstream.
        runtime_output = h5py.File(f"{base_name}_runtime.h5", "w")
        create_runtime_dataset(runtime_output, mcdc)
        runtime_output.close()


def create_runtime_dataset(file, mcdc):
    """Write each recorded runtime section as a one-element dataset."""
    for name in [
        "total",
        "preparation",
        "simulation",
        "output",
        "bank_management",
    ]:
        file.create_dataset(f"runtime/{name}", data=np.array([mcdc["runtime_" + name]]))
# ======================================================================================


def create_tally_dataset(file, mcdc, data):
    """Write every tally's filter grids and per-score statistics to `file`.

    For mesh-filtered tallies the x/y/z edge grids are also written.  Each
    score is stored as `tallies/<name>/<score>/mean` and `.../sdev`.

    Parameters
    ----------
    file : open, writable h5py.File.
    mcdc : global simulation state record.
    data : flat data array holding tally bins and grids.
    """
    from mcdc.constant import TALLY_TRACKLENGTH, TALLY_COLLISION
    from mcdc.object_.tally import decode_score_type

    # Loop over all tally types
    for tally in mcdc["tallies"]:
        tally_name = tally["name"]

        # Filter grids
        file.create_dataset(
            f"tallies/{tally_name}/grid/mu", data=mcdc_get.tally.mu_all(tally, data)
        )
        file.create_dataset(
            f"tallies/{tally_name}/grid/azi",
            data=mcdc_get.tally.azi_all(tally, data),
        )
        file.create_dataset(
            f"tallies/{tally_name}/grid/energy",
            data=mcdc_get.tally.energy_all(tally, data),
        )
        file.create_dataset(
            f"tallies/{tally_name}/grid/time",
            data=mcdc_get.tally.time_all(tally, data),
        )

        # Mesh grid (TODO: Make mesh dataset in a separate group)
        mesh_filtered_tally = None
        if tally["child_type"] == TALLY_TRACKLENGTH:
            mesh_filtered_tally = mcdc["tracklength_tallies"][tally["child_ID"]]
        elif tally["child_type"] == TALLY_COLLISION:
            mesh_filtered_tally = mcdc["collision_tallies"][tally["child_ID"]]

        # Whether this tally carries the three extra spatial (mesh) axes;
        # reused below when rolling the score axis to the front.
        mesh_filtered = (
            mesh_filtered_tally is not None
            and mesh_filtered_tally["spatial_filter_type"] == SPATIAL_FILTER_MESH
        )

        if mesh_filtered:
            mesh_base = mcdc["meshes"][mesh_filtered_tally["spatial_filter_ID"]]
            mesh_type = mesh_base["child_type"]
            mesh_ID = mesh_base["child_ID"]
            if mesh_type == MESH_UNIFORM:
                mesh = mcdc["uniform_meshes"][mesh_ID]
                # Reconstruct edge grids from origin, pitch, and bin count
                x = np.linspace(
                    mesh["x0"], mesh["x0"] + mesh["dx"] * mesh["Nx"], mesh["Nx"] + 1
                )
                y = np.linspace(
                    mesh["y0"], mesh["y0"] + mesh["dy"] * mesh["Ny"], mesh["Ny"] + 1
                )
                z = np.linspace(
                    mesh["z0"], mesh["z0"] + mesh["dz"] * mesh["Nz"], mesh["Nz"] + 1
                )
            elif mesh_type == MESH_STRUCTURED:
                mesh = mcdc["structured_meshes"][mesh_ID]
                x = mcdc_get.structured_mesh.x_all(mesh, data)
                y = mcdc_get.structured_mesh.y_all(mesh, data)
                z = mcdc_get.structured_mesh.z_all(mesh, data)
            file.create_dataset(f"tallies/{tally_name}/grid/x", data=x)
            file.create_dataset(f"tallies/{tally_name}/grid/y", data=y)
            file.create_dataset(f"tallies/{tally_name}/grid/z", data=z)

        # Get and reshape tally
        N_bin = tally["bin_length"]
        start_mean = tally["bin_sum_offset"]
        start_sdev = tally["bin_sum_square_offset"]
        mean = data[start_mean : start_mean + N_bin]
        sdev = data[start_sdev : start_sdev + N_bin]
        shape = tuple(int(x) for x in mcdc_get.tally.bin_shape_all(tally, data))
        mean = mean.reshape(shape)
        sdev = sdev.reshape(shape)

        # Roll tally so that the score axis is in front
        # (axis 4 without a mesh filter, axis 7 with the extra x/y/z axes)
        roll_reference = 7 if mesh_filtered else 4
        mean = np.rollaxis(mean, roll_reference, 0)
        sdev = np.rollaxis(sdev, roll_reference, 0)

        # Iterate over scores
        for i in range(tally["scores_length"]):
            score_type = mcdc_get.tally.scores(i, tally, data)
            score_mean = np.squeeze(mean[i])
            score_sdev = np.squeeze(sdev[i])
            score_name = decode_score_type(score_type, lower_case=True)
            group_name = f"tallies/{tally_name}/{score_name}/"
            file.create_dataset(group_name + "mean", data=score_mean)
            file.create_dataset(group_name + "sdev", data=score_sdev)


def generate_census_based_tally(mcdc, data):
    """Write the current (batch, census) tally snapshot to its own HDF5 file.

    The per-snapshot files are merged into the main output later by
    recombine_tallies().
    """
    idx_batch = mcdc["idx_batch"]
    idx_census = mcdc["idx_census"]
    base_name = mcdc["settings"]["output_name"]

    # One file per (batch, census) pair
    file_name = f"{base_name}-batch_{idx_batch}-census_{idx_census}.h5"
    file = h5py.File(file_name, "w")
    create_tally_dataset(file, mcdc, data)
    file.close()


def replace_dataset(file, field, data):
    """Create dataset `field` in `file`, first deleting any existing one
    (h5py refuses to overwrite an existing dataset)."""
    if field in file:
        del file[field]
    file.create_dataset(field, data=data)


def recombine_tallies():
    """Combine the per-batch/per-census tally files into a single main output.

    Master rank only.  Accumulates batch means and their squares over all
    snapshot files, then writes the batch mean and the standard error of the
    mean into the main output file.
    """
    import h5py
    from mpi4py import MPI
    from mcdc.object_.tally import decode_score_type

    if MPI.COMM_WORLD.Get_rank() > 0:
        return

    # Get simulation and settings
    from mcdc.object_.simulation import simulation

    settings = simulation.settings
    if not settings.use_census_based_tally:
        print("Census-based tally is not used, nothing to recombine.")
        # BUG FIX: the original fell through here and tried to open the
        # (nonexistent) per-census snapshot files anyway.
        return

    # Settings parameters
    base_name = settings.output_name
    N_census = settings.N_census
    N_batch = settings.N_batch
    frequency = settings.census_tally_frequency
    Nt = frequency * (N_census - 1)

    # Append the tally dataset structure to the main output
    main_file = h5py.File(f"{base_name}.h5", "a")
    reference_file = h5py.File(f"{base_name}-batch_0-census_0.h5", "r")
    tally_group = main_file.create_group("tallies")
    for tally in simulation.tallies:
        reference_file.copy(f"tallies/{tally.name}", tally_group)
    reference_file.close()

    # Build the combined time grid over all census intervals
    time_grid = np.zeros(Nt + 1)
    for i in range(N_census - 1):
        start = settings.census_time[i - 1] if i > 0 else 0.0
        end = settings.census_time[i]
        new_grid = np.linspace(start, end, frequency + 1)
        offset = i * frequency + 1
        time_grid[offset : offset + frequency] = new_grid[1:]
    for tally in simulation.tallies:
        replace_dataset(main_file, f"tallies/{tally.name}/grid/time", time_grid)

    # Combine the tallies
    for tally in simulation.tallies:
        # The combined shape.
        # BUG FIX: the original did `shape = tally.bin_shape; shape[3] = Nt`,
        # mutating the tally object's bin_shape in place.  Work on a copy.
        shape = [int(s) for s in tally.bin_shape]
        shape[3] = Nt

        for score in tally.scores:
            score_name = f"tallies/{tally.name}/{decode_score_type(score, True)}"

            mean = np.zeros(shape)
            sdev = np.zeros(shape)

            # Selective squeeze: drop singleton axes after the time axis so
            # the time-sliced accumulation below still works
            axes_to_squeeze = tuple(
                ax for ax, size in enumerate(shape) if size == 1 and ax > 3
            )
            mean = np.squeeze(mean, axis=axes_to_squeeze)
            sdev = np.squeeze(sdev, axis=axes_to_squeeze)

            for i_census in range(N_census - 1):
                # Accumulate sum and sum of squares over batches
                for i_batch in range(N_batch):
                    file_name = f"{base_name}-batch_{i_batch}-census_{i_census}.h5"
                    file = h5py.File(file_name, "r")
                    offset = i_census * frequency

                    # (renamed from `score`, which clobbered the loop variable)
                    batch_mean = file[f"{score_name}/mean"][()]
                    mean[:, :, :, offset : offset + frequency] += batch_mean
                    sdev[:, :, :, offset : offset + frequency] += (
                        batch_mean * batch_mean
                    )

                    file.close()

            # Squeeze remaining singleton axes
            mean = np.squeeze(mean)
            sdev = np.squeeze(sdev)

            # Compute statistics: batch mean and standard error of the mean
            # NOTE(review): divides by N_batch - 1; requires N_batch > 1.
            mean /= N_batch
            sdev = np.sqrt((sdev / N_batch - np.square(mean)) / (N_batch - 1))

            replace_dataset(main_file, f"{score_name}/mean", mean)
            replace_dataset(main_file, f"{score_name}/sdev", sdev)

    main_file.close()


# --------------------------------------------------------------------------------------
# mcdc/mcdc/print_.py
# --------------------------------------------------------------------------------------
# NOTE(review): the original file imported numba/sys/colorama twice and defined
# print_error, print_warning, print_time, and print_runtime twice; Python's
# last-definition-wins meant only the later definitions were in effect (the
# first print_runtime additionally shadowed `simulation` with a float and would
# have raised TypeError if reached).  The duplicates are removed here and the
# effective definitions are kept.

import numba as nb
import sys

from mpi4py import MPI
from colorama import Fore, Back, Style

import mcdc.mcdc_get as mcdc_get

# True only on MPI rank 0; used to silence output on worker ranks
master = MPI.COMM_WORLD.Get_rank() == 0


def print_1d_array(arr):
    """Return a short string summary of a 1D array (truncated when len > 5)."""
    N = len(arr)
    if N > 5:
        return f"(size={len(arr)}): [{arr[0]:.5g}, {arr[1]:.5g}, ..., {arr[-2]:.5g}, {arr[-1]:.5g}]"
    else:
        text = f"(size={len(arr)}): ["
        for i in range(N):
            text += f"{arr[i]:.5g}, "
        if N > 0:
            text = text[:-2]
        text += "]"
        return text


def print_banner():
    """Print the MC/DC ASCII banner."""
    print(
        "\n"
        + r" __ __ ____ __ ____ ____ "
        + "\n"
        + r" | \/ |/ ___|/ /_ _ \ / ___|"
        + "\n"
        + r" | |\/| | | /_ / | | | | "
        + "\n"
        + r" | | | | |___ / /| |_| | |___ "
        + "\n"
        + r" |_| |_|\____|// |____/ \____|"
        + "\n"
    )
    sys.stdout.flush()


def print_configuration():
    """Print the execution mode (Python/Numba) and the MPI process count."""
    mode = "Python" if nb.config.DISABLE_JIT else "Numba"
    mpi_size = MPI.COMM_WORLD.Get_size()

    text = ""
    text += f" Mode | {mode}\n"
    text += f" MPI Processes | {mpi_size}\n"
    print(text)
    sys.stdout.flush()


def print_eigenvalue_header(simulation):
    """Print the column header for the per-cycle eigenvalue report."""
    if simulation["settings"]["use_gyration_radius"]:
        print("\n # k GyRad. k (avg) ")
        print(" ==== ======= ====== ===================")
    else:
        print("\n # k k (avg) ")
        print(" ==== ======= ===================")
    sys.stdout.flush()


def print_batch_header(i, N):
    """Print a 'Batch i/N' header."""
    print(f"\nBatch {i}/{N}")
    sys.stdout.flush()


def print_time(tag, t, percent):
    """Print one runtime line, choosing a human-friendly unit for `t` seconds."""
    if t >= 24 * 60 * 60:
        # BUG FIX: `percent` was placed outside the %-format tuple, which
        # raised TypeError ("not enough arguments for format string").
        print(" %s | %.2f days (%.1f%%)" % (tag, t / 24 / 60 / 60, percent))
    elif t >= 60 * 60:
        print(" %s | %.2f hours (%.1f%%)" % (tag, t / 60 / 60, percent))
    elif t >= 60:
        print(" %s | %.2f minutes (%.1f%%)" % (tag, t / 60, percent))
    else:
        print(" %s | %.2f seconds (%.1f%%)" % (tag, t, percent))


def print_structure(struct):
    """Dump every named field of a structured-array record, one per line."""
    dtype = struct.dtype
    for name in dtype.names:
        print(f"{name} = {struct[name]}")


# TODO: below is not evaluated yet during the refactor


def print_msg(msg):
    """Print a message on the master rank only."""
    if master:
        print(msg)
        sys.stdout.flush()


def print_error(msg):
    """Print an error message and terminate the process."""
    print("ERROR: %s\n" % msg)
    sys.stdout.flush()
    sys.exit()


def print_warning(msg):
    """Print a warning on the master rank only."""
    if master:
        print(Fore.RED + "Warning: %s\n" % msg)
        print(Style.RESET_ALL)
        sys.stdout.flush()


def print_progress(percent, simulation):
    """Redraw the in-place progress bar (master rank only)."""
    if master:
        sys.stdout.write("\r")
        if not simulation["settings"]["neutron_eigenvalue_mode"]:
            if simulation["settings"]["N_census"] == 1:
                sys.stdout.write(
                    " [%-28s] %d%%" % ("=" * int(percent * 28), percent * 100.0)
                )
            else:
                idx = simulation["idx_census"] + 1
                N = simulation["settings"]["N_census"]
                sys.stdout.write(
                    " Census %i/%i: [%-28s] %d%%"
                    % (idx, N, "=" * int(percent * 28), percent * 100.0)
                )
        else:
            if simulation["settings"]["use_gyration_radius"]:
                sys.stdout.write(
                    " [%-40s] %d%%" % ("=" * int(percent * 40), percent * 100.0)
                )
            else:
                sys.stdout.write(
                    " [%-32s] %d%%" % ("=" * int(percent * 32), percent * 100.0)
                )
        sys.stdout.flush()


def print_header_eigenvalue(simulation):
    """Print the eigenvalue report header (master rank only)."""
    if master:
        if simulation["settings"]["use_gyration_radius"]:
            print("\n # k GyRad. k (avg) ")
            print(" ==== ======= ====== ===================")
        else:
            print("\n # k k (avg) ")
            print(" ==== ======= ===================")


def print_header_batch(i, N):
    """Print a 1-indexed 'Batch i+1/N' header (master rank only)."""
    if master:
        print(f"\nBatch {i+1}/{N}")
        sys.stdout.flush()


def print_progress_eigenvalue(simulation, data):
    """Print one line of the per-cycle eigenvalue report (master rank only).

    Running average and standard deviation appear only once cycles are active.
    """
    if master:
        idx_cycle = simulation["idx_cycle"]
        k_eff = simulation["k_eff"]
        k_avg = simulation["k_avg_running"]
        k_sdv = simulation["k_sdv_running"]
        gr = mcdc_get.simulation.gyration_radius(idx_cycle, simulation, data)
        if simulation["settings"]["use_progress_bar"]:
            sys.stdout.write("\r")
            sys.stdout.write("\033[K")
        if simulation["settings"]["use_gyration_radius"]:
            if not simulation["cycle_active"]:
                print(" %-4i %.5f %6.2f" % (idx_cycle + 1, k_eff, gr))
            else:
                print(
                    " %-4i %.5f %6.2f %.5f +/- %.5f"
                    % (idx_cycle + 1, k_eff, gr, k_avg, k_sdv)
                )
        else:
            if not simulation["cycle_active"]:
                print(" %-4i %.5f" % (idx_cycle + 1, k_eff))
            else:
                print(
                    " %-4i %.5f %.5f +/- %.5f" % (idx_cycle + 1, k_eff, k_avg, k_sdv)
                )


def print_runtime(simulation):
    """Print the runtime report with per-phase percentages (master rank only)."""
    t_total = simulation["runtime_total"]
    t_preparation = simulation["runtime_preparation"]
    t_simulation = simulation["runtime_simulation"]
    t_output = simulation["runtime_output"]
    if master:
        print("\n Runtime report:")
        print_time("Total ", t_total, 100)
        print_time("Preparation", t_preparation, t_preparation / t_total * 100)
        print_time("Simulation ", t_simulation, t_simulation / t_total * 100)
        print_time("Output ", t_output, t_output / t_total * 100)
        print("\n")
        sys.stdout.flush()


def print_bank(bank, show_content=False):
    """Print a particle bank's tag and fill level; optionally dump particles."""
    tag = bank["tag"]
    size = bank["size"]
    particles = bank["particles"]

    print("\n=============")
    print("Particle bank")
    print(" tag :", tag)
    print(" size :", size, "of", len(bank["particles"]))
    if show_content and size > 0:
        for i in range(size):
            print(" ", particles[i])
    print("\n")


# --------------------------------------------------------------------------------------
# mcdc/mcdc/transport/__init__.py
# --------------------------------------------------------------------------------------

import mcdc.transport.geometry as geometry
import mcdc.transport.particle_bank as particle_bank
import mcdc.transport.simulation as simulation
import mcdc.transport.util as util


# --------------------------------------------------------------------------------------
# mcdc/mcdc/transport/data.py
# --------------------------------------------------------------------------------------

from numba import njit

####

import mcdc.mcdc_get as mcdc_get

from mcdc.constant import (
    DATA_POLYNOMIAL,
    DATA_TABLE,
    INTERPOLATION_LINEAR,
    INTERPOLATION_LOG,
)
from mcdc.transport.util import find_bin, linear_interpolation, log_interpolation


@njit
def evaluate_data(x, data_base, simulation, data):
    """Evaluate the polymorphic data record `data_base` at abscissa `x`.

    Dispatches on child_type to a table lookup or polynomial evaluation;
    returns 0.0 for an unknown child type (should not happen).
    """
    data_type = data_base["child_type"]
    ID = data_base["child_ID"]
    if data_type == DATA_TABLE:
        table = simulation["table_data"][ID]
        return evaluate_table(x, table, data)
    elif data_type == DATA_POLYNOMIAL:
        polynomial = simulation["polynomial_data"][ID]
        return evaluate_polynomial(x, polynomial, data)
    else:
        return 0.0


@njit
def evaluate_table(x, table, data):
    """Interpolate the tabulated data at `x` (linear or log interpolation)."""
    offset = table["x_offset"]
    length = table["x_length"]
    grid = data[offset : offset + length]
    # Above is equivalent to: grid = mcdc_get.table_data.x_all(table, data)

    idx = find_bin(x, grid)
    x1 = grid[idx]
    x2 = grid[idx + 1]
    y1 = mcdc_get.table_data.y(idx, table, data)
    y2 = mcdc_get.table_data.y(idx + 1, table, data)

    if table["interpolation"] == INTERPOLATION_LINEAR:
        return linear_interpolation(x, x1, x2, y1, y2)
    elif table["interpolation"] == INTERPOLATION_LOG:
        return log_interpolation(x, x1, x2, y1, y2)
    # BUG FIX: the original fell off the end here, implicitly returning None
    # (an untyped path under numba's njit); return 0.0 for an unsupported
    # interpolation scheme instead.
    return 0.0


@njit
def evaluate_polynomial(x, polynomial, data):
    """Evaluate the polynomial sum_i c_i * x**i at `x`."""
    offset = polynomial["coefficients_offset"]
    length = polynomial["coefficients_length"]
    coeffs = data[offset : offset + length]
    # Above is equivalent to:
    #   coeffs = mcdc_get.polynomial_data.coefficients_all(polynomial, data)

    total = 0.0
    for i in range(len(coeffs)):
        total += coeffs[i] * x**i
    return total


# --------------------------------------------------------------------------------------
# mcdc/mcdc/transport/distribution.py
# --------------------------------------------------------------------------------------

import math

from numba import njit

####

import mcdc.mcdc_get as mcdc_get
import mcdc.transport.rng as rng

from mcdc.constant import (
    DISTRIBUTION_EVAPORATION,
    DISTRIBUTION_KALBACH_MANN,
    DISTRIBUTION_LEVEL_SCATTERING,
    DISTRIBUTION_MAXWELLIAN,
    DISTRIBUTION_MULTITABLE,
    DISTRIBUTION_N_BODY,
    DISTRIBUTION_TABULATED,
    DISTRIBUTION_TABULATED_ENERGY_ANGLE,
    PI,
)
from mcdc.transport.data import evaluate_table
from mcdc.transport.util import find_bin, linear_interpolation

# ======================================================================================
# General distribution samplers
# ======================================================================================
+@njit +def sample_distribution(E, distribution, rng_state, simulation, data): + return _sample_distribution(E, distribution, rng_state, simulation, data, False) + + +@njit +def sample_distribution_with_scale(E, distribution, rng_state, simulation, data): + return _sample_distribution(E, distribution, rng_state, simulation, data, True) + + +@njit +def _sample_distribution(E, distribution, rng_state, simulation, data, scale): + distribution_type = distribution["child_type"] + ID = distribution["child_ID"] + + if distribution_type == DISTRIBUTION_TABULATED: + table = simulation["tabulated_distributions"][ID] + return sample_tabulated(table, rng_state, data) + + elif distribution_type == DISTRIBUTION_MULTITABLE: + multi_table = simulation["multi_table_distributions"][ID] + return _sample_multi_table(E, rng_state, multi_table, data, scale) + + elif distribution_type == DISTRIBUTION_LEVEL_SCATTERING: + level_scattering = simulation["level_scattering_distributions"][ID] + return sample_level_scattering(E, level_scattering) + + elif distribution_type == DISTRIBUTION_EVAPORATION: + evaporation = simulation["evaporation_distributions"][ID] + return sample_evaporation(E, rng_state, evaporation, simulation, data) + + elif distribution_type == DISTRIBUTION_MAXWELLIAN: + maxwellian = simulation["maxwellian_distributions"][ID] + return sample_maxwellian(E, rng_state, maxwellian, simulation, data) + + # TODO: Should not get here + else: + return -1.0 + + +@njit +def sample_correlated_distribution(E, distribution, rng_state, simulation, data): + return _sample_correlated_distribution( + E, distribution, rng_state, simulation, data, False + ) + + +@njit +def sample_correlated_distribution_with_scale( + E, distribution, rng_state, simulation, data +): + return _sample_correlated_distribution( + E, distribution, rng_state, simulation, data, True + ) + + +@njit +def _sample_correlated_distribution( + E, distribution, rng_state, simulation, data, scale +): + distribution_type = 
distribution["child_type"] + ID = distribution["child_ID"] + + if distribution_type == DISTRIBUTION_KALBACH_MANN: + kalbach_mann = simulation["kalbach_mann_distributions"][ID] + return sample_kalbach_mann(E, rng_state, kalbach_mann, data) + + elif distribution_type == DISTRIBUTION_TABULATED_ENERGY_ANGLE: + table = simulation["tabulated_energy_angle_distributions"][ID] + return sample_tabulated_energy_angle(E, rng_state, table, data) + + elif distribution_type == DISTRIBUTION_N_BODY: + nbody = simulation["nbody_distributions"][ID] + E_out = sample_tabulated(nbody, rng_state, data) + mu = sample_isotropic_cosine(rng_state) + return E_out, mu + + # TODO: Should not get here + else: + return -1.0, -1.0 + + +# ====================================================================================== +# Distribution samplers +# ====================================================================================== + + +@njit +def sample_uniform(low, high, rng_state): + return low + rng.lcg(rng_state) * (high - low) + + +@njit +def sample_isotropic_cosine(rng_state): + return 2.0 * rng.lcg(rng_state) - 1.0 + + +@njit +def sample_isotropic_direction(rng_state): + # Sample polar cosine and azimuthal angle uniformly + mu = sample_isotropic_cosine(rng_state) + azi = 2.0 * PI * rng.lcg(rng_state) + + # Convert to Cartesian coordinates + c = (1.0 - mu**2) ** 0.5 + y = math.cos(azi) * c + z = math.sin(azi) * c + x = mu + return x, y, z + + +@njit +def sample_direction(polar_cosine, azimuthal, polar_coordinate, rng_state): + # Sample polar cosine and azimuthal angle + mu = sample_uniform(polar_cosine[0], polar_cosine[1], rng_state) + azi = sample_uniform(azimuthal[0], azimuthal[1], rng_state) + + # Apply polar coordinate + wx = polar_coordinate[0] + wy = polar_coordinate[1] + wz = polar_coordinate[2] + if abs(wz) >= 0.999: + inv = 1.0 / math.sqrt(wx * wx + wy * wy) + + ux = -wy * inv + uy = wx * inv + uz = 0.0 + + vx = -wz * wx * inv + vy = -wz * wy * inv + vz = math.sqrt(wx * wx + wy 
* wy) + else: + # Axis nearly parallel to z + ux, uy, uz = 1.0, 0.0, 0.0 + vx, vy, vz = 0.0, 1.0, 0.0 + + # Rotate into lab frame + s = math.sqrt(max(0.0, 1.0 - mu * mu)) + cphi = math.cos(azi) + sphi = math.sin(azi) + dx = s * cphi * ux + s * sphi * vx + mu * wx + dy = s * cphi * uy + s * sphi * vy + mu * wy + dz = s * cphi * uz + s * sphi * vz + mu * wz + + return dx, dy, dz + + +@njit +def sample_tabulated(table, rng_state, data): + xi = rng.lcg(rng_state) + + offset = table["cdf_offset"] + length = table["cdf_length"] + cdf = data[offset : offset + length] + # Above is equivalent to: cdf = mcdc_get.tabulated_distribution.cdf_all(table, data) + + idx = find_bin(xi, cdf) + cdf_low = mcdc_get.tabulated_distribution.cdf(idx, table, data) + cdf_high = mcdc_get.tabulated_distribution.cdf(idx + 1, table, data) + value_low = mcdc_get.tabulated_distribution.value(idx, table, data) + value_high = mcdc_get.tabulated_distribution.value(idx + 1, table, data) + return linear_interpolation(xi, cdf_low, cdf_high, value_low, value_high) + + +@njit +def sample_pmf(pmf, rng_state, data): + xi = rng.lcg(rng_state) + + offset = pmf["cmf_offset"] + length = pmf["cmf_length"] + cmf = data[offset : offset + length] + # Above is equivalent to: cmf = mcdc_get.pmf_distribution.cmf_all(pmf, data) + + idx = find_bin(xi, cmf) + return mcdc_get.pmf_distribution.value(idx, pmf, data) + + +@njit +def sample_white_direction(nx, ny, nz, rng_state): + # Sample polar cosine + mu = math.sqrt(rng.lcg(rng_state)) + + # Sample azimuthal direction + azi = 2.0 * PI * rng.lcg(rng_state) + cos_azi = math.cos(azi) + sin_azi = math.sin(azi) + Ac = (1.0 - mu**2) ** 0.5 + + if nz != 1.0: + B = (1.0 - nz**2) ** 0.5 + C = Ac / B + + x = nx * mu + (nx * nz * cos_azi - ny * sin_azi) * C + y = ny * mu + (ny * nz * cos_azi + nx * sin_azi) * C + z = nz * mu - cos_azi * Ac * B + + # If dir = 0i + 0j + k, interchange z and y in the formula + else: + B = (1.0 - ny**2) ** 0.5 + C = Ac / B + + x = nx * mu + (nx * ny * 
cos_azi - nz * sin_azi) * C + z = nz * mu + (nz * ny * cos_azi + nx * sin_azi) * C + y = ny * mu - cos_azi * Ac * B + return x, y, z + + +@njit +def sample_multi_table(E, rng_state, multi_table, data): + return _sample_multi_table(E, rng_state, multi_table, data, False) + + +@njit +def _sample_multi_table(E, rng_state, multi_table, data, scale): + offset = multi_table["grid_offset"] + length = multi_table["grid_length"] + grid = data[offset : offset + length] + # Above is equivalent to: grid = mcdc_get.multi_table_distribution.grid_all(multi_table, data) + + # Edge cases + if E < grid[0]: + idx = 0 + scale = False + elif E > grid[-1]: + idx = len(grid) - 1 + scale = False + else: + # Interpolation factor + idx = find_bin(E, grid) + E0 = grid[idx] + E1 = grid[idx + 1] + f = (E - E0) / (E1 - E0) + + # Min and max values for scaling + val_min = 0.0 + val_max = 1.0 + if scale: + # First table + start = int( + mcdc_get.multi_table_distribution.offset(idx, multi_table, data) + ) + end = int( + mcdc_get.multi_table_distribution.offset(idx + 1, multi_table, data) + ) + val0_min = mcdc_get.multi_table_distribution.value(start, multi_table, data) + val0_max = mcdc_get.multi_table_distribution.value( + end - 1, multi_table, data + ) + + # Second table + start = end + if idx + 2 == len(grid): + end = multi_table["value_length"] + else: + end = int( + mcdc_get.multi_table_distribution.offset(idx + 2, multi_table, data) + ) + val1_min = mcdc_get.multi_table_distribution.value(start, multi_table, data) + val1_max = mcdc_get.multi_table_distribution.value( + end - 1, multi_table, data + ) + + # Both + val_min = val0_min + f * (val1_min - val0_min) + val_max = val0_max + f * (val1_max - val0_max) + + # Sample which table to choose + if rng.lcg(rng_state) < f: + idx += 1 + + # Get the table range + start = int(mcdc_get.multi_table_distribution.offset(idx, multi_table, data)) + if idx + 1 == len(grid): + end = multi_table["value_length"] + else: + end = 
int(mcdc_get.multi_table_distribution.offset(idx + 1, multi_table, data)) + size = end - start + + # The CDF + offset = multi_table["cdf_offset"] + cdf = data[start + offset : start + offset + size] + # Above is equivalent to: cdf = mcdc_get.multi_table_distribution.cdf_chunk(start, size, multi_table, data) + + # Generate random numbers + xi = rng.lcg(rng_state) + + # Sample bin index + idx = find_bin(xi, cdf) + c = cdf[idx] + + # Get the other values + idx += start # Apply the offset as these are not chunk-extracted like the cdf + p0 = mcdc_get.multi_table_distribution.pdf(idx, multi_table, data) + p1 = mcdc_get.multi_table_distribution.pdf(idx + 1, multi_table, data) + val0 = mcdc_get.multi_table_distribution.value(idx, multi_table, data) + val1 = mcdc_get.multi_table_distribution.value(idx + 1, multi_table, data) + + m = (p1 - p0) / (val1 - val0) + if m == 0.0: + sample = val0 + (xi - c) / p0 + else: + sample = val0 + 1.0 / m * (math.sqrt(p0**2 + 2 * m * (xi - c)) - p0) + + if not scale: + return sample + + # Scale against the bounds + val_low = mcdc_get.multi_table_distribution.value(start, multi_table, data) + val_high = mcdc_get.multi_table_distribution.value(end - 1, multi_table, data) + return val_min + (sample - val_low) / (val_high - val_low) * (val_max - val_min) + + +@njit +def sample_maxwellian(E, rng_state, maxwellian, simulation, data): + # Get nuclear temperature + table = simulation["table_data"][maxwellian["nuclear_temperature_ID"]] + nuclear_temperature = evaluate_table(E, table, data) + restriction_energy = maxwellian["restriction_energy"] + + # Rejection sampling + while True: + xi1 = rng.lcg(rng_state) + xi2 = rng.lcg(rng_state) + xi3 = rng.lcg(rng_state) + cos = math.cos(0.5 * PI * xi3) + cos_square = cos * cos + sample = -nuclear_temperature * (math.log(xi1) + math.log(xi2) * cos_square) + + # Accept sample? 
+ if 0.0 <= sample and sample <= E - restriction_energy: + break + + return sample + + +@njit +def sample_level_scattering(E, level_scattering): + C1 = level_scattering["C1"] + C2 = level_scattering["C2"] + return C2 * (E - C1) + + +@njit +def sample_evaporation(E, rng_state, evaporation, simulation, data): + # Get nuclear temperature + table = simulation["table_data"][evaporation["nuclear_temperature_ID"]] + nuclear_temperature = evaluate_table(E, table, data) + restriction_energy = evaporation["restriction_energy"] + + w = (E - restriction_energy) / nuclear_temperature + g = 1.0 - math.exp(-w) + + # Rejection sampling + while True: + xi1 = rng.lcg(rng_state) + xi2 = rng.lcg(rng_state) + sample = -nuclear_temperature * math.log((1.0 - g * xi1) * (1.0 - g * xi2)) + + # Accept sample? + if 0.0 <= sample and sample <= E - restriction_energy: + break + + return sample + + +@njit +def sample_kalbach_mann(E, rng_state, kalbach_mann, data): + offset = kalbach_mann["energy_offset"] + length = kalbach_mann["energy_length"] + grid = data[offset : offset + length] + # Above is equivalent to: grid = mcdc_get.kalbach_mann_distribution.energy_all(kalbach_mann, data) + + # Random numbers + xi1 = rng.lcg(rng_state) + xi2 = rng.lcg(rng_state) + xi3 = rng.lcg(rng_state) + xi4 = rng.lcg(rng_state) + + # Interpolation factor + idx = find_bin(E, grid) + E0 = grid[idx] + E1 = grid[idx + 1] + f = (E - E0) / (E1 - E0) + + # ================================================================================== + # Min and max energy values for scaling + # ================================================================================== + + # First table + start = int(mcdc_get.kalbach_mann_distribution.offset(idx, kalbach_mann, data)) + end = int(mcdc_get.kalbach_mann_distribution.offset(idx + 1, kalbach_mann, data)) + E0_min = mcdc_get.kalbach_mann_distribution.energy_out(start, kalbach_mann, data) + E0_max = mcdc_get.kalbach_mann_distribution.energy_out(end - 1, kalbach_mann, data) + + # 
Second table + start = end + if idx + 2 == len(grid): + end = kalbach_mann["energy_length"] + else: + end = int( + mcdc_get.kalbach_mann_distribution.offset(idx + 2, kalbach_mann, data) + ) + E1_min = mcdc_get.kalbach_mann_distribution.energy_out(start, kalbach_mann, data) + E1_max = mcdc_get.kalbach_mann_distribution.energy_out(end - 1, kalbach_mann, data) + + # The combination of the two tables + E_min = E0_min + f * (E1_min - E0_min) + E_max = E0_max + f * (E1_max - E0_max) + + # Sample which table to choose + if xi1 < f: + idx += 1 + + # Get the table range + start = int(mcdc_get.kalbach_mann_distribution.offset(idx, kalbach_mann, data)) + if idx + 1 == len(grid): + end = kalbach_mann["energy_length"] + else: + end = int( + mcdc_get.kalbach_mann_distribution.offset(idx + 1, kalbach_mann, data) + ) + size = end - start + + # The CDF + offset = kalbach_mann["cdf_offset"] + cdf = data[start + offset : start + offset + size] + # Above is equivalent to: cdf = mcdc_get.kalbach_mann_distribution.cdf_chunk(start, size, kalbach_mann, data) + + # Sample bin index + idx = find_bin(xi2, cdf) + c = cdf[idx] + + # Get the other values + idx += start # Apply the offset as these are not chunk-extracted like the cdf + p0 = mcdc_get.kalbach_mann_distribution.pdf(idx, kalbach_mann, data) + p1 = mcdc_get.kalbach_mann_distribution.pdf(idx + 1, kalbach_mann, data) + E0 = mcdc_get.kalbach_mann_distribution.energy_out(idx, kalbach_mann, data) + E1 = mcdc_get.kalbach_mann_distribution.energy_out(idx + 1, kalbach_mann, data) + + # Calculate the outgoing energy (not-scaled) + m = (p1 - p0) / (E1 - E0) + if m == 0.0: + E_hat = E0 + (xi2 - c) / p0 + else: + E_hat = E0 + 1.0 / m * (math.sqrt(p0**2 + 2 * m * (xi2 - c)) - p0) + + # Scale against the bounds + E_low = mcdc_get.kalbach_mann_distribution.energy_out(start, kalbach_mann, data) + E_high = mcdc_get.kalbach_mann_distribution.energy_out(end - 1, kalbach_mann, data) + E_new = E_min + (E_hat - E_low) / (E_high - E_low) * (E_max - E_min) 
+ + # Precompound factor and angular slope + R0 = mcdc_get.kalbach_mann_distribution.precompound_factor(idx, kalbach_mann, data) + R1 = mcdc_get.kalbach_mann_distribution.precompound_factor( + idx + 1, kalbach_mann, data + ) + A0 = mcdc_get.kalbach_mann_distribution.angular_slope(idx, kalbach_mann, data) + A1 = mcdc_get.kalbach_mann_distribution.angular_slope(idx + 1, kalbach_mann, data) + # + mE = (E_hat - E0) / (E1 - E0) + R = R0 + mE * (R1 - R0) + A = A0 + mE * (A1 - A0) + + # Calculate the angular coine + T = (2.0 * xi4 - 1.0) * math.sinh(A) + if xi3 > R: + mu = math.log(T + math.sqrt(T**2 + 1.0)) / A + else: + mu = math.log(xi4 * math.exp(A) + (1.0 - xi4) * math.exp(-A)) / A + + return E_new, mu + + +@njit +def sample_tabulated_energy_angle(E, rng_state, table, data): + offset = table["energy_offset"] + length = table["energy_length"] + grid = data[offset : offset + length] + # Above is equivalent to: grid = mcdc_get.tabulated_energy_angle_distribution.energy_all(table, data) + + # Random numbers + xi1 = rng.lcg(rng_state) + xi2 = rng.lcg(rng_state) + xi3 = rng.lcg(rng_state) + + # Interpolation factor + idx = find_bin(E, grid) + E0 = grid[idx] + E1 = grid[idx + 1] + f = (E - E0) / (E1 - E0) + + # ================================================================================== + # Min and max energy values for scaling + # ================================================================================== + + # First table + start = int(mcdc_get.tabulated_energy_angle_distribution.offset(idx, table, data)) + end = int(mcdc_get.tabulated_energy_angle_distribution.offset(idx + 1, table, data)) + E0_min = mcdc_get.tabulated_energy_angle_distribution.energy_out(start, table, data) + E0_max = mcdc_get.tabulated_energy_angle_distribution.energy_out( + end - 1, table, data + ) + + # Second table + start = end + if idx + 2 == len(grid): + end = table["energy_length"] + else: + end = int( + mcdc_get.tabulated_energy_angle_distribution.offset(idx + 2, table, data) + ) + 
E1_min = mcdc_get.tabulated_energy_angle_distribution.energy_out(start, table, data) + E1_max = mcdc_get.tabulated_energy_angle_distribution.energy_out( + end - 1, table, data + ) + + # The combination of the two tables + E_min = E0_min + f * (E1_min - E0_min) + E_max = E0_max + f * (E1_max - E0_max) + + # Sample which table to choose + if xi1 < f: + idx += 1 + + # Get the table range + start = int(mcdc_get.tabulated_energy_angle_distribution.offset(idx, table, data)) + if idx + 1 == len(grid): + end = table["energy_length"] + else: + end = int( + mcdc_get.tabulated_energy_angle_distribution.offset(idx + 1, table, data) + ) + size = end - start + + # The CDF + offset = table["cdf_offset"] + cdf = data[start + offset : start + offset + size] + # Above is equivalent to: + # cdf = mcdc_get.tabulated_energy_angle_distribution.cdf_chunk( + # start, size, table, data + # ) + + # Sample bin index + idx = find_bin(xi2, cdf) + c = cdf[idx] + + # Get the other values + idx_local = ( + idx + start + ) # Apply the offset as these are not chunk-extracted like the cdf + p0 = mcdc_get.tabulated_energy_angle_distribution.pdf(idx_local, table, data) + p1 = mcdc_get.tabulated_energy_angle_distribution.pdf(idx_local + 1, table, data) + E0 = mcdc_get.tabulated_energy_angle_distribution.energy_out(idx_local, table, data) + E1 = mcdc_get.tabulated_energy_angle_distribution.energy_out( + idx_local + 1, table, data + ) + + # Calculate the outgoing energy (not-scaled) + m = (p1 - p0) / (E1 - E0) + if m == 0.0: + E_hat = E0 + (xi2 - c) / p0 + else: + E_hat = E0 + 1.0 / m * (math.sqrt(p0**2 + 2 * m * (xi2 - c)) - p0) + + # Scale against the bounds + E_low = mcdc_get.tabulated_energy_angle_distribution.energy_out(start, table, data) + E_high = mcdc_get.tabulated_energy_angle_distribution.energy_out( + end - 1, table, data + ) + E_new = E_min + (E_hat - E_low) / (E_high - E_low) * (E_max - E_min) + + # Determine angular table index + if xi2 - cdf[idx] > cdf[idx + 1] - xi2: + idx += 1 + + # Get 
the angular table range + start = int( + mcdc_get.tabulated_energy_angle_distribution.cosine_offset_(idx, table, data) + ) + if idx + 1 == len(grid): + end = table["cosine_length"] + else: + end = int( + mcdc_get.tabulated_energy_angle_distribution.cosine_offset_( + idx + 1, table, data + ) + ) + size = end - start + + # The CDF + offset = table["cosine_cdf_offset"] + cdf = data[start + offset : start + offset + size] + # Above is equivalent to: + # cdf = mcdc_get.tabulated_energy_angle_distribution.cosine_cdf_chunk( + # start, size, table, data + # ) + + # Sample bin index + idx = find_bin(xi3, cdf) + c = cdf[idx] + + # Get the other values + idx += start # Apply the offset as these are not chunk-extracted like the cdf + p0 = mcdc_get.tabulated_energy_angle_distribution.cosine_pdf(idx, table, data) + p1 = mcdc_get.tabulated_energy_angle_distribution.cosine_pdf(idx + 1, table, data) + mu0 = mcdc_get.tabulated_energy_angle_distribution.cosine(idx, table, data) + mu1 = mcdc_get.tabulated_energy_angle_distribution.cosine(idx + 1, table, data) + + m = (p1 - p0) / (mu1 - mu0) + if m == 0.0: + mu = mu0 + (xi3 - c) / p0 + else: + mu = mu0 + 1.0 / m * (math.sqrt(p0**2 + 2 * m * (xi3 - c)) - p0) + + return E_new, mu diff --git a/mcdc/mcdc/transport/geometry/__init__.py b/mcdc/mcdc/transport/geometry/__init__.py new file mode 100644 index 000000000..a785177fd --- /dev/null +++ b/mcdc/mcdc/transport/geometry/__init__.py @@ -0,0 +1,11 @@ +import mcdc.transport.geometry.interface as interface + +from .interface import ( + inspect_geometry, + locate_particle, + get_cell, + check_cell, + distance_to_nearest_surface, + surface_crossing, + check_coincidence, +) diff --git a/mcdc/mcdc/transport/geometry/interface.py b/mcdc/mcdc/transport/geometry/interface.py new file mode 100644 index 000000000..0de3f79aa --- /dev/null +++ b/mcdc/mcdc/transport/geometry/interface.py @@ -0,0 +1,475 @@ +import math +import numpy as np + +from numba import njit + +#### + +import mcdc.mcdc_get as 
mcdc_get +import mcdc.literals as literals +import mcdc.transport.mesh as mesh +import mcdc.transport.physics as physics +import mcdc.transport.tally as tally_module +import mcdc.transport.util as util + +from mcdc.constant import * +from mcdc.transport.geometry.surface import get_distance, check_sense, reflect + +# ====================================================================================== +# Geometry inspection +# ====================================================================================== + + +@njit +def inspect_geometry(particle_container, simulation, data): + """ + Full geometry inspection of the particle: + - Set particle top cell and material IDs (if not lost) + - Set surface ID (if surface hit) + - Set particle boundary event (surface or lattice crossing, or lost) + - Return distance to boundary (surface or lattice) + """ + particle = particle_container[0] + + # Store particle global coordinate + # (particle will be temporarily translated and rotated) + x_global = particle["x"] + y_global = particle["y"] + z_global = particle["z"] + t_global = particle["t"] + ux_global = particle["ux"] + uy_global = particle["uy"] + uz_global = particle["uz"] + speed = physics.particle_speed(particle_container, simulation, data) + + # Default returns + distance = INF + event = EVENT_NONE + + # Find top cell from root universe if unknown + if particle["cell_ID"] == -1: + particle["cell_ID"] = get_cell( + particle_container, UNIVERSE_ROOT, simulation, data + ) + + # Particle is lost? 
+ if particle["cell_ID"] == -1: + event = EVENT_LOST + + # The top cell + cell = simulation["cells"][particle["cell_ID"]] + + # Recursively check cells until material cell is found (or the particle is lost) + while event != EVENT_LOST: + # Distance to nearest surface + d_surface, surface_ID = distance_to_nearest_surface( + particle_container, cell, simulation, data + ) + + # Check if smaller + if d_surface < distance - COINCIDENCE_TOLERANCE: + distance = d_surface + event = EVENT_SURFACE_CROSSING + particle["surface_ID"] = surface_ID + + # Check if coincident + elif check_coincidence(d_surface, distance): + # Add event if not there yet + if not event & EVENT_SURFACE_CROSSING: + event += EVENT_SURFACE_CROSSING + particle["surface_ID"] = surface_ID + # If surface crossing is already there, prioritize the outer surface ID + + # Material cell? + if cell["fill_type"] == FILL_MATERIAL: + particle["material_ID"] = cell["fill_ID"] + break + + else: + # Cell is filled with universe or lattice + + # Apply translation + if cell["fill_translated"]: + particle["x"] -= cell["translation"][0] + particle["y"] -= cell["translation"][1] + particle["z"] -= cell["translation"][2] + + # Apply rotation + if cell["fill_rotated"]: + _rotate_particle(particle_container, cell["rotation"]) + + # Universe cell? + if cell["fill_type"] == FILL_UNIVERSE: + # Get universe ID + universe_ID = cell["fill_ID"] + + # Lattice cell? 
+ elif cell["fill_type"] == FILL_LATTICE: + # Get lattice + lattice = simulation["lattices"][cell["fill_ID"]] + + # Distance to lattice grid + d_lattice = mesh.uniform.get_crossing_distance( + particle_container, speed, lattice + ) + + # Check if smaller + if d_lattice < distance - COINCIDENCE_TOLERANCE: + distance = d_lattice + event = EVENT_LATTICE_CROSSING + particle["surface_ID"] = -1 + + # Check if coincident + if check_coincidence(d_lattice, distance): + # Add event if not there yet + if not event & EVENT_LATTICE_CROSSING: + event += EVENT_LATTICE_CROSSING + + # Get universe + ix, iy, iz = mesh.uniform.get_indices(particle_container, lattice) + if ix == -1 or iy == -1 or iz == -1: + event = EVENT_LOST + continue + universe_ID = int( + mcdc_get.lattice.universe_IDs(ix, iy, iz, lattice, data) + ) + + # Lattice-translate the particle + particle["x"] -= lattice["x0"] + (ix + 0.5) * lattice["dx"] + particle["y"] -= lattice["y0"] + (iy + 0.5) * lattice["dy"] + particle["z"] -= lattice["z0"] + (iz + 0.5) * lattice["dz"] + + # Get inner cell + cell_ID = get_cell(particle_container, universe_ID, simulation, data) + if cell_ID > -1: + cell = simulation["cells"][cell_ID] + else: + event = EVENT_LOST + + # Reassign the global coordinate + particle["x"] = x_global + particle["y"] = y_global + particle["z"] = z_global + particle["t"] = t_global + particle["ux"] = ux_global + particle["uy"] = uy_global + particle["uz"] = uz_global + + # Report lost particle + if event == EVENT_LOST: + report_lost_particle(particle_container, simulation) + + # Assign particle event + particle["event"] = event + + return distance + + +@njit +def locate_particle(particle_container, simulation, data): + """ + Set particle cell and material IDs + Return False if particle is lost + + This is similar to inspect_geometry, except that distance to nearest surface + or/and lattice grid and the respective boundary event are not determined. 
+ """ + particle = particle_container[0] + + # Store particle global coordinate + # (particle will be temporarily translated and rotated) + x_global = particle["x"] + y_global = particle["y"] + z_global = particle["z"] + t_global = particle["t"] + ux_global = particle["ux"] + uy_global = particle["uy"] + uz_global = particle["uz"] + + particle_is_lost = False + + # Find top cell from root universe if unknown + if particle["cell_ID"] == -1: + particle["cell_ID"] = get_cell( + particle_container, UNIVERSE_ROOT, simulation, data + ) + + # Particle is lost? + if particle["cell_ID"] == -1: + particle_is_lost = True + + # The top cell + cell = simulation["cells"][particle["cell_ID"]] + + # Recursively check cells until material cell is found (or the particle is lost) + while not particle_is_lost: + # Material cell? + if cell["fill_type"] == FILL_MATERIAL: + particle["material_ID"] = cell["fill_ID"] + break + + else: + # Cell is filled with universe or lattice + + # Apply translation + if cell["fill_translated"]: + particle["x"] -= cell["translation"][0] + particle["y"] -= cell["translation"][1] + particle["z"] -= cell["translation"][2] + + # Apply rotation + if cell["fill_rotated"]: + _rotate_particle(particle_container, cell["rotation"]) + + # Universe cell? + if cell["fill_type"] == FILL_UNIVERSE: + # Get universe ID + universe_ID = cell["fill_ID"] + + # Lattice cell? 
+ elif cell["fill_type"] == FILL_LATTICE: + # Get lattice + lattice = simulation["lattices"][cell["fill_ID"]] + + # Get universe + ix, iy, iz = mesh.uniform.get_indices(particle_container, lattice) + if ix == -1 or iy == -1 or iz == -1: + particle_is_lost = True + continue + universe_ID = int( + mcdc_get.lattice.universe_IDs(ix, iy, iz, lattice, data) + ) + + # Lattice-translate the particle + particle["x"] -= lattice["x0"] + (ix + 0.5) * lattice["dx"] + particle["y"] -= lattice["y0"] + (iy + 0.5) * lattice["dy"] + particle["z"] -= lattice["z0"] + (iz + 0.5) * lattice["dz"] + + # Get inner cell + cell_ID = get_cell(particle_container, universe_ID, simulation, data) + if cell_ID > -1: + cell = simulation["cells"][cell_ID] + else: + particle_is_lost = True + + # Reassign the global coordinate + particle["x"] = x_global + particle["y"] = y_global + particle["z"] = z_global + particle["t"] = t_global + particle["ux"] = ux_global + particle["uy"] = uy_global + particle["uz"] = uz_global + + # Report lost particle + if particle_is_lost: + report_lost_particle(particle_container, simulation) + + return not particle_is_lost + + +@njit +def _rotate_particle(particle_container, rotation): + # Particle initial coordinate + particle = particle_container[0] + x = particle["x"] + y = particle["y"] + z = particle["z"] + ux = particle["ux"] + uy = particle["uy"] + uz = particle["uz"] + + # Rotation matrix + xx, xy, xz, yx, yy, yz, zx, zy, zz = _rotation_matrix(rotation) + + # Rotate + x_rotated = x * xx + y * xy + z * xz + y_rotated = x * yx + y * yy + z * yz + z_rotated = x * zx + y * zy + z * zz + ux_rotated = ux * xx + uy * xy + uz * xz + uy_rotated = ux * yx + uy * yy + uz * yz + uz_rotated = ux * zx + uy * zy + uz * zz + + # Assign the rotated coordinate + particle["x"] = x_rotated + particle["y"] = y_rotated + particle["z"] = z_rotated + particle["ux"] = ux_rotated + particle["uy"] = uy_rotated + particle["uz"] = uz_rotated + + +@njit +def _rotation_matrix(rotation): + phi = 
rotation[0] + theta = rotation[1] + psi = rotation[2] + + xx = math.cos(theta) * math.cos(psi) + xy = -math.cos(phi) * math.sin(psi) + math.sin(phi) * math.sin(theta) * math.cos( + psi + ) + xz = math.sin(phi) * math.sin(psi) + math.cos(phi) * math.sin(theta) * math.cos(psi) + + yx = math.cos(theta) * math.sin(psi) + yy = math.cos(phi) * math.cos(psi) + math.sin(phi) * math.sin(theta) * math.sin(psi) + yz = -math.sin(phi) * math.cos(psi) + math.cos(phi) * math.sin(theta) * math.sin( + psi + ) + + zx = -math.sin(theta) + zy = math.sin(phi) * math.cos(theta) + zz = math.cos(phi) * math.cos(theta) + + return xx, xy, xz, yx, yy, yz, zx, zy, zz + + +# ====================================================================================== +# Particle locator +# ====================================================================================== + + +@njit +def get_cell(particle_container, universe_ID, simulation, data): + """ + Find and return particle cell ID in the given universe + Return -1 if particle is lost + """ + particle = particle_container[0] + universe = simulation["universes"][universe_ID] + + # Check over all cells in the universe + for i in range(universe["N_cell"]): + cell_ID = int(mcdc_get.universe.cell_IDs(i, universe, data)) + cell = simulation["cells"][cell_ID] + if check_cell(particle_container, cell, simulation, data): + return cell_ID + + # Particle is not found + return -1 + + +@njit +def check_cell(particle_container, cell, simulation, data): + """ + Check if the particle is inside the cell + """ + particle = particle_container[0] + + # Access RPN data + N_token = cell["region_RPN_tokens_length"] + if N_token == 0: + return True + + # Create local value array + value = util.local_array(literals.rpn_evaluation_buffer_size(), np.bool_) + N_value = 0 + + # Particle parameters + speed = physics.particle_speed(particle_container, simulation, data) + + # March forward through RPN tokens + for idx in range(N_token): + token = 
int(mcdc_get.cell.region_RPN_tokens(idx, cell, data)) + + if token >= 0: + surface = simulation["surfaces"][token] + value[N_value] = check_sense(particle_container, speed, surface, data) + N_value += 1 + + elif token == BOOL_NOT: + value[N_value - 1] = not value[N_value - 1] + + elif token == BOOL_AND: + value[N_value - 2] = value[N_value - 2] & value[N_value - 1] + N_value -= 1 + + elif token == BOOL_OR: + value[N_value - 2] = value[N_value - 2] | value[N_value - 1] + N_value -= 1 + + return value[0] + + +@njit +def report_lost_particle(particle_container, simulation): + """ + Report lost particle and terminate it + """ + particle = particle_container[0] + + x = particle["x"] + y = particle["y"] + z = particle["z"] + t = particle["t"] + idx_batch = simulation["idx_batch"] + idx_census = simulation["idx_census"] + idx_work = simulation["idx_work"] + print("A particle is lost at (", x, y, z, t, ")") + print(" (batch/census/work) indices: (", idx_batch, idx_census, idx_work, ")") + particle["alive"] = False + + +# ====================================================================================== +# Nearest distance search +# ====================================================================================== + + +@njit +def distance_to_nearest_surface(particle_container, cell, simulation, data): + """ + Determine the nearest cell surface and the distance to it + """ + distance = INF + surface_ID = -1 + + # Particle parameters + speed = physics.particle_speed(particle_container, simulation, data) + + # Iterate over all surfaces and find the minimum distance + for i in range(cell["N_surface"]): + candidate_surface_ID = int(mcdc_get.cell.surface_IDs(i, cell, data)) + surface = simulation["surfaces"][candidate_surface_ID] + d = get_distance(particle_container, speed, surface, data) + if d < distance: + distance = d + surface_ID = surface["ID"] + + return distance, surface_ID + + +@njit +def surface_crossing(P_arr, simulation, data): + P = P_arr[0] + + # Apply BC + 
surface = simulation["surfaces"][P["surface_ID"]] + BC = surface["boundary_condition"] + if BC == BC_VACUUM: + P["alive"] = False + elif BC == BC_REFLECTIVE: + reflect(P_arr, surface) + + # Score tally + for i in range(surface["N_tally"]): + tally_ID = int(mcdc_get.surface.tally_IDs(i, surface, data)) + tally = simulation["surface_tallies"][tally_ID] + tally_module.score.surface_tally(P_arr, surface, tally, simulation, data) + + # Need to check new cell later? + if P["alive"] and not BC == BC_REFLECTIVE: + P["cell_ID"] = -1 + P["material_ID"] = -1 + + +# ====================================================================================== +# Miscellanies +# ====================================================================================== + + +@njit +def check_coincidence(value_1, value_2): + """ + Check if two values are within coincidence tolerance + """ + return abs(value_1 - value_2) < COINCIDENCE_TOLERANCE diff --git a/mcdc/mcdc/transport/geometry/surface/__init__.py b/mcdc/mcdc/transport/geometry/surface/__init__.py new file mode 100644 index 000000000..5ef475f8e --- /dev/null +++ b/mcdc/mcdc/transport/geometry/surface/__init__.py @@ -0,0 +1,7 @@ +from .interface import ( + check_sense, + evaluate, + get_normal_component, + reflect, + get_distance, +) diff --git a/mcdc/mcdc/transport/geometry/surface/cylinder_x.py b/mcdc/mcdc/transport/geometry/surface/cylinder_x.py new file mode 100644 index 000000000..a186d0951 --- /dev/null +++ b/mcdc/mcdc/transport/geometry/surface/cylinder_x.py @@ -0,0 +1,123 @@ +""" +Cylinder-X: Infinite cylinder parallel to the x-axis + +f(y, z) = yy + zz + Hy + Iz + J +""" + +import math + +from numba import njit + +from mcdc.constant import ( + COINCIDENCE_TOLERANCE, + INF, +) + + +@njit +def evaluate(particle_container, surface): + particle = particle_container[0] + # Particle parameters + y = particle["y"] + z = particle["z"] + + # Surface parameters + H = surface["H"] + I = surface["I"] + J = surface["J"] + + return y**2 + 
z**2 + H * y + I * z + J + + +@njit +def reflect(particle_container, surface): + particle = particle_container[0] + # Particle parameters + uy = particle["uy"] + uz = particle["uz"] + + # Surface normal + dy = 2 * particle["y"] + surface["H"] + dz = 2 * particle["z"] + surface["I"] + norm = (dy**2 + dz**2) ** 0.5 + ny = dy / norm + nz = dz / norm + + # Reflect + c = 2.0 * (ny * uy + nz * uz) + particle["uy"] -= c * ny + particle["uz"] -= c * nz + + +@njit +def get_normal_component(particle_container, surface): + particle = particle_container[0] + # Surface normal + dy = 2 * particle["y"] + surface["H"] + dz = 2 * particle["z"] + surface["I"] + norm = (dy**2 + dz**2) ** 0.5 + ny = dy / norm + nz = dz / norm + + # Particle parameters + uy = particle["uy"] + uz = particle["uz"] + + return ny * uy + nz * uz + + +@njit +def get_distance(particle_container, surface): + particle = particle_container[0] + # Particle coordinate + y = particle["y"] + z = particle["z"] + uy = particle["uy"] + uz = particle["uz"] + + # Surface coefficients + H = surface["H"] + I = surface["I"] + + # Coincident? + f = evaluate(particle_container, surface) + coincident = abs(f) < COINCIDENCE_TOLERANCE + if coincident: + # Moving away or tangent? + if ( + get_normal_component(particle_container, surface) + >= 0.0 - COINCIDENCE_TOLERANCE + ): + return INF + + # Quadratic equation constants + a = uy * uy + uz * uz + b = 2 * (y * uy + z * uz) + H * uy + I * uz + c = f + + determinant = b * b - 4.0 * a * c + + # Roots are complex : no intersection + # Roots are identical: tangent + # ==> return huge number + if determinant <= 0.0: + return INF + else: + # Get the roots + denom = 2.0 * a + sqrt = math.sqrt(determinant) + root_1 = (-b + sqrt) / denom + root_2 = (-b - sqrt) / denom + + # Coincident? 
@njit
def evaluate(particle_container, surface):
    """Evaluate f(x, z) = xx + zz + Gx + Iz + J at the particle position."""
    p = particle_container[0]
    px = p["x"]
    pz = p["z"]
    return px**2 + pz**2 + surface["G"] * px + surface["I"] * pz + surface["J"]


@njit
def reflect(particle_container, surface):
    """Specularly reflect the particle direction about the outward normal."""
    p = particle_container[0]

    # Outward normal: normalized gradient of f in the x-z plane
    grad_x = 2 * p["x"] + surface["G"]
    grad_z = 2 * p["z"] + surface["I"]
    magnitude = (grad_x**2 + grad_z**2) ** 0.5
    nx = grad_x / magnitude
    nz = grad_z / magnitude

    # u' = u - 2 (u . n) n
    two_dot = 2.0 * (nx * p["ux"] + nz * p["uz"])
    p["ux"] -= two_dot * nx
    p["uz"] -= two_dot * nz


@njit
def get_normal_component(particle_container, surface):
    """Dot product of the particle direction with the outward surface normal."""
    p = particle_container[0]

    # Outward normal: normalized gradient of f in the x-z plane
    grad_x = 2 * p["x"] + surface["G"]
    grad_z = 2 * p["z"] + surface["I"]
    magnitude = (grad_x**2 + grad_z**2) ** 0.5
    nx = grad_x / magnitude
    nz = grad_z / magnitude

    return nx * p["ux"] + nz * p["uz"]


@njit
def get_distance(particle_container, surface):
    """Distance along the flight direction to the cylinder (INF if no hit)."""
    p = particle_container[0]
    x = p["x"]
    z = p["z"]
    ux = p["ux"]
    uz = p["uz"]
    G = surface["G"]
    I = surface["I"]

    # Is the particle sitting on the surface?
    f = evaluate(particle_container, surface)
    on_surface = abs(f) < COINCIDENCE_TOLERANCE
    if on_surface:
        # Moving away or tangent: no crossing
        if (
            get_normal_component(particle_container, surface)
            >= 0.0 - COINCIDENCE_TOLERANCE
        ):
            return INF

    # Quadratic a*s^2 + b*s + c = 0 along the flight path
    a = ux * ux + uz * uz
    b = 2 * (x * ux + z * uz) + G * ux + I * uz
    c = f

    discriminant = b * b - 4.0 * a * c

    # Complex roots: no intersection; identical roots: tangent
    if discriminant <= 0.0:
        return INF

    denom = 2.0 * a
    sqrt_disc = math.sqrt(discriminant)
    root_1 = (-b + sqrt_disc) / denom
    root_2 = (-b - sqrt_disc) / denom

    # Leaving from the surface: take the far root
    if on_surface:
        return max(root_1, root_2)

    # Discard intersections behind the particle
    if root_1 < 0.0:
        root_1 = INF
    if root_2 < 0.0:
        root_2 = INF

    # Nearest forward intersection
    return min(root_1, root_2)
@njit
def evaluate(particle_container, surface):
    """Evaluate f(x, y) = xx + yy + Gx + Hy + J at the particle position."""
    p = particle_container[0]
    px = p["x"]
    py = p["y"]
    return px**2 + py**2 + surface["G"] * px + surface["H"] * py + surface["J"]


@njit
def reflect(particle_container, surface):
    """Specularly reflect the particle direction about the outward normal."""
    p = particle_container[0]

    # Outward normal: normalized gradient of f in the x-y plane
    grad_x = 2 * p["x"] + surface["G"]
    grad_y = 2 * p["y"] + surface["H"]
    magnitude = (grad_x**2 + grad_y**2) ** 0.5
    nx = grad_x / magnitude
    ny = grad_y / magnitude

    # u' = u - 2 (u . n) n
    two_dot = 2.0 * (nx * p["ux"] + ny * p["uy"])
    p["ux"] -= two_dot * nx
    p["uy"] -= two_dot * ny


@njit
def get_normal_component(particle_container, surface):
    """Dot product of the particle direction with the outward surface normal."""
    p = particle_container[0]

    # Outward normal: normalized gradient of f in the x-y plane
    grad_x = 2 * p["x"] + surface["G"]
    grad_y = 2 * p["y"] + surface["H"]
    magnitude = (grad_x**2 + grad_y**2) ** 0.5
    nx = grad_x / magnitude
    ny = grad_y / magnitude

    return nx * p["ux"] + ny * p["uy"]


@njit
def get_distance(particle_container, surface):
    """Distance along the flight direction to the cylinder (INF if no hit)."""
    p = particle_container[0]
    x = p["x"]
    y = p["y"]
    ux = p["ux"]
    uy = p["uy"]
    G = surface["G"]
    H = surface["H"]

    # Is the particle sitting on the surface?
    f = evaluate(particle_container, surface)
    on_surface = abs(f) < COINCIDENCE_TOLERANCE
    if on_surface:
        # Moving away or tangent: no crossing
        if (
            get_normal_component(particle_container, surface)
            >= 0.0 - COINCIDENCE_TOLERANCE
        ):
            return INF

    # Quadratic a*s^2 + b*s + c = 0 along the flight path
    a = ux * ux + uy * uy
    b = 2 * (x * ux + y * uy) + G * ux + H * uy
    c = f

    discriminant = b * b - 4.0 * a * c

    # Complex roots: no intersection; identical roots: tangent
    if discriminant <= 0.0:
        return INF

    denom = 2.0 * a
    sqrt_disc = math.sqrt(discriminant)
    root_1 = (-b + sqrt_disc) / denom
    root_2 = (-b - sqrt_disc) / denom

    # Leaving from the surface: take the far root
    if on_surface:
        return max(root_1, root_2)

    # Discard intersections behind the particle
    if root_1 < 0.0:
        root_1 = INF
    if root_2 < 0.0:
        root_2 = INF

    # Nearest forward intersection
    return min(root_1, root_2)
+ """ + particle = particle_container[0] + result = evaluate(particle_container, surface, data) + + # Check if coincident on the surface + if abs(result) < COINCIDENCE_TOLERANCE: + # Determine sense based on the direction + return ( + get_normal_component(particle_container, speed, surface, data) + > 0.0 # TODO: Do we need to include COINCIDENCE TOLERANCE here? + ) + + return result > 0.0 + + +@njit +def evaluate(particle_container, surface, data): + """ + Evaluate the surface equation wrt the particle coordinate + """ + particle = particle_container[0] + if surface["moving"]: + # Temporarily translate particle position + x_original = particle["x"] + y_original = particle["y"] + z_original = particle["z"] + idx = _get_move_idx(particle["t"], surface, data) + _translate_particle_position(particle_container, surface, idx, data) + + if surface["linear"]: + if surface["type"] == SURFACE_PLANE_X: + result = plane_x.evaluate(particle_container, surface) + elif surface["type"] == SURFACE_PLANE_Y: + result = plane_y.evaluate(particle_container, surface) + elif surface["type"] == SURFACE_PLANE_Z: + result = plane_z.evaluate(particle_container, surface) + elif surface["type"] == SURFACE_PLANE: + result = plane.evaluate(particle_container, surface) + else: + if surface["type"] == SURFACE_CYLINDER_X: + result = cylinder_x.evaluate(particle_container, surface) + elif surface["type"] == SURFACE_CYLINDER_Y: + result = cylinder_y.evaluate(particle_container, surface) + elif surface["type"] == SURFACE_CYLINDER_Z: + result = cylinder_z.evaluate(particle_container, surface) + elif surface["type"] == SURFACE_CYLINDER: + result = quadric.evaluate(particle_container, surface) + elif ( + surface["type"] == SURFACE_QUADRIC + or surface["type"] == SURFACE_CONE_X + or surface["type"] == SURFACE_CONE_Y + or surface["type"] == SURFACE_CONE_Z + ): + result = quadric.evaluate(particle_container, surface) + elif surface["type"] == SURFACE_SPHERE: + result = sphere.evaluate(particle_container, 
surface) + elif surface["type"] == SURFACE_QUADRIC: + result = quadric.evaluate(particle_container, surface) + elif surface["type"] == SURFACE_TORUS_Z: + result = torus_z.evaluate(particle_container, surface) + + if surface["moving"]: + # Restore particle position + particle["x"] = x_original + particle["y"] = y_original + particle["z"] = z_original + + return result + + +@njit +def get_normal_component(particle_container, speed, surface, data): + """ + Get the surface outward-normal component of the particle + This is the dot product of the particle and the surface outward-normal directions. + Particle speed is needed if the surface is moving to get the relative direction. + """ + particle = particle_container[0] + if surface["moving"]: + # Temporarily translate particle parameters + x_original = particle["x"] + y_original = particle["y"] + z_original = particle["z"] + ux_original = particle["ux"] + uy_original = particle["uy"] + uz_original = particle["uz"] + idx = _get_move_idx(particle["t"], surface, data) + _translate_particle_position(particle_container, surface, idx, data) + _translate_particle_direction(particle_container, speed, surface, idx, data) + + if surface["linear"]: + if surface["type"] == SURFACE_PLANE_X: + result = plane_x.get_normal_component(particle_container, surface) + elif surface["type"] == SURFACE_PLANE_Y: + result = plane_y.get_normal_component(particle_container, surface) + elif surface["type"] == SURFACE_PLANE_Z: + result = plane_z.get_normal_component(particle_container, surface) + elif surface["type"] == SURFACE_PLANE: + result = plane.get_normal_component(particle_container, surface) + else: + if surface["type"] == SURFACE_CYLINDER_X: + result = cylinder_x.get_normal_component(particle_container, surface) + elif surface["type"] == SURFACE_CYLINDER_Y: + result = cylinder_y.get_normal_component(particle_container, surface) + elif surface["type"] == SURFACE_CYLINDER_Z: + result = cylinder_z.get_normal_component(particle_container, 
@njit
def get_normal_component(particle_container, speed, surface, data):
    """
    Get the surface outward-normal component of the particle
    This is the dot product of the particle and the surface outward-normal directions.
    Particle speed is needed if the surface is moving to get the relative direction.
    """
    particle = particle_container[0]
    if surface["moving"]:
        # Temporarily translate particle parameters into the surface frame
        x_original = particle["x"]
        y_original = particle["y"]
        z_original = particle["z"]
        ux_original = particle["ux"]
        uy_original = particle["uy"]
        uz_original = particle["uz"]
        idx = _get_move_idx(particle["t"], surface, data)
        _translate_particle_position(particle_container, surface, idx, data)
        _translate_particle_direction(particle_container, speed, surface, idx, data)

    # Dispatch on surface type
    if surface["linear"]:
        if surface["type"] == SURFACE_PLANE_X:
            result = plane_x.get_normal_component(particle_container, surface)
        elif surface["type"] == SURFACE_PLANE_Y:
            result = plane_y.get_normal_component(particle_container, surface)
        elif surface["type"] == SURFACE_PLANE_Z:
            result = plane_z.get_normal_component(particle_container, surface)
        elif surface["type"] == SURFACE_PLANE:
            result = plane.get_normal_component(particle_container, surface)
    else:
        if surface["type"] == SURFACE_CYLINDER_X:
            result = cylinder_x.get_normal_component(particle_container, surface)
        elif surface["type"] == SURFACE_CYLINDER_Y:
            result = cylinder_y.get_normal_component(particle_container, surface)
        elif surface["type"] == SURFACE_CYLINDER_Z:
            result = cylinder_z.get_normal_component(particle_container, surface)
        elif surface["type"] == SURFACE_CYLINDER:
            result = quadric.get_normal_component(particle_container, surface)
        elif (
            surface["type"] == SURFACE_QUADRIC
            or surface["type"] == SURFACE_CONE_X
            or surface["type"] == SURFACE_CONE_Y
            or surface["type"] == SURFACE_CONE_Z
        ):
            result = quadric.get_normal_component(particle_container, surface)
        elif surface["type"] == SURFACE_SPHERE:
            result = sphere.get_normal_component(particle_container, surface)
        # NOTE: an unreachable duplicate SURFACE_QUADRIC branch was removed here
        # (already matched by the combined branch above).
        elif surface["type"] == SURFACE_TORUS_Z:
            result = torus_z.get_normal_component(particle_container, surface)

    if surface["moving"]:
        # Restore particle parameters
        particle["x"] = x_original
        particle["y"] = y_original
        particle["z"] = z_original
        particle["ux"] = ux_original
        particle["uy"] = uy_original
        particle["uz"] = uz_original

    return result


@njit
def reflect(particle_container, surface):
    """
    Reflect the particle off the surface
    """
    particle = particle_container[0]
    if surface["linear"]:
        if surface["type"] == SURFACE_PLANE_X:
            return plane_x.reflect(particle_container, surface)
        elif surface["type"] == SURFACE_PLANE_Y:
            return plane_y.reflect(particle_container, surface)
        elif surface["type"] == SURFACE_PLANE_Z:
            return plane_z.reflect(particle_container, surface)
        elif surface["type"] == SURFACE_PLANE:
            return plane.reflect(particle_container, surface)
    else:
        if surface["type"] == SURFACE_CYLINDER_X:
            return cylinder_x.reflect(particle_container, surface)
        elif surface["type"] == SURFACE_CYLINDER_Y:
            return cylinder_y.reflect(particle_container, surface)
        elif surface["type"] == SURFACE_CYLINDER_Z:
            return cylinder_z.reflect(particle_container, surface)
        elif surface["type"] == SURFACE_CYLINDER:
            return quadric.reflect(particle_container, surface)
        elif (
            surface["type"] == SURFACE_QUADRIC
            or surface["type"] == SURFACE_CONE_X
            or surface["type"] == SURFACE_CONE_Y
            or surface["type"] == SURFACE_CONE_Z
        ):
            return quadric.reflect(particle_container, surface)
        elif surface["type"] == SURFACE_SPHERE:
            return sphere.reflect(particle_container, surface)
        # NOTE: an unreachable duplicate SURFACE_QUADRIC branch was removed here
        # (already matched by the combined branch above).
        elif surface["type"] == SURFACE_TORUS_Z:
            return torus_z.reflect(particle_container, surface)


@njit
def get_distance(particle_container, speed, surface, data):
    """
    Get particle distance to surface

    Particle speed is needed if the surface is moving.
    """
    particle = particle_container[0]
    if surface["moving"]:
        return _get_distance_moving(particle_container, speed, surface, data)
    else:
        return _get_distance_static(particle_container, surface)


@njit
def _get_distance_static(particle_container, surface):
    """
    Get particle distance to static surface
    """
    particle = particle_container[0]
    if surface["linear"]:
        if surface["type"] == SURFACE_PLANE_X:
            return plane_x.get_distance(particle_container, surface)
        elif surface["type"] == SURFACE_PLANE_Y:
            return plane_y.get_distance(particle_container, surface)
        elif surface["type"] == SURFACE_PLANE_Z:
            return plane_z.get_distance(particle_container, surface)
        elif surface["type"] == SURFACE_PLANE:  # SHOULD BE REVIEWED
            return plane.get_distance(particle_container, surface)
        else:
            return INF
    else:
        if surface["type"] == SURFACE_CYLINDER_X:
            return cylinder_x.get_distance(particle_container, surface)
        elif surface["type"] == SURFACE_CYLINDER_Y:
            return cylinder_y.get_distance(particle_container, surface)
        elif surface["type"] == SURFACE_CYLINDER_Z:
            return cylinder_z.get_distance(particle_container, surface)
        elif surface["type"] == SURFACE_CYLINDER:
            return quadric.get_distance(particle_container, surface)
        elif (
            surface["type"] == SURFACE_QUADRIC
            or surface["type"] == SURFACE_CONE_X
            or surface["type"] == SURFACE_CONE_Y
            or surface["type"] == SURFACE_CONE_Z
        ):
            return quadric.get_distance(particle_container, surface)
        elif surface["type"] == SURFACE_SPHERE:
            return sphere.get_distance(particle_container, surface)
        # NOTE: an unreachable duplicate SURFACE_QUADRIC branch was removed here
        # (already matched by the combined branch above).
        elif surface["type"] == SURFACE_TORUS_Z:
            return torus_z.get_distance(particle_container, surface)
SURFACE_CONE_Z + ): + return quadric.get_distance(particle_container, surface) + elif surface["type"] == SURFACE_SPHERE: + return sphere.get_distance(particle_container, surface) + elif surface["type"] == SURFACE_QUADRIC: + return quadric.get_distance(particle_container, surface) + elif surface["type"] == SURFACE_TORUS_Z: + return torus_z.get_distance(particle_container, surface) + + +@njit +def _get_distance_moving(particle_container, speed, surface, data): + """ + Get particle distance to moving surface + """ + particle = particle_container[0] + # Store original particle parameters (will be temporarily changed) + x_original = particle["x"] + y_original = particle["y"] + z_original = particle["z"] + ux_original = particle["ux"] + uy_original = particle["uy"] + uz_original = particle["uz"] + t_original = particle["t"] + + # Move interval index + idx = _get_move_idx(particle["t"], surface, data) + + # Distance accumulator + total_distance = 0.0 + + # Evaluate the current and the subsequent intervals until intersecting + while idx < surface["N_move"]: + # Translate particle position and direction + _translate_particle_position(particle_container, surface, idx, data) + _translate_particle_direction(particle_container, speed, surface, idx, data) + + # Get distance + distance = _get_distance_static(particle_container, surface) + + # Intersection within the interval? 
+ distance_time = distance / speed + dt = mcdc_get.surface.move_time_grid(idx + 1, surface, data) - particle["t"] + if distance_time < dt: + # Restore particle parameters + particle["x"] = x_original + particle["y"] = y_original + particle["z"] = z_original + particle["ux"] = ux_original + particle["uy"] = uy_original + particle["uz"] = uz_original + particle["t"] = t_original + + # Return the total distance + return total_distance + distance + + # Accumulate distance + total_distance += dt * speed + + # Modify the particle + particle["x"] = x_original + total_distance * ux_original + particle["y"] = y_original + total_distance * uy_original + particle["z"] = z_original + total_distance * uz_original + particle["ux"] = ux_original + particle["uy"] = uy_original + particle["uz"] = uz_original + particle["t"] = mcdc_get.surface.move_time_grid(idx + 1, surface, data) + + # Check next interval + idx += 1 + + # Restore particle parameters + particle["x"] = x_original + particle["y"] = y_original + particle["z"] = z_original + particle["ux"] = ux_original + particle["uy"] = uy_original + particle["uz"] = uz_original + particle["t"] = t_original + + # No intersection + return INF + + +# ====================================================================================== +# Private +# ====================================================================================== + + +@njit +def _get_move_idx(t, surface, data): + """ + Get moving interval index wrt the given time + """ + time_grid = data[ + surface["move_time_grid_offset"] : ( + surface["move_time_grid_offset"] + surface["N_move_grid"] + ) + ] + # Above is equivalent to: time_grid = mcdc_get.surface.move_time_grid_all(surface, data) + tolerance = COINCIDENCE_TOLERANCE_TIME + go_lower = False + idx = find_bin_with_rules(t, time_grid, tolerance, go_lower) + + # Coinciding cases + if abs(time_grid[idx + 1] - t) < COINCIDENCE_TOLERANCE: + idx += 1 + + return idx + + +@njit +def 
_translate_particle_position(particle_container, surface, idx, data): + """ + Translate particle position wrt the given surface moving interval index + """ + particle = particle_container[0] + + # Surface move translations + start = surface["move_translations_offset"] + idx * 3 + trans_0 = data[start : start + 3] + # Above is equivalent to: trans_0 = mcdc_get.surface.move_translations_vector(idx, surface, data) + + # Surface move velocities + start = surface["move_velocities_offset"] + idx * 3 + V = data[start : start + 3] + # Above is equivalent to: V = mcdc_get.surface.move_velocities_vector(idx, surface, data) + + # Surface move time grid + time_0 = mcdc_get.surface.move_time_grid(idx, surface, data) + + # Translate the particle + t_local = particle["t"] - time_0 + particle["x"] -= trans_0[0] + V[0] * t_local + particle["y"] -= trans_0[1] + V[1] * t_local + particle["z"] -= trans_0[2] + V[2] * t_local + + +@njit +def _translate_particle_direction(particle_container, speed, surface, idx, data): + """ + Translate particle direction wrt the given surface moving interval index + """ + particle = particle_container[0] + + # Surface move velocities + start = surface["move_velocities_offset"] + idx * 3 + V = data[start : start + 3] + # Above is equivalent to: V = mcdc_get.surface.move_velocities_vector(idx, surface, data) + + # Translate the particle + particle["ux"] -= V[0] / speed + particle["uy"] -= V[1] / speed + particle["uz"] -= V[2] / speed diff --git a/mcdc/mcdc/transport/geometry/surface/plane.py b/mcdc/mcdc/transport/geometry/surface/plane.py new file mode 100644 index 000000000..a33c24a88 --- /dev/null +++ b/mcdc/mcdc/transport/geometry/surface/plane.py @@ -0,0 +1,88 @@ +""" +Plane: General linear surface + +f(x, y, z) = Gx + Hy + Iz + J +""" + +from numba import njit + +from mcdc.constant import ( + COINCIDENCE_TOLERANCE, + INF, +) + + +@njit +def evaluate(particle_container, surface): + particle = particle_container[0] + # Particle parameters + x = 
particle["x"] + y = particle["y"] + z = particle["z"] + + # Surface parameters + G = surface["G"] + H = surface["H"] + I = surface["I"] + J = surface["J"] + + return G * x + H * y + I * z + J + + +@njit +def reflect(particle_container, surface): + particle = particle_container[0] + # Particle parameters + ux = particle["ux"] + uy = particle["uy"] + uz = particle["uz"] + + # Surface normal + nx = surface["G"] + ny = surface["H"] + nz = surface["I"] + + # Reflect + c = 2.0 * (nx * ux + ny * uy + nz * uz) + particle["ux"] -= c * nx + particle["uy"] -= c * ny + particle["uz"] -= c * nz + + +@njit +def get_normal_component(particle_container, surface): + particle = particle_container[0] + # Surface normal + nx = surface["G"] + ny = surface["H"] + nz = surface["I"] + + # Particle parameters + ux = particle["ux"] + uy = particle["uy"] + uz = particle["uz"] + + return nx * ux + ny * uy + nz * uz + + +@njit +def get_distance(particle_container, surface): + particle = particle_container[0] + # Parallel? + normal_component = get_normal_component(particle_container, surface) + if abs(normal_component) == 0.0: + return INF + + # Coincident? + f = evaluate(particle_container, surface) + if abs(f) < COINCIDENCE_TOLERANCE: + return INF + + # Calculate distance + distance = -f / normal_component + + # Moving away? 
"""
Plane-X: Plane perpendicular to the x-axis

f(x) = x + J
"""

from numba import njit

from mcdc.constant import (
    COINCIDENCE_TOLERANCE,
    INF,
)


@njit
def evaluate(particle_container, surface):
    """Evaluate f at the particle position; the sign tells which side it is on."""
    return particle_container[0]["x"] + surface["J"]


@njit
def reflect(particle_container, surface):
    """Mirror the particle direction about the plane (flip the x component)."""
    particle = particle_container[0]
    particle["ux"] = -particle["ux"]


@njit
def get_normal_component(particle_container, surface):
    """Return the direction component along the plane normal (+x axis)."""
    return particle_container[0]["ux"]


@njit
def get_distance(particle_container, surface):
    """
    Return the flight distance to the plane, or INF when the particle is
    parallel to, coincident with (within tolerance), or moving away from it.
    """
    # Parallel flight never intersects
    normal_component = get_normal_component(particle_container, surface)
    if abs(normal_component) == 0.0:
        return INF

    # Already on the surface: treated as no hit
    f = evaluate(particle_container, surface)
    if abs(f) < COINCIDENCE_TOLERANCE:
        return INF

    distance = -f / normal_component

    # A negative distance means the plane is behind the particle
    if distance < 0.0:
        return INF
    return distance
"""
Plane-Z: Plane perpendicular to the z-axis

f(z) = z + J
"""

from numba import njit

from mcdc.constant import (
    COINCIDENCE_TOLERANCE,
    INF,
)


@njit
def evaluate(particle_container, surface):
    """Evaluate f at the particle position; the sign tells which side it is on."""
    return particle_container[0]["z"] + surface["J"]


@njit
def reflect(particle_container, surface):
    """Mirror the particle direction about the plane (flip the z component)."""
    particle = particle_container[0]
    particle["uz"] = -particle["uz"]


@njit
def get_normal_component(particle_container, surface):
    """Return the direction component along the plane normal (+z axis)."""
    return particle_container[0]["uz"]


@njit
def get_distance(particle_container, surface):
    """
    Return the flight distance to the plane, or INF when the particle is
    parallel to, coincident with (within tolerance), or moving away from it.
    """
    # Parallel flight never intersects
    normal_component = get_normal_component(particle_container, surface)
    if abs(normal_component) == 0.0:
        return INF

    # Already on the surface: treated as no hit
    f = evaluate(particle_container, surface)
    if abs(f) < COINCIDENCE_TOLERANCE:
        return INF

    distance = -f / normal_component

    # A negative distance means the plane is behind the particle
    if distance < 0.0:
        return INF
    return distance
particle["y"] + z = particle["z"] + ux = particle["ux"] + uy = particle["uy"] + uz = particle["uz"] + + # Surface coefficients + A = surface["A"] + B = surface["B"] + C = surface["C"] + D = surface["D"] + E = surface["E"] + F = surface["F"] + G = surface["G"] + H = surface["H"] + I = surface["I"] + + # Surface normal + dx = 2 * A * x + D * y + E * z + G + dy = 2 * B * y + D * x + F * z + H + dz = 2 * C * z + E * x + F * y + I + norm = (dx**2 + dy**2 + dz**2) ** 0.5 + nx = dx / norm + ny = dy / norm + nz = dz / norm + + return nx * ux + ny * uy + nz * uz + + +@njit +def get_distance(particle_container, surface): + particle = particle_container[0] + # Particle coordinate + x = particle["x"] + y = particle["y"] + z = particle["z"] + ux = particle["ux"] + uy = particle["uy"] + uz = particle["uz"] + + # Surface coefficients + A = surface["A"] + B = surface["B"] + C = surface["C"] + D = surface["D"] + E = surface["E"] + F = surface["F"] + G = surface["G"] + H = surface["H"] + I = surface["I"] + + # Coincident? + f = evaluate(particle_container, surface) + coincident = abs(f) < COINCIDENCE_TOLERANCE + if coincident: + # Moving away or tangent? + if ( + get_normal_component(particle_container, surface) + >= 0.0 - COINCIDENCE_TOLERANCE + ): + return INF + + # Quadratic equation constants + a = ( + A * ux * ux + + B * uy * uy + + C * uz * uz + + D * ux * uy + + E * ux * uz + + F * uy * uz + ) + b = ( + 2 * (A * x * ux + B * y * uy + C * z * uz) + + D * (x * uy + y * ux) + + E * (x * uz + z * ux) + + F * (y * uz + z * uy) + + G * ux + + H * uy + + I * uz + ) + c = f + + determinant = b * b - 4.0 * a * c + + # Roots are complex : no intersection + # Roots are identical: tangent + # ==> return huge number + if determinant <= 0.0: + return INF + else: + # Get the roots + denom = 2.0 * a + sqrt = math.sqrt(determinant) + root_1 = (-b + sqrt) / denom + root_2 = (-b - sqrt) / denom + + # Coincident? 
+ if coincident: + return max(root_1, root_2) + + # Negative roots, moving away from the surface + if root_1 < 0.0: + root_1 = INF + if root_2 < 0.0: + root_2 = INF + + # Return the smaller root + return min(root_1, root_2) diff --git a/mcdc/mcdc/transport/geometry/surface/sphere.py b/mcdc/mcdc/transport/geometry/surface/sphere.py new file mode 100644 index 000000000..09c32261f --- /dev/null +++ b/mcdc/mcdc/transport/geometry/surface/sphere.py @@ -0,0 +1,134 @@ +""" +Sphere + +f(x, y, z) = xx + yy + zz + Gx + Hy + Iz + J +""" + +import math + +from numba import njit + +from mcdc.constant import ( + COINCIDENCE_TOLERANCE, + INF, +) + + +@njit +def evaluate(particle_container, surface): + particle = particle_container[0] + # Particle parameters + x = particle["x"] + y = particle["y"] + z = particle["z"] + + # Surface parameters + G = surface["G"] + H = surface["H"] + I = surface["I"] + J = surface["J"] + + return x**2 + y**2 + z**2 + G * x + H * y + I * z + J + + +@njit +def reflect(particle_container, surface): + particle = particle_container[0] + # Particle parameters + ux = particle["ux"] + uy = particle["uy"] + uz = particle["uz"] + + # Surface normal + dx = 2 * particle["x"] + surface["G"] + dy = 2 * particle["y"] + surface["H"] + dz = 2 * particle["z"] + surface["I"] + norm = (dx**2 + dy**2 + dz**2) ** 0.5 + nx = dx / norm + ny = dy / norm + nz = dz / norm + + # Reflect + c = 2.0 * (nx * ux + ny * uy + nz * uz) + particle["ux"] -= c * nx + particle["uy"] -= c * ny + particle["uz"] -= c * nz + + +@njit +def get_normal_component(particle_container, surface): + particle = particle_container[0] + # Surface normal + dx = 2 * particle["x"] + surface["G"] + dy = 2 * particle["y"] + surface["H"] + dz = 2 * particle["z"] + surface["I"] + norm = (dx**2 + dy**2 + dz**2) ** 0.5 + nx = dx / norm + ny = dy / norm + nz = dz / norm + + # Particle parameters + ux = particle["ux"] + uy = particle["uy"] + uz = particle["uz"] + + return nx * ux + ny * uy + nz * uz + + +@njit +def 
get_distance(particle_container, surface): + particle = particle_container[0] + # Particle coordinate + x = particle["x"] + y = particle["y"] + z = particle["z"] + ux = particle["ux"] + uy = particle["uy"] + uz = particle["uz"] + + # Surface coefficients + G = surface["G"] + H = surface["H"] + I = surface["I"] + + # Coincident? + f = evaluate(particle_container, surface) + coincident = abs(f) < COINCIDENCE_TOLERANCE + if coincident: + # Moving away or tangent? + if ( + get_normal_component(particle_container, surface) + >= 0.0 - COINCIDENCE_TOLERANCE + ): + return INF + + # Quadratic equation constants + b = 2 * (x * ux + y * uy + z * uz) + G * ux + H * uy + I * uz + c = f + + determinant = b * b - 4.0 * c + + # Roots are complex : no intersection + # Roots are identical: tangent + # ==> return huge number + if determinant <= 0.0: + return INF + else: + # Get the roots + denom = 2.0 + sqrt = math.sqrt(determinant) + root_1 = (-b + sqrt) / denom + root_2 = (-b - sqrt) / denom + + # Coincident? 
+ if coincident: + return max(root_1, root_2) + + # Negative roots, moving away from the surface + if root_1 < 0.0: + root_1 = INF + if root_2 < 0.0: + root_2 = INF + + # Return the smaller root + return min(root_1, root_2) diff --git a/mcdc/mcdc/transport/geometry/surface/torus_z.py b/mcdc/mcdc/transport/geometry/surface/torus_z.py new file mode 100644 index 000000000..8e5ed8183 --- /dev/null +++ b/mcdc/mcdc/transport/geometry/surface/torus_z.py @@ -0,0 +1,219 @@ +""" +Torus: Implicit equation of a torus radially symmetric about the z-axis in the cartesian plane + +f(x, y, z) = ( sqrt[(x - A)^2 + (y - B)^2] - R )^2 + (z - C)^2 - r^2 + +Where R is the radius of the shape as a whole, and r is the radius of the circle that is revolved to create the donut + +Removing the square roots leaves you with the following equation: +f (x, y, z) = ( x^2 + y^2 + z^2 + R^2 - r^2 )^2 - 4R^2 * (x^2 + y^2) +""" + +import math + +import numpy as np + +from numba import njit + +from mcdc.constant import ( + COINCIDENCE_TOLERANCE, + INF, +) + + +@njit +def evaluate(particle_container, surface): + """ + Description: + Checking to see if the particle is on the surface in question + + Returns: (float) + - If the return is 0, the particle occupies the exact space of the torus + - If the return is positive, the particle is outside the torus + - If the return is negative, the particle is inside the torus + """ + + # Particle parameters + particle = particle_container[0] + x = particle["x"] + y = particle["y"] + z = particle["z"] + + # Surface parameters + R = surface["R"] + r = surface["r"] + A = surface["A"] + B = surface["B"] + C = surface["C"] + + # Shifting the origin point of the particle into the torus space, and treating the torus as centered on (0,0,0) + x -= A + y -= B + z -= C + + return ((x * x + y * y + z * z + R * R - r * r) ** 2) - ( + 4 * R * R * (x * x + y * y) + ) + + +@njit +def reflect(particle_container, surface): + particle = particle_container[0] + # Particle coordinate 
+ x = particle["x"] + y = particle["y"] + z = particle["z"] + ux = particle["ux"] + uy = particle["uy"] + uz = particle["uz"] + + # Surface coefficients + R = surface["R"] + r = surface["r"] + A = surface["A"] + B = surface["B"] + C = surface["C"] + + # Shifting the origin point of the particle into the torus space, and treating the torus as centered on (0,0,0) + x -= A + y -= B + z -= C + + # Taking the partial derivitives of the expanded form of the implicit torus equation + dx = 4 * x * (-(r**2) - (R**2) + (x**2) + (y**2) + (z**2)) + dy = 4 * y * (-(r**2) - (R**2) + (x**2) + (y**2) + (z**2)) + dz = 4 * z * (-(r**2) + (R**2) + (x**2) + (y**2) + (z**2)) + + # Surface Normal + norm = math.sqrt(dx**2 + dy**2 + dz**2) + nx = dx / norm + ny = dy / norm + nz = dz / norm + + # Reflect + c = 2.0 * ( + nx * ux + ny * uy + nz * uz + ) # Magnitutde component of the projection of the particle onto the surface normal + particle["ux"] -= c * nx + particle["uy"] -= c * ny + particle["uz"] -= c * nz + + +@njit +def get_normal_component(particle_container, surface): + particle = particle_container[0] + # Particle coordinate + x = particle["x"] + y = particle["y"] + z = particle["z"] + ux = particle["ux"] + uy = particle["uy"] + uz = particle["uz"] + + # Surface coefficients + R = surface["R"] + r = surface["r"] + A = surface["A"] + B = surface["B"] + C = surface["C"] + + # Shifting the origin point of the particle into the torus space, and treating the torus as centered on (0,0,0) + x -= A + y -= B + z -= C + + # Taking the partial derivitives of the expanded form of the implicit torus equation + dx = 4 * x * (-(r**2) - (R**2) + (x**2) + (y**2) + (z**2)) + dy = 4 * y * (-(r**2) - (R**2) + (x**2) + (y**2) + (z**2)) + dz = 4 * z * (-(r**2) + (R**2) + (x**2) + (y**2) + (z**2)) + + # Surface Normal + norm = math.sqrt(dx**2 + dy**2 + dz**2) + nx = dx / norm + ny = dy / norm + nz = dz / norm + + return nx * ux + ny * uy + nz * uz + + +@njit +def get_distance(particle_container, 
surface): + particle = particle_container[0] + + # Particle coordinate + x = particle["x"] + y = particle["y"] + z = particle["z"] + ux = particle["ux"] + uy = particle["uy"] + uz = particle["uz"] + + # Surface coefficients + R = surface["R"] + r = surface["r"] + A = surface["A"] + B = surface["B"] + C = surface["C"] + + # Coincident? + f = evaluate(particle_container, surface) + coincident = abs(f) < COINCIDENCE_TOLERANCE + if coincident: + # Moving away or tangent? + if ( + get_normal_component(particle_container, surface) + >= 0.0 - COINCIDENCE_TOLERANCE + ): + return INF + + # Shifting the origin point of the particle into the torus space, and treating the torus as centered on (0,0,0) + x -= A + y -= B + z -= C + + # Dot products that come up frequently in the torus-ray intersection equation + G = ux * ux + uy * uy + uz * uz + H = 2.0 * (x * ux + y * uy + z * uz) + I = x * x + y * y + z * z + + J = ux * ux + uy * uy + K = 2.0 * (x * ux + y * uy) + L = x * x + y * y + + # Quartic coefficients from substituting (i = origin_i + direction_i * t) into each axis for i (x,y,z) + a4 = G * G + a3 = 2.0 * G * H + a2 = H * H + 2.0 * G * (I + R * R - r * r) - 4.0 * R * R * J + a1 = 2.0 * H * (I + R * R - r * r) - 4.0 * R * R * K + a0 = (I + R * R - r * r) ** 2 - 4.0 * R * R * L + + # TODO: May replace with a fully numba-native quartic solver if torus performance becomes important; + # np.roots is sufficient for now. + coefficients = np.array( + [a4 + 0.0j, a3 + 0.0j, a2 + 0.0j, a1 + 0.0j, a0 + 0.0j], + dtype=np.complex128, + ) + roots = np.roots(coefficients) + + min_t = INF + + # TODO: Add stricter coverage/handling for near-tangent and off-axis torus intersections. + # Current root filtering relies on COINCIDENCE_TOLERANCE for near-real and near-zero roots. 
+ for solution in roots: + if abs(solution.imag) >= COINCIDENCE_TOLERANCE: + continue + + root = solution.real + if coincident: + if root <= COINCIDENCE_TOLERANCE: + continue + elif root < 0.0: + continue + + if root < min_t: + min_t = root + + if min_t == INF: + return INF + + return min_t diff --git a/mcdc/mcdc/transport/literals.py b/mcdc/mcdc/transport/literals.py new file mode 100644 index 000000000..bfca9dd77 --- /dev/null +++ b/mcdc/mcdc/transport/literals.py @@ -0,0 +1,3 @@ +# The following is automatically generated by code_factory.py + +rpn_evaluation_buffer_size = 2 diff --git a/mcdc/mcdc/transport/mesh/__init__.py b/mcdc/mcdc/transport/mesh/__init__.py new file mode 100644 index 000000000..c4836d342 --- /dev/null +++ b/mcdc/mcdc/transport/mesh/__init__.py @@ -0,0 +1,8 @@ +import mcdc.transport.mesh.uniform as uniform +import mcdc.transport.mesh.structured as structured +from mcdc.transport.mesh.interface import ( + get_indices, + get_x, + get_y, + get_z, +) diff --git a/mcdc/mcdc/transport/mesh/interface.py b/mcdc/mcdc/transport/mesh/interface.py new file mode 100644 index 000000000..2e4fda69f --- /dev/null +++ b/mcdc/mcdc/transport/mesh/interface.py @@ -0,0 +1,63 @@ +from numba import njit + +from mcdc import mcdc_get +from mcdc.constant import MESH_STRUCTURED, MESH_UNIFORM +import mcdc.transport.mesh.structured as structured +import mcdc.transport.mesh.uniform as uniform + + +@njit +def get_indices(particle_container, mesh_base, simulation, data): + mesh_type = mesh_base["child_type"] + mesh_ID = mesh_base["child_ID"] + + if mesh_type == MESH_UNIFORM: + mesh = simulation["uniform_meshes"][mesh_ID] + return uniform.get_indices(particle_container, mesh) + elif mesh_type == MESH_STRUCTURED: + mesh = simulation["structured_meshes"][mesh_ID] + return structured.get_indices(particle_container, mesh, data) + + return -1, -1, -1 + + +@njit +def get_x(index, mesh_base, simulation, data): + mesh_type = mesh_base["child_type"] + mesh_ID = mesh_base["child_ID"] + 
+ if mesh_type == MESH_UNIFORM: + mesh = simulation["uniform_meshes"][mesh_ID] + return mesh["x0"] + mesh["dx"] * index + elif mesh_type == MESH_STRUCTURED: + mesh = simulation["structured_meshes"][mesh_ID] + return mcdc_get.structured_mesh.x(index, mesh, data) + return 0.0 + + +@njit +def get_y(index, mesh_base, simulation, data): + mesh_type = mesh_base["child_type"] + mesh_ID = mesh_base["child_ID"] + + if mesh_type == MESH_UNIFORM: + mesh = simulation["uniform_meshes"][mesh_ID] + return mesh["y0"] + mesh["dy"] * index + elif mesh_type == MESH_STRUCTURED: + mesh = simulation["structured_meshes"][mesh_ID] + return mcdc_get.structured_mesh.y(index, mesh, data) + return 0.0 + + +@njit +def get_z(index, mesh_base, simulation, data): + mesh_type = mesh_base["child_type"] + mesh_ID = mesh_base["child_ID"] + + if mesh_type == MESH_UNIFORM: + mesh = simulation["uniform_meshes"][mesh_ID] + return mesh["z0"] + mesh["dz"] * index + elif mesh_type == MESH_STRUCTURED: + mesh = simulation["structured_meshes"][mesh_ID] + return mcdc_get.structured_mesh.z(index, mesh, data) + return 0.0 diff --git a/mcdc/mcdc/transport/mesh/structured.py b/mcdc/mcdc/transport/mesh/structured.py new file mode 100644 index 000000000..9afe50a8a --- /dev/null +++ b/mcdc/mcdc/transport/mesh/structured.py @@ -0,0 +1,110 @@ +from numba import njit + +#### + +import mcdc.mcdc_get as mcdc_get + +from mcdc.constant import COINCIDENCE_TOLERANCE, COINCIDENCE_TOLERANCE_TIME, INF +from mcdc.transport.util import find_bin_with_rules + + +@njit +def get_indices(particle_container, mesh, data): + """ + Get mesh indices given the particle coordinate + """ + particle = particle_container[0] + + # Particle coordinate + x = particle["x"] + y = particle["y"] + z = particle["z"] + ux = particle["ux"] + uy = particle["uy"] + uz = particle["uz"] + + grid_x = data[mesh["x_offset"] : (mesh["x_offset"] + mesh["x_length"])] + # Above is equivalent to: grid_x = mcdc_get.structured_mesh.x_all(mesh, data) + grid_y = 
data[mesh["y_offset"] : (mesh["y_offset"] + mesh["y_length"])] + # Above is equivalent to: grid_y = mcdc_get.structured_mesh.y_all(mesh, data) + grid_z = data[mesh["z_offset"] : (mesh["z_offset"] + mesh["z_length"])] + # Above is equivalent to: grid_z = mcdc_get.structured_mesh.z_all(mesh, data) + + tolerance = COINCIDENCE_TOLERANCE + ux_go_lower = ux < 0.0 + uy_go_lower = uy < 0.0 + uz_go_lower = uz < 0.0 + + ix = find_bin_with_rules(x, grid_x, tolerance, ux_go_lower) + iy = find_bin_with_rules(y, grid_y, tolerance, uy_go_lower) + iz = find_bin_with_rules(z, grid_z, tolerance, uz_go_lower) + + return ix, iy, iz + + +@njit +def get_crossing_distance(particle_arr, speed, mesh): + """ + Get distance for the particle, moving with the given speed, + to cross the nearest grid of the mesh + """ + particle = particle_arr[0] + + # Particle coordinate + x = particle["x"] + y = particle["y"] + z = particle["z"] + ux = particle["ux"] + uy = particle["uy"] + uz = particle["uz"] + + # Mesh parameters + Nx = mesh["Nx"] + Ny = mesh["Ny"] + Nz = mesh["Nz"] + + # Check if particle is outside the mesh grid and moving away + if ( + (x < mesh["x"][0] + COINCIDENCE_TOLERANCE and ux < 0.0) + or (x > mesh["x"][Nx] - COINCIDENCE_TOLERANCE and ux > 0.0) + or (y < mesh["y"][0] + COINCIDENCE_TOLERANCE and uy < 0.0) + or (y > mesh["y"][Ny] - COINCIDENCE_TOLERANCE and uy > 0.0) + or (z < mesh["z"][0] + COINCIDENCE_TOLERANCE and uz < 0.0) + or (z > mesh["z"][Nz] - COINCIDENCE_TOLERANCE and uz > 0.0) + ): + return INF + + d = INF + d = min(d, _grid_distance(x, ux, mesh["x"], Nx + 1, COINCIDENCE_TOLERANCE)) + d = min(d, _grid_distance(y, uy, mesh["y"], Ny + 1, COINCIDENCE_TOLERANCE)) + d = min(d, _grid_distance(z, uz, mesh["z"], Nz + 1, COINCIDENCE_TOLERANCE)) + return d + + +@njit +def _grid_distance(value, direction, grid, length, tolerance): + """ + Get distance to nearest grid given a value and direction + + Direction is used to tiebreak when the value is at a grid point + (within tolerance). 
# --- mcdc/transport/mesh/uniform.py (reconstructed) ---------------------------


@njit
def _axis_index(pos, dir_, lo, hi, width):
    """Cell index along one axis, or -1 if the particle is outside the mesh
    or sitting on an outermost grid plane while moving outward."""
    outside = pos < lo - COINCIDENCE_TOLERANCE or pos > hi + COINCIDENCE_TOLERANCE
    leaving_low = abs(pos - lo) < COINCIDENCE_TOLERANCE and dir_ < 0.0
    leaving_high = abs(pos - hi) < COINCIDENCE_TOLERANCE and dir_ > 0.0
    if outside or leaving_low or leaving_high:
        return -1
    return _grid_index(pos, dir_, lo, width, COINCIDENCE_TOLERANCE)


@njit
def get_indices(particle_container, mesh):
    """Return (ix, iy, iz) uniform-mesh cell indices for the particle.

    Any axis on which the particle is outside the mesh, or on the outer
    boundary and moving away, yields -1.
    """
    particle = particle_container[0]

    # Mesh origin, cell widths, and extents
    x0 = mesh["x0"]
    y0 = mesh["y0"]
    z0 = mesh["z0"]
    dx = mesh["dx"]
    dy = mesh["dy"]
    dz = mesh["dz"]
    x_last = x0 + mesh["Nx"] * dx
    y_last = y0 + mesh["Ny"] * dy
    z_last = z0 + mesh["Nz"] * dz

    ix = _axis_index(particle["x"], particle["ux"], x0, x_last, dx)
    iy = _axis_index(particle["y"], particle["uy"], y0, y_last, dy)
    iz = _axis_index(particle["z"], particle["uz"], z0, z_last, dz)
    return ix, iy, iz


@njit
def get_crossing_distance(particle_container, speed, mesh):
    """Distance for the particle to reach the nearest mesh grid plane.

    Returns INF if the particle is outside the mesh (or on its boundary,
    within tolerance) and moving away.  `speed` is not used here but kept
    for interface compatibility with the other mesh types.
    """
    particle = particle_container[0]

    x = particle["x"]
    y = particle["y"]
    z = particle["z"]
    ux = particle["ux"]
    uy = particle["uy"]
    uz = particle["uz"]

    x0 = mesh["x0"]
    y0 = mesh["y0"]
    z0 = mesh["z0"]
    dx = mesh["dx"]
    dy = mesh["dy"]
    dz = mesh["dz"]
    x_last = x0 + mesh["Nx"] * dx
    y_last = y0 + mesh["Ny"] * dy
    z_last = z0 + mesh["Nz"] * dz

    # Outside (or on the boundary) and moving away: no crossing
    if (
        (x < x0 + COINCIDENCE_TOLERANCE and ux < 0.0)
        or (x > x_last - COINCIDENCE_TOLERANCE and ux > 0.0)
        or (y < y0 + COINCIDENCE_TOLERANCE and uy < 0.0)
        or (y > y_last - COINCIDENCE_TOLERANCE and uy > 0.0)
        or (z < z0 + COINCIDENCE_TOLERANCE and uz < 0.0)
        or (z > z_last - COINCIDENCE_TOLERANCE and uz > 0.0)
    ):
        return INF

    # Nearest crossing over the three axes
    distance = _grid_distance(x, ux, x0, dx, COINCIDENCE_TOLERANCE)
    distance = min(distance, _grid_distance(y, uy, y0, dy, COINCIDENCE_TOLERANCE))
    distance = min(distance, _grid_distance(z, uz, z0, dz, COINCIDENCE_TOLERANCE))
    return distance


@njit
def _grid_index(value, direction, start, width, tolerance):
    """Cell index of `value` on a uniform grid; `direction` breaks ties.

    When the value coincides with a grid plane (within tolerance), a
    negative-moving particle belongs to the cell below that plane.
    Assumes the value lies inside the grid.
    """
    idx = int(math.floor((value + tolerance - start) / width))

    on_plane = abs(start + width * idx - value) < tolerance
    if on_plane and direction < 0.0:
        idx -= 1

    return idx


@njit
def _grid_distance(value, direction, start, width, tolerance):
    """Distance along `direction` to the nearest uniform-grid plane.

    Returns INF for zero direction; otherwise assumes a plane will be hit.
    """
    if direction == 0.0:
        return INF

    # Current cell (direction-tie-broken), then step to the plane ahead
    idx = _grid_index(value, direction, start, width, tolerance)
    if direction > 0.0:
        idx += 1

    return (start + width * idx - value) / direction


# --- mcdc/transport/mpi.py (reconstructed) ------------------------------------


@njit
def distribute_work(N_work, simulation):
    """Evenly split N_work items across MPI ranks (remainder to low ranks).

    Stores mpi_work_start / mpi_work_size / mpi_work_size_total on the
    simulation record.
    """
    size = simulation["mpi_size"]
    rank = simulation["mpi_rank"]

    # Even base share and leftover
    base = math.floor(N_work / size)
    remainder = N_work % size

    # Ranks below the remainder take one extra item each
    if rank < remainder:
        work_size = base + 1
        work_start = base * rank + rank
    else:
        work_size = base
        work_start = base * rank + remainder

    simulation["mpi_work_start"] = work_start
    simulation["mpi_work_size"] = work_size
    simulation["mpi_work_size_total"] = N_work
@njit
def move(particle_container, distance, simulation, data):
    """Advance the particle by `distance` along its direction, updating time."""
    particle = particle_container[0]

    # Inverse speed converts path length into elapsed time
    inverse_speed = 1.0 / physics.particle_speed(particle_container, simulation, data)

    particle["x"] += distance * particle["ux"]
    particle["y"] += distance * particle["uy"]
    particle["z"] += distance * particle["uz"]
    particle["t"] += distance * inverse_speed


@njit
def copy(target_particle_container, source_particle_container):
    """Copy all transport attributes from the source to the target particle."""
    target = target_particle_container[0]
    source = source_particle_container[0]

    # Field-by-field copy (explicit for numba record types)
    target["x"] = source["x"]
    target["y"] = source["y"]
    target["z"] = source["z"]
    target["t"] = source["t"]
    target["ux"] = source["ux"]
    target["uy"] = source["uy"]
    target["uz"] = source["uz"]
    target["g"] = source["g"]
    target["E"] = source["E"]
    target["w"] = source["w"]
    target["particle_type"] = source["particle_type"]
    target["rng_seed"] = source["rng_seed"]


@njit
def copy_as_child(child_particle_container, parent_particle_container):
    """Clone the parent into the child and give the child its own RNG stream."""
    copy(child_particle_container, parent_particle_container)

    parent = parent_particle_container[0]
    child = child_particle_container[0]

    # Split off an independent seed so the child's history is decorrelated
    child["rng_seed"] = rng.split_seed(parent["rng_seed"], rng.SEED_SPLIT_PARTICLE)

    # Advance the parent seed so subsequent children get different seeds
    rng.lcg(parent_particle_container)
# =============================================================================
# Bank size
# =============================================================================


@njit
def get_bank_size(bank):
    """Current number of particles stored in the bank."""
    return bank["size"][0]


@njit
def set_bank_size(bank, value):
    """Overwrite the bank's particle count."""
    bank["size"][0] = value


@njit
def add_bank_size(bank, value):
    """Atomically add `value` (may be negative) to the bank's particle count."""
    util.atomic_add(bank["size"], 0, value)


# =============================================================================
# Bank and pop particle
# =============================================================================


@njit
def _bank_particle(particle_container, bank):
    """Write the particle into the bank's next free slot (no size update).

    NOTE(review): the slot index is read before the caller's atomic size
    increment; concurrent callers could pick the same slot — confirm the
    intended threading model.
    """
    # Abort if the bank has no free slot left
    if get_bank_size(bank) == bank["particle_data"].shape[0]:
        report_full_bank(bank)

    slot = get_bank_size(bank)
    particle_module.copy(bank["particle_data"][slot : slot + 1], particle_container)


@njit
def bank_active_particle(particle_container, program):
    """Store the particle in the active bank and grow its size."""
    simulation = util.access_simulation(program)
    bank = simulation["bank_active"]
    _bank_particle(particle_container, bank)
    add_bank_size(bank, 1)


@njit
def bank_census_particle(particle_container, program):
    """Store the particle in the census bank and grow its size."""
    simulation = util.access_simulation(program)
    bank = simulation["bank_census"]
    _bank_particle(particle_container, bank)
    add_bank_size(bank, 1)


@njit
def bank_future_particle(particle_container, program):
    """Store the particle in the future bank and grow its size."""
    simulation = util.access_simulation(program)
    bank = simulation["bank_future"]
    _bank_particle(particle_container, bank)
    add_bank_size(bank, 1)


@njit
def bank_source_particle(particle_container, simulation):
    """Store the particle in the source bank and grow its size.

    Source banking is not thread-parallelized, so a plain (non-atomic)
    increment is sufficient here.
    """
    bank = simulation["bank_source"]
    _bank_particle(particle_container, bank)
    bank["size"][0] += 1


@njit
def pop_particle(particle_container, bank):
    """Pop the last particle off the bank and mark it live with default IDs."""
    if get_bank_size(bank) == 0:
        report_empty_bank(bank)

    last = get_bank_size(bank) - 1
    particle_module.copy(particle_container, bank["particle_data"][last : last + 1])
    add_bank_size(bank, -1)

    # Reset transport bookkeeping for the freshly popped particle
    particle = particle_container[0]
    particle["alive"] = True
    particle["material_ID"] = -1
    particle["cell_ID"] = -1
    particle["surface_ID"] = -1
    particle["event"] = -1


@njit
def report_full_bank(bank):
    """Fatal error: attempted to bank into a full bank."""
    with objmode():
        print_error("Particle %s bank is full." % bank["tag"])


@njit
def report_empty_bank(bank):
    """Fatal error: attempted to pop from an empty bank."""
    with objmode():
        print_error("Attempting to get a particle from an empty %s bank." % bank["tag"])


# ======================================================================================
# Future bank management
# ======================================================================================


@njit
def promote_future_particles(program, data):
    """Move future-bank particles born before the next census into the census bank."""
    simulation = util.access_simulation(program)
    future_bank = simulation["bank_future"]

    # Time of the upcoming census
    census_idx = simulation["idx_census"] + 1
    next_census_time = mcdc_get.settings.census_time(
        census_idx, simulation["settings"], data
    )

    # Scratch particle
    particle_container = util.local_array(1, type_.particle_data)
    particle = particle_container[0]

    initial_size = get_bank_size(future_bank)
    for i in range(initial_size):
        # The bank shrinks as particles are promoted, so shift the index by
        # the number already removed
        slot = i - (initial_size - get_bank_size(future_bank))
        particle_module.copy(
            particle_container, future_bank["particle_data"][slot : slot + 1]
        )

        if particle["t"] < next_census_time:
            # Promote to census bank and shrink the future bank
            bank_census_particle(particle_container, program)
            add_bank_size(future_bank, -1)

            # Fill the freed slot with the bank's (new) last particle
            tail = get_bank_size(future_bank)
            particle_module.copy(
                future_bank["particle_data"][slot : slot + 1],
                future_bank["particle_data"][tail : tail + 1],
            )


# ======================================================================================
# All-bank management
# ======================================================================================


@njit
def manage_particle_banks(simulation):
    """End-of-cycle bank bookkeeping: normalize, population-control or swap
    census into source, rebalance across MPI ranks, and reset the census bank."""
    master = simulation["mpi_master"]
    serial = simulation["mpi_size"] == 1

    # TIMER: bank management (master rank only)
    time_start = 0.0
    time_spent = 0.0
    if master:
        with objmode(time_start="float64"):
            time_start = MPI.Wtime()
        time_spent = -time_start

    # Start from an empty source bank
    set_bank_size(simulation["bank_source"], 0)

    # Normalize census weight in eigenvalue mode
    if simulation["settings"]["neutron_eigenvalue_mode"]:
        normalize_weight(
            simulation["bank_census"], simulation["settings"]["N_particle"]
        )

    if simulation["population_control"]["active"]:
        technique.population_control(simulation)
    else:
        # Swap census bank into the source bank
        source_bank = simulation["bank_source"]
        census_bank = simulation["bank_census"]

        size = get_bank_size(census_bank)
        if size >= source_bank["particle_data"].shape[0]:
            report_full_bank(source_bank)

        # TODO: better alternative?
        source_bank["particle_data"][:size] = census_bank["particle_data"][:size]
        set_bank_size(source_bank, size)

    # Redistribute work and rebalance bank size across MPI ranks
    if serial:
        mpi.distribute_work(get_bank_size(simulation["bank_source"]), simulation)
    else:
        bank_rebalance(simulation)

    # Census bank is consumed
    set_bank_size(simulation["bank_census"], 0)

    # TIMER: bank management
    time_end = 0.0
    if master:
        with objmode(time_end="float64"):
            time_end = MPI.Wtime()
        time_spent += time_end
        simulation["runtime_bank_management"] += time_spent


# ======================================================================================
# Bank size parallel rebalance
# ======================================================================================


@njit
def bank_rebalance(simulation):
    """Rebalance source-bank particles across MPI ranks via nearest-neighbor
    sends/receives so each rank holds exactly its assigned work slice."""
    # Global prefix scan of bank sizes
    idx_start, N_local, N = bank_scanning(simulation["bank_source"], simulation)
    idx_end = idx_start + N_local
    mpi.distribute_work(N, simulation)

    # Nothing to do for an empty global bank
    if N == 0:
        return

    # Nothing to do on a single rank
    if simulation["mpi_size"] <= 1:
        return

    work_start = simulation["mpi_work_start"]
    work_end = work_start + simulation["mpi_work_size"]
    left = simulation["mpi_rank"] - 1
    right = simulation["mpi_rank"] + 1

    # Which neighbors do we exchange with?
    send_to_left = idx_start < work_start
    receive_from_left = idx_start > work_start
    send_to_right = idx_end > work_end
    receive_from_right = idx_end < work_end

    # Receive first if our entire slice comes from a neighbor (avoids deadlock)
    receive_first = False
    if receive_from_left:
        receive_first = idx_start >= work_end
    if receive_from_right:
        receive_first = idx_end <= work_start

    # Output buffer for the rebalanced bank
    buff = np.zeros(
        simulation["bank_source"]["particle_data"].shape[0], dtype=type_.particle_data
    )

    with objmode(size="int64"):
        # MPI-friendly numpy copy of the local bank
        size = get_bank_size(simulation["bank_source"])
        bank = np.array(simulation["bank_source"]["particle_data"][:size])

        if receive_first:
            if receive_from_left:
                bank = np.insert(bank, 0, MPI.COMM_WORLD.recv(source=left))
                receive_from_left = False
            if receive_from_right:
                bank = np.append(bank, MPI.COMM_WORLD.recv(source=right))
                receive_from_right = False

        # Non-blocking sends of the excess particles
        if send_to_left:
            n = work_start - idx_start
            send_to_left_status = MPI.COMM_WORLD.isend(bank[:n], dest=left)
            bank = bank[n:]
        if send_to_right:
            n = idx_end - work_end
            send_to_right_status = MPI.COMM_WORLD.isend(bank[-n:], dest=right)
            bank = bank[:-n]

        # Remaining receives
        if receive_from_left:
            bank = np.insert(bank, 0, MPI.COMM_WORLD.recv(source=left))
        if receive_from_right:
            bank = np.append(bank, MPI.COMM_WORLD.recv(source=right))

        # Wait until sent messages are received
        if send_to_left:
            send_to_left_status.Wait()
        if send_to_right:
            send_to_right_status.Wait()

        # Stage result in the output buffer
        size = bank.shape[0]
        for i in range(size):
            buff[i] = bank[i]

    # Commit the rebalanced bank
    set_bank_size(simulation["bank_source"], size)
    for i in range(size):
        simulation["bank_source"]["particle_data"][i] = buff[i]


# ======================================================================================
# MPI collective operations
# ======================================================================================


@njit
def bank_scanning(bank, simulation):
    """Exclusive prefix scan of bank sizes.

    Returns (global starting index of this rank's particles, local size,
    global size).
    """
    N_local = get_bank_size(bank)

    # Exclusive scan gives this rank's starting index
    buff = np.zeros(1, dtype=np.int64)
    with objmode():
        MPI.COMM_WORLD.Exscan(np.array([N_local]), buff, MPI.SUM)
    idx_start = buff[0]

    # Last rank holds the global total; broadcast it
    buff[0] += N_local
    with objmode():
        MPI.COMM_WORLD.Bcast(buff, simulation["mpi_size"] - 1)
    N_global = buff[0]

    return idx_start, N_local, N_global


@njit
def bank_scanning_weight(bank, simulation):
    """Exclusive prefix scan of bank particle weights.

    Returns (this rank's starting weight, local weight CDF shifted into the
    global frame, global total weight).
    """
    # Local weight CDF
    N_local = get_bank_size(bank)
    w_cdf = np.zeros(N_local + 1)
    for i in range(N_local):
        w_cdf[i + 1] = w_cdf[i] + bank["particle_data"][i]["w"]
    W_local = w_cdf[-1]

    # Exclusive scan gives this rank's starting weight
    buff = np.zeros(1, dtype=np.float64)
    with objmode():
        MPI.COMM_WORLD.Exscan(np.array([W_local]), buff, MPI.SUM)
    w_start = buff[0]
    w_cdf += w_start

    # Last rank holds the global total; broadcast it
    buff[0] = w_cdf[-1]
    with objmode():
        MPI.COMM_WORLD.Bcast(buff, simulation["mpi_size"] - 1)
    W_global = buff[0]

    return w_start, w_cdf, W_global


@njit
def normalize_weight(bank, norm):
    """Scale all particle weights so the global total equals `norm`."""
    W = total_weight(bank)
    for i in range(get_bank_size(bank)):
        bank["particle_data"][i]["w"] *= norm / W


@njit
def total_weight(bank):
    """Global (MPI-reduced) total particle weight in the bank."""
    W_local = np.zeros(1)
    for i in range(get_bank_size(bank)):
        W_local[0] += bank["particle_data"][i]["w"]

    buff = np.zeros(1, np.float64)
    with objmode():
        MPI.COMM_WORLD.Allreduce(W_local, buff, MPI.SUM)
    return buff[0]


@njit
def total_size(bank):
    """Global (MPI-reduced) total particle count in the bank."""
    # Local size
    local_size = np.ones(1, np.int64) * bank["size"]

    buff = np.zeros(1, np.int64)
    with objmode():
        MPI.COMM_WORLD.Allreduce(local_size, buff, MPI.SUM)
    return buff[0]
# --- mcdc/transport/physics/electron/interface.py (reconstructed) -------------

# ======================================================================================
# Particle attributes
# ======================================================================================


@njit
def particle_speed(particle_container, simulation, data):
    """Electron speed; delegates to the native physics backend."""
    return native.particle_speed(particle_container)


# ======================================================================================
# Material properties
# ======================================================================================


@njit
def macro_xs(reaction_type, particle_container, simulation, data):
    """Macroscopic electron cross section; delegates to the native backend."""
    return native.macro_xs(reaction_type, particle_container, simulation, data)


# ======================================================================================
# Collision
# ======================================================================================


@njit
def collision(particle_container, collision_data_container, program, data):
    """Perform an electron collision; delegates to the native backend."""
    native.collision(particle_container, collision_data_container, program, data)


# --- mcdc/transport/physics/electron/native.py (reconstructed) ----------------

# ======================================================================================
# Particle attributes
# ======================================================================================


@njit
def particle_speed(particle_container):
    """Relativistic electron speed from kinetic energy E.

    v = c * pc / (E + m), with (pc)^2 = E (E + 2m).
    """
    E = particle_container[0]["E"]
    m = ELECTRON_MASS
    return LIGHT_SPEED * math.sqrt(E * (E + 2.0 * m)) / (E + m)


# ======================================================================================
# Material properties
# ======================================================================================


@njit
def macro_xs(reaction_type, particle_container, simulation, data):
    """Macroscopic cross section: sum of element density times micro XS."""
    particle = particle_container[0]
    material = simulation["native_materials"][particle["material_ID"]]
    E = particle["E"]

    result = 0.0
    for i in range(material["N_element"]):
        element_ID = int(mcdc_get.native_material.element_IDs(i, material, data))
        element = simulation["elements"][element_ID]

        density = mcdc_get.native_material.element_densities(i, material, data)
        result += density * total_micro_xs(reaction_type, E, element, data)

    return result


@njit
def total_micro_xs(reaction_type, E, element, data):
    """Microscopic cross section of a reaction group, linearly interpolated
    on the element's electron energy grid."""
    idx, E0, E1 = evaluate_electron_xs_energy_grid(E, element, data)
    if reaction_type == ELECTRON_REACTION_TOTAL:
        xs0 = mcdc_get.element.electron_total_xs(idx, element, data)
        xs1 = mcdc_get.element.electron_total_xs(idx + 1, element, data)
    elif reaction_type == ELECTRON_REACTION_IONIZATION:
        xs0 = mcdc_get.element.electron_ionization_xs(idx, element, data)
        xs1 = mcdc_get.element.electron_ionization_xs(idx + 1, element, data)
    elif reaction_type == ELECTRON_REACTION_ELASTIC_SCATTERING:
        xs0 = mcdc_get.element.electron_elastic_xs(idx, element, data)
        xs1 = mcdc_get.element.electron_elastic_xs(idx + 1, element, data)
    elif reaction_type == ELECTRON_REACTION_EXCITATION:
        xs0 = mcdc_get.element.electron_excitation_xs(idx, element, data)
        xs1 = mcdc_get.element.electron_excitation_xs(idx + 1, element, data)
    elif reaction_type == ELECTRON_REACTION_BREMSSTRAHLUNG:
        xs0 = mcdc_get.element.electron_bremsstrahlung_xs(idx, element, data)
        xs1 = mcdc_get.element.electron_bremsstrahlung_xs(idx + 1, element, data)
    return linear_interpolation(E, E0, E1, xs0, xs1)


@njit
def reaction_micro_xs(E, reaction_base, element, data):
    """Microscopic cross section of an individual reaction.

    The reaction's XS table starts partway up the element energy grid
    (`xs_offset_`); below that threshold the cross section is zero.
    """
    idx, E0, E1 = evaluate_electron_xs_energy_grid(E, element, data)

    offset = reaction_base["xs_offset_"]
    if idx < offset:
        return 0.0
    idx -= offset

    xs0 = mcdc_get.electron_reaction.xs(idx, reaction_base, data)
    xs1 = mcdc_get.electron_reaction.xs(idx + 1, reaction_base, data)
    return linear_interpolation(E, E0, E1, xs0, xs1)


# ======================================================================================
# Collision
# ======================================================================================


@njit
def collision(particle_container, collision_data_container, program, data):
    """Sample the colliding element and reaction, then perform the reaction.

    Below the cutoff energy the electron is killed and its remaining energy
    deposited locally.
    """
    simulation = util.access_simulation(program)
    particle = particle_container[0]
    collision_data = collision_data_container[0]
    material = simulation["native_materials"][particle["material_ID"]]

    E = particle["E"]

    # Cutoff: deposit everything and terminate
    if E <= ELECTRON_CUTOFF_ENERGY:
        collision_data["energy_deposition"] += E * particle["w"]
        particle["alive"] = False
        particle["E"] = 0.0
        return

    # ==================================================================================
    # Sample colliding element (discrete CDF over density-weighted total XS)
    # ==================================================================================

    SigmaT = macro_xs(ELECTRON_REACTION_TOTAL, particle_container, simulation, data)

    xi = rng.lcg(particle_container) * SigmaT
    cumulative = 0.0
    for i in range(material["N_element"]):
        element_ID = int(mcdc_get.native_material.element_IDs(i, material, data))
        element = simulation["elements"][element_ID]

        density = mcdc_get.native_material.element_densities(i, material, data)
        sigmaT = total_micro_xs(ELECTRON_REACTION_TOTAL, E, element, data)

        cumulative += density * sigmaT
        if cumulative > xi:
            break

    # ==================================================================================
    # Sample and perform reaction (group first, then reaction within group)
    # ==================================================================================

    sigma_ionization = total_micro_xs(ELECTRON_REACTION_IONIZATION, E, element, data)
    sigma_elastic = total_micro_xs(
        ELECTRON_REACTION_ELASTIC_SCATTERING, E, element, data
    )
    sigma_bremsstrahlung = total_micro_xs(
        ELECTRON_REACTION_BREMSSTRAHLUNG, E, element, data
    )
    sigma_excitation = total_micro_xs(ELECTRON_REACTION_EXCITATION, E, element, data)

    xi = rng.lcg(particle_container) * sigmaT
    cumulative = 0.0

    # --- Ionization group
    cumulative += sigma_ionization
    if xi < cumulative:
        # Rewind and walk the individual ionization reactions
        cumulative -= sigma_ionization
        for i in range(element["N_electron_ionization_reaction"]):
            reaction_ID = int(
                mcdc_get.element.electron_ionization_reaction_IDs(i, element, data)
            )
            reaction = simulation["electron_ionization_reactions"][reaction_ID]
            reaction_base = simulation["electron_reactions"][reaction["parent_ID"]]
            cumulative += reaction_micro_xs(E, reaction_base, element, data)

            if xi < cumulative:
                ionization(
                    reaction,
                    particle_container,
                    collision_data_container,
                    element,
                    program,
                    data,
                )
                return

    # --- Elastic scattering group
    cumulative += sigma_elastic
    if xi < cumulative:
        cumulative -= sigma_elastic
        for i in range(element["N_electron_elastic_scattering_reaction"]):
            reaction_ID = int(
                mcdc_get.element.electron_elastic_scattering_reaction_IDs(
                    i, element, data
                )
            )
            reaction = simulation["electron_elastic_scattering_reactions"][reaction_ID]
            reaction_base = simulation["electron_reactions"][reaction["parent_ID"]]
            cumulative += reaction_micro_xs(E, reaction_base, element, data)

            if xi < cumulative:
                elastic_scattering(
                    reaction, particle_container, element, simulation, data
                )
                return

    # --- Bremsstrahlung group
    cumulative += sigma_bremsstrahlung
    if xi < cumulative:
        cumulative -= sigma_bremsstrahlung
        for i in range(element["N_electron_bremsstrahlung_reaction"]):
            reaction_ID = int(
                mcdc_get.element.electron_bremsstrahlung_reaction_IDs(i, element, data)
            )
            reaction = simulation["electron_bremsstrahlung_reactions"][reaction_ID]
            reaction_base = simulation["electron_reactions"][reaction["parent_ID"]]
            cumulative += reaction_micro_xs(E, reaction_base, element, data)

            if xi < cumulative:
                bremsstrahlung(
                    reaction,
                    particle_container,
                    collision_data_container,
                    simulation,
                    data,
                )
                return

    # --- Excitation group
    cumulative += sigma_excitation
    if xi < cumulative:
        cumulative -= sigma_excitation
        for i in range(element["N_electron_excitation_reaction"]):
            reaction_ID = int(
                mcdc_get.element.electron_excitation_reaction_IDs(i, element, data)
            )
            reaction = simulation["electron_excitation_reactions"][reaction_ID]
            reaction_base = simulation["electron_reactions"][reaction["parent_ID"]]
            cumulative += reaction_micro_xs(E, reaction_base, element, data)

            if xi < cumulative:
                excitation(
                    reaction,
                    particle_container,
                    collision_data_container,
                    simulation,
                    data,
                )
                return
# ======================================================================================
# Elastic scattering
# ======================================================================================


@njit
def elastic_scattering(reaction, particle_container, element, simulation, data):
    """Sample an elastic-scattering cosine and rotate the electron direction.

    The angular distribution is split at `mu_cut`: large-angle scattering is
    sampled from the tabulated distribution, small-angle scattering from an
    analytic screened-Coulomb tail.
    """
    particle = particle_container[0]
    E = particle["E"]

    # -------------------------------------------------------------------------
    # Total elastic xs and the tabulated large-angle part
    # -------------------------------------------------------------------------

    reaction_base_ID = int(reaction["parent_ID"])
    reaction_base = simulation["electron_reactions"][reaction_base_ID]
    xs_total = reaction_micro_xs(E, reaction_base, element, data)

    xs_large = elastic_large_xs(E, reaction, simulation, data)

    # Clamp against numerical noise in the tabulated data
    if xs_large < 0.0:
        xs_large = 0.0
    if xs_large > xs_total:
        xs_large = xs_total

    prob_large = xs_large / xs_total
    mu_cut = float(reaction["mu_cut"])

    xi = rng.lcg(particle_container)

    if xi < prob_large:
        # ---------------------------------------------------------------------
        # Large-angle elastic scattering: tabulated angular distribution
        # ---------------------------------------------------------------------
        multi_table = simulation["multi_table_distributions"][reaction["mu_ID"]]
        mu0 = sample_multi_table(E, particle_container, multi_table, data)
    else:
        # ---------------------------------------------------------------------
        # Small-angle elastic scattering (Coulomb tail sampling)
        # ---------------------------------------------------------------------
        Z = int(element["atomic_number"])
        mu0 = sample_small_angle_mu_coulomb(E, Z, particle_container, mu_cut)

    # Rotate direction by (mu0, azimuth)
    azi = 2.0 * PI * rng.lcg(particle_container)
    new_ux, new_uy, new_uz = scatter_direction(
        particle["ux"], particle["uy"], particle["uz"], mu0, azi
    )
    particle["ux"] = new_ux
    particle["uy"] = new_uy
    particle["uz"] = new_uz


@njit
def compute_scattering_eta(E, Z):
    """Screening parameter for small-angle Coulomb scattering.

    Looks like a Molière-style screening angle — confirm against the model
    documentation.
    """
    pc = math.sqrt(E * (E + 2.0 * ELECTRON_MASS))
    beta = pc / (E + ELECTRON_MASS)
    tau = E / ELECTRON_MASS
    FINE_STRUCTURE_CONSTANT = 7.2973525693e-3

    r = (FINE_STRUCTURE_CONSTANT * ELECTRON_MASS) / (0.885 * pc)
    z_pow = float(Z) ** (2.0 / 3.0)
    bracket = 1.13 + 3.76 * ((FINE_STRUCTURE_CONSTANT * float(Z)) / beta) ** 2
    rel = math.sqrt(tau / (tau + 1.0))

    return 0.25 * (r * r) * z_pow * bracket * rel


@njit
def sample_small_angle_mu_coulomb(E, Z, rng_state, mu_cut):
    """Sample mu in [mu_cut, 1] from the screened-Coulomb angular tail.

    Inverse-CDF sampling of the 1/(eta + (1 - mu))^2 density restricted to
    small angles.
    """
    eta = compute_scattering_eta(E, Z)

    x_cut = 1.0 - mu_cut
    u = rng.lcg(rng_state)

    denom = (1.0 / eta) - (1.0 / (eta + x_cut))
    inv = (1.0 / eta) - u * denom
    x = (1.0 / inv) - eta

    return 1.0 - x


@njit
def elastic_large_xs(E, reaction, simulation, data):
    """Tabulated large-angle elastic cross section at energy E."""
    data_base = simulation["data"][int(reaction["xs_large_ID"])]
    return evaluate_data(E, data_base, simulation, data)


# ======================================================================================
# Excitation
# ======================================================================================


@njit
def excitation(reaction, particle_container, collision_data_container, simulation, data):
    """Apply an excitation energy loss; kill the electron if it drops below cutoff."""
    particle = particle_container[0]
    collision_data = collision_data_container[0]

    E = particle["E"]
    dE = evaluate_eloss(E, reaction, simulation, data)
    E_out = E - dE

    # Below cutoff: deposit all remaining energy and terminate
    if E_out <= ELECTRON_CUTOFF_ENERGY:
        collision_data["energy_deposition"] += E * particle["w"]
        particle["E"] = 0.0
        particle["alive"] = False
        return

    # Above cutoff: deposit only the excitation loss
    particle["E"] = E_out
    collision_data["energy_deposition"] += dE * particle["w"]


@njit
def evaluate_eloss(E, reaction, simulation, data):
    """Tabulated energy loss of the reaction at energy E."""
    data_base = simulation["data"][int(reaction["eloss_ID"])]
    return evaluate_data(E, data_base, simulation, data)
# ======================================================================================
# Bremsstrahlung
# ======================================================================================


@njit
def bremsstrahlung(
    reaction, particle_container, collision_data_container, simulation, data
):
    """Apply a bremsstrahlung radiative energy loss.

    The radiated energy dE is not deposited — presumably carried off by the
    (untracked) photon; confirm against the physics model.  If the electron
    drops below cutoff it is killed and its remaining energy deposited.
    """
    particle = particle_container[0]
    collision_data = collision_data_container[0]

    E = particle["E"]
    dE = evaluate_eloss(E, reaction, simulation, data)
    E_out = E - dE

    # Below cutoff: deposit the residual electron energy and terminate
    if E_out <= ELECTRON_CUTOFF_ENERGY:
        collision_data["energy_deposition"] += E_out * particle["w"]
        particle["E"] = 0.0
        particle["alive"] = False
        return

    # Above cutoff: photon escapes, electron continues with reduced energy
    particle["E"] = E_out


# ======================================================================================
# Ionization
# ======================================================================================


@njit
def ionization(
    reaction, particle_container, collision_data_container, element, program, data
):
    """Perform an ionization: pick a subshell, emit a delta ray, deposit the
    binding energy, and conserve momentum for the surviving primary."""
    simulation = util.access_simulation(program)
    particle = particle_container[0]
    collision_data = collision_data_container[0]

    E = particle["E"]

    # --- Sample subshell proportional to its cross section
    N = int(reaction["N_subshell"])
    xs_vals = np.empty(N, dtype=np.float64)

    xs_sum = 0.0
    for i in range(N):
        xs_sub_ID = int(
            mcdc_get.electron_ionization_reaction.subshell_x_IDs(i, reaction, data)
        )
        xs_sub_table = simulation["data"][xs_sub_ID]
        xs_i = evaluate_data(E, xs_sub_table, simulation, data)
        xs_vals[i] = xs_i
        xs_sum += xs_i

    xi = rng.lcg(particle_container) * xs_sum
    running = 0.0
    chosen = 0
    for i in range(N):
        running += xs_vals[i]
        if running >= xi:
            chosen = i
            break

    # --- Binding energy of the chosen subshell
    B = mcdc_get.element.electron_ionization_subshell_binding_energy(
        chosen, element, data
    )
    if E <= B:
        # Cannot ionize: deposit everything and terminate
        collision_data["energy_deposition"] += E * particle["w"]
        particle["alive"] = False
        particle["E"] = 0.0
        return

    # --- Sample delta-ray (secondary) energy
    dist_ID = int(
        mcdc_get.electron_ionization_reaction.subshell_product_IDs(
            chosen, reaction, data
        )
    )
    dist_base = simulation["distributions"][dist_ID]
    T_delta = sample_distribution(E, dist_base, particle_container, simulation, data)

    # Primary outgoing energy; the sampled distribution is assumed to keep
    # E_out non-negative — TODO confirm
    E_out = E - B - T_delta
    particle["E"] = E_out

    # Binding energy is deposited locally
    collision_data["energy_deposition"] += B * particle["w"]

    primary_alive_after = True
    if E_out <= ELECTRON_CUTOFF_ENERGY:
        collision_data["energy_deposition"] += E_out * particle["w"]
        particle["E"] = 0.0
        particle["alive"] = False
        primary_alive_after = False

    # Delta ray below cutoff: deposit it locally, no secondary is banked
    if T_delta <= ELECTRON_CUTOFF_ENERGY:
        collision_data["energy_deposition"] += T_delta * particle["w"]
        return

    # --- Delta-ray direction
    ux_delta, uy_delta, uz_delta = sample_delta_direction(
        T_delta, E, particle_container
    )

    # --- Momentum conservation for the surviving primary
    if primary_alive_after:
        p_before = math.sqrt(E * (E + 2.0 * ELECTRON_MASS))
        p_delta = math.sqrt(T_delta * (T_delta + 2.0 * ELECTRON_MASS))

        # Residual momentum = incoming momentum minus delta-ray momentum
        px = p_before * particle["ux"] - p_delta * ux_delta
        py = p_before * particle["uy"] - p_delta * uy_delta
        pz = p_before * particle["uz"] - p_delta * uz_delta

        norm_sq = px * px + py * py + pz * pz
        if norm_sq > 0.0:
            norm = math.sqrt(norm_sq)
            particle["ux"] = px / norm
            particle["uy"] = py / norm
            particle["uz"] = pz / norm

    # --- Bank the delta ray as a child particle
    particle_container_new = np.zeros(1, type_.particle_data)
    particle_new = particle_container_new[0]
    particle_module.copy_as_child(particle_container_new, particle_container)

    particle_new["E"] = T_delta
    particle_new["ux"] = ux_delta
    particle_new["uy"] = uy_delta
    particle_new["uz"] = uz_delta
    particle_new["w"] = particle["w"]

    particle_bank_module.bank_active_particle(particle_container_new, program)


@njit
def compute_mu_delta(T_delta, T_prim):
    """Cosine of the delta-ray emission angle from two-body kinematics."""
    pd = math.sqrt(T_delta * (T_delta + 2.0 * ELECTRON_MASS))
    pp = math.sqrt(T_prim * (T_prim + 2.0 * ELECTRON_MASS))
    mu = (T_delta * (T_prim + 2.0 * ELECTRON_MASS)) / (pd * pp)

    # Clamp against numerical noise
    if mu < -1.0:
        mu = -1.0
    if mu > 1.0:
        mu = 1.0

    return mu


@njit
def sample_delta_direction(T_delta, T_prim, particle_container):
    """Delta-ray direction: kinematic polar angle, uniform azimuth."""
    particle = particle_container[0]
    mu = compute_mu_delta(T_delta, T_prim)
    azi = 2.0 * PI * rng.lcg(particle_container)
    return scatter_direction(particle["ux"], particle["uy"], particle["uz"], mu, azi)


# --- mcdc/transport/physics/interface.py (reconstructed) ----------------------

# ======================================================================================
# Particle attributes
# ======================================================================================


@njit
def particle_speed(particle_container, simulation, data):
    """Dispatch particle speed by species; -1.0 sentinel for unknown types."""
    species = particle_container[0]["particle_type"]
    if species == PARTICLE_NEUTRON:
        return neutron.particle_speed(particle_container, simulation, data)
    elif species == PARTICLE_ELECTRON:
        return electron.particle_speed(particle_container, simulation, data)
    return -1.0
@njit
def macro_xs(reaction_type, particle_container, simulation, data):
    """Dispatch macroscopic-xs evaluation on the particle's species.

    Returns -1.0 as a sentinel for an unsupported particle type.
    """
    particle = particle_container[0]
    if particle["particle_type"] == PARTICLE_NEUTRON:
        return neutron.macro_xs(reaction_type, particle_container, simulation, data)
    elif particle["particle_type"] == PARTICLE_ELECTRON:
        return electron.macro_xs(reaction_type, particle_container, simulation, data)
    return -1.0


@njit
def neutron_production_xs(reaction_type, particle_container, simulation, data):
    """Neutron-production xs; only defined for neutrons (-1.0 otherwise)."""
    particle = particle_container[0]
    if particle["particle_type"] == PARTICLE_NEUTRON:
        return neutron.neutron_production_xs(
            reaction_type, particle_container, simulation, data
        )
    return -1.0


# ======================================================================================
# Collision
# ======================================================================================


@njit
def collision_distance(particle_container, simulation, data):
    """Sample the distance to the next collision for the particle's species.

    Returns INF in a vacuum (zero total cross section).
    """
    particle = particle_container[0]

    # Get total cross-section (species-specific TOTAL reaction constant)
    SigmaT = 0.0
    if particle["particle_type"] == PARTICLE_NEUTRON:
        SigmaT = macro_xs(NEUTRON_REACTION_TOTAL, particle_container, simulation, data)
    elif particle["particle_type"] == PARTICLE_ELECTRON:
        SigmaT = macro_xs(ELECTRON_REACTION_TOTAL, particle_container, simulation, data)

    # Vacuum material?
    if SigmaT == 0.0:
        return INF

    # Sample collision distance (exponential free flight)
    xi = rng.lcg(particle_container)
    distance = -math.log(xi) / SigmaT
    return distance


@njit
def collision(particle_container, collision_data_container, program, data):
    """Dispatch the collision treatment on the particle's species."""
    particle = particle_container[0]

    if particle["particle_type"] == PARTICLE_NEUTRON:
        neutron.collision(particle_container, collision_data_container, program, data)
    elif particle["particle_type"] == PARTICLE_ELECTRON:
        electron.collision(particle_container, collision_data_container, program, data)
native.macro_xs(reaction_type, particle_container, simulation, data) + + +@njit +def neutron_production_xs(reaction_type, particle_container, simulation, data): + if simulation["settings"]["neutron_multigroup_mode"]: + return multigroup.neutron_production_xs( + reaction_type, particle_container, simulation, data + ) + else: + return native.neutron_production_xs( + reaction_type, particle_container, simulation, data + ) + + +# ====================================================================================== +# Collision +# ====================================================================================== + + +@njit +def collision(particle_container, collision_data_container, program, data): + simulation = util.access_simulation(program) + + if simulation["settings"]["neutron_multigroup_mode"]: + multigroup.collision( + particle_container, collision_data_container, program, data + ) + else: + native.collision(particle_container, collision_data_container, program, data) diff --git a/mcdc/mcdc/transport/physics/neutron/.ipynb_checkpoints/native-checkpoint.py b/mcdc/mcdc/transport/physics/neutron/.ipynb_checkpoints/native-checkpoint.py new file mode 100644 index 000000000..2c2b3f729 --- /dev/null +++ b/mcdc/mcdc/transport/physics/neutron/.ipynb_checkpoints/native-checkpoint.py @@ -0,0 +1,763 @@ +import math +import numpy as np + +from numba import njit + +#### + +import mcdc.mcdc_get as mcdc_get +import mcdc.numba_types as type_ +import mcdc.transport.particle as particle_module +import mcdc.transport.particle_bank as particle_bank_module +import mcdc.transport.rng as rng + +from mcdc.constant import ( + ANGLE_DISTRIBUTED, + ANGLE_ENERGY_CORRELATED, + ANGLE_ISOTROPIC, + BOLTZMANN_K, + THERMAL_THRESHOLD_FACTOR, + LIGHT_SPEED, + NEUTRON_MASS, + PI, + PI_HALF, + PI_SQRT, + NEUTRON_REACTION_INELASTIC_SCATTERING, + NEUTRON_REACTION_TOTAL, + NEUTRON_REACTION_CAPTURE, + NEUTRON_REACTION_ELASTIC_SCATTERING, + NEUTRON_REACTION_FISSION, + REFERENCE_FRAME_COM, +) +from 
mcdc.transport.data import evaluate_data +from mcdc.transport.distribution import ( + sample_correlated_distribution, + sample_distribution, + sample_isotropic_cosine, + sample_isotropic_direction, + sample_multi_table, +) +from mcdc.transport.physics.util import ( + evaluate_neutron_xs_energy_grid, + scatter_direction, +) +from mcdc.transport.util import find_bin, linear_interpolation + +# ====================================================================================== +# Particle attributes +# ====================================================================================== + + +@njit +def particle_speed(particle_container): + particle = particle_container[0] + E = particle["E"] + mass = NEUTRON_MASS + return LIGHT_SPEED * math.sqrt(E * (E + 2.0 * mass)) / (E + mass) + + +@njit +def particle_energy_from_speed(speed): + beta = speed / LIGHT_SPEED + gamma = 1.0 / math.sqrt(1.0 - beta * beta) + mass = NEUTRON_MASS + return mass * (gamma - 1.0) + + +# ====================================================================================== +# Material properties +# ====================================================================================== + + +@njit +def macro_xs(reaction_type, particle_container, mcdc, data): + particle = particle_container[0] + material = mcdc["native_materials"][particle["material_ID"]] + E = particle["E"] + + total = 0.0 + + for i in range(material["N_nuclide"]): + nuclide_ID = int(mcdc_get.native_material.nuclide_IDs(i, material, data)) + nuclide = mcdc["nuclides"][nuclide_ID] + + nuclide_density = mcdc_get.native_material.nuclide_densities(i, material, data) + xs = total_micro_xs(reaction_type, E, nuclide, data) + + total += nuclide_density * xs + + return total + + +@njit +def total_micro_xs(reaction_type, E, nuclide, data): + idx, E0, E1 = evaluate_neutron_xs_energy_grid(E, nuclide, data) + if reaction_type == NEUTRON_REACTION_TOTAL: + xs0 = mcdc_get.nuclide.neutron_total_xs(idx, nuclide, data) + xs1 = 
@njit
def total_micro_xs(reaction_type, E, nuclide, data):
    """Microscopic xs of a lumped reaction type at energy E (linear interpolation).

    Looks up the bracketing points on the nuclide's unionized energy grid and
    interpolates the tabulated xs for the requested lumped reaction type.

    Fix: the original if/elif chain had no fallback, leaving xs0/xs1 unbound
    for an unrecognized reaction_type (a hard failure under numba's type
    inference). An unknown type now yields 0.0.
    """
    idx, E0, E1 = evaluate_neutron_xs_energy_grid(E, nuclide, data)
    if reaction_type == NEUTRON_REACTION_TOTAL:
        xs0 = mcdc_get.nuclide.neutron_total_xs(idx, nuclide, data)
        xs1 = mcdc_get.nuclide.neutron_total_xs(idx + 1, nuclide, data)
    elif reaction_type == NEUTRON_REACTION_ELASTIC_SCATTERING:
        xs0 = mcdc_get.nuclide.neutron_elastic_xs(idx, nuclide, data)
        xs1 = mcdc_get.nuclide.neutron_elastic_xs(idx + 1, nuclide, data)
    elif reaction_type == NEUTRON_REACTION_CAPTURE:
        xs0 = mcdc_get.nuclide.neutron_capture_xs(idx, nuclide, data)
        xs1 = mcdc_get.nuclide.neutron_capture_xs(idx + 1, nuclide, data)
    elif reaction_type == NEUTRON_REACTION_INELASTIC_SCATTERING:
        xs0 = mcdc_get.nuclide.neutron_inelastic_xs(idx, nuclide, data)
        xs1 = mcdc_get.nuclide.neutron_inelastic_xs(idx + 1, nuclide, data)
    elif reaction_type == NEUTRON_REACTION_FISSION:
        xs0 = mcdc_get.nuclide.neutron_fission_xs(idx, nuclide, data)
        xs1 = mcdc_get.nuclide.neutron_fission_xs(idx + 1, nuclide, data)
    else:
        # Unknown reaction type: treat as zero cross section
        xs0 = 0.0
        xs1 = 0.0
    return linear_interpolation(E, E0, E1, xs0, xs1)


@njit
def reaction_micro_xs(E, reaction_base, nuclide, data):
    """Microscopic xs of a specific reaction at energy E.

    The reaction's xs table starts at `xs_offset_` on the nuclide's unionized
    grid (the reaction has a threshold); below that offset the xs is zero.
    """
    idx, E0, E1 = evaluate_neutron_xs_energy_grid(E, nuclide, data)

    # Apply offset
    offset = reaction_base["xs_offset_"]
    if idx < offset:
        return 0.0
    else:
        idx -= offset

    xs0 = mcdc_get.neutron_reaction.xs(idx, reaction_base, data)
    xs1 = mcdc_get.neutron_reaction.xs(idx + 1, reaction_base, data)
    return linear_interpolation(E, E0, E1, xs0, xs1)


@njit
def neutron_production_xs(reaction_type, particle_container, mcdc, data):
    """Neutron production xs (nu * Sigma) of the particle's material.

    Returns -1.0 as a sentinel for an unsupported reaction type.
    """
    particle = particle_container[0]
    material_base = mcdc["materials"][particle["material_ID"]]
    material = mcdc["native_materials"][material_base["child_ID"]]

    # Total production: sum of the producing channels (capture produces none)
    if reaction_type == NEUTRON_REACTION_TOTAL:
        elastic_type = NEUTRON_REACTION_ELASTIC_SCATTERING
        inelastic_type = NEUTRON_REACTION_INELASTIC_SCATTERING
        fission_type = NEUTRON_REACTION_FISSION
        elastic_xs = neutron_production_xs(elastic_type, particle_container, mcdc, data)
        inelastic_xs = neutron_production_xs(
            inelastic_type, particle_container, mcdc, data
        )
        fission_xs = neutron_production_xs(fission_type, particle_container, mcdc, data)
        return elastic_xs + inelastic_xs + fission_xs

    # Elastic scattering conserves the neutron: production xs equals the xs
    elif reaction_type == NEUTRON_REACTION_ELASTIC_SCATTERING:
        return macro_xs(reaction_type, particle_container, mcdc, data)

    elif reaction_type == NEUTRON_REACTION_CAPTURE:
        return 0.0

    # Inelastic: per-reaction multiplicity times per-reaction xs
    elif reaction_type == NEUTRON_REACTION_INELASTIC_SCATTERING:
        total = 0.0
        for i in range(material["N_nuclide"]):
            nuclide_ID = int(mcdc_get.native_material.nuclide_IDs(i, material, data))
            nuclide = mcdc["nuclides"][nuclide_ID]

            E = particle["E"]
            nuclide_density = mcdc_get.native_material.nuclide_densities(
                i, material, data
            )

            for j in range(nuclide["N_neutron_inelastic_scattering_reaction"]):
                reaction_ID = int(
                    mcdc_get.nuclide.neutron_inelastic_scattering_reaction_IDs(
                        j, nuclide, data
                    )
                )
                reaction_base = mcdc["neutron_reactions"][reaction_ID]
                reaction = mcdc["neutron_inelastic_scattering_reactions"][
                    reaction_base["child_ID"]
                ]

                xs = reaction_micro_xs(E, reaction_base, nuclide, data)
                nu = reaction["multiplicity"]
                total += nuclide_density * nu * xs

        return total

    # Fission: (nu_prompt + nu_delayed) times per-reaction xs
    elif reaction_type == NEUTRON_REACTION_FISSION:
        if not material_base["fissionable"]:
            return 0.0

        total = 0.0
        for i in range(material["N_nuclide"]):
            nuclide_ID = int(mcdc_get.native_material.nuclide_IDs(i, material, data))
            nuclide = mcdc["nuclides"][nuclide_ID]
            if not nuclide["fissionable"]:
                continue

            E = particle["E"]
            nuclide_density = mcdc_get.native_material.nuclide_densities(
                i, material, data
            )

            for j in range(nuclide["N_neutron_fission_reaction"]):
                reaction_ID = int(
                    mcdc_get.nuclide.neutron_fission_reaction_IDs(j, nuclide, data)
                )
                reaction_base = mcdc["neutron_reactions"][reaction_ID]
                reaction = mcdc["neutron_fission_reactions"][reaction_base["child_ID"]]

                xs = reaction_micro_xs(E, reaction_base, nuclide, data)
                nu_p = neutron_fission_prompt_multiplicity(E, nuclide, mcdc, data)
                nu_d = neutron_fission_delayed_multiplicity(E, nuclide, mcdc, data)
                nu = nu_d + nu_p
                total += nuclide_density * nu * xs

        return total

    else:
        return -1.0
@njit
def collision(particle_container, mcdc, data):
    """Continuous-energy neutron collision: sample nuclide, then reaction.

    Two-level discrete sampling: first the colliding nuclide (proportional to
    its macroscopic total xs contribution), then the reaction channel within
    that nuclide.
    """
    particle = particle_container[0]
    material = mcdc["native_materials"][particle["material_ID"]]

    # Particle properties
    E = particle["E"]

    # ==================================================================================
    # Sample colliding nuclide
    # ==================================================================================

    SigmaT = macro_xs(NEUTRON_REACTION_TOTAL, particle_container, mcdc, data)

    # Implicit capture: reduce weight, remove capture from the sampled total
    if mcdc["implicit_capture"]["active"]:
        SigmaC = macro_xs(NEUTRON_REACTION_CAPTURE, particle_container, mcdc, data)
        particle["w"] *= (SigmaT - SigmaC) / SigmaT
        SigmaT -= SigmaC

    xi = rng.lcg(particle_container) * SigmaT
    total = 0.0
    for i in range(material["N_nuclide"]):
        nuclide_ID = int(mcdc_get.native_material.nuclide_IDs(i, material, data))
        nuclide = mcdc["nuclides"][nuclide_ID]

        nuclide_density = mcdc_get.native_material.nuclide_densities(i, material, data)
        sigmaT = total_micro_xs(NEUTRON_REACTION_TOTAL, E, nuclide, data)

        # NOTE(review): the material-level weight adjustment above already
        # accounts for implicit capture; multiplying particle["w"] again here,
        # once per *iterated* nuclide, looks like a double (and
        # iteration-order-dependent) weight reduction — confirm intent.
        if mcdc["implicit_capture"]["active"]:
            sigmaC = total_micro_xs(NEUTRON_REACTION_CAPTURE, E, nuclide, data)
            particle["w"] *= (sigmaT - sigmaC) / sigmaT
            sigmaT -= sigmaC

        SigmaT_nuclide = nuclide_density * sigmaT
        total += SigmaT_nuclide

        if total > xi:
            break
    # NOTE(review): if round-off prevents the break, `nuclide`/`sigmaT` keep
    # their last-iteration values — relies on total ~= SigmaT at loop end.

    # ==================================================================================
    # Sample and perform reaction
    # ==================================================================================

    sigma_elastic = total_micro_xs(
        NEUTRON_REACTION_ELASTIC_SCATTERING, E, nuclide, data
    )
    sigma_inelastic = total_micro_xs(
        NEUTRON_REACTION_INELASTIC_SCATTERING, E, nuclide, data
    )
    sigma_fission = total_micro_xs(NEUTRON_REACTION_FISSION, E, nuclide, data)

    xi = rng.lcg(particle_container) * sigmaT

    # Elastic scattering: if the lumped channel is hit, re-accumulate over the
    # individual reactions to pick the specific one (same pattern below).
    total = sigma_elastic
    if xi < total:
        total -= sigma_elastic
        for i in range(nuclide["N_neutron_elastic_scattering_reaction"]):
            reaction_ID = int(
                mcdc_get.nuclide.neutron_elastic_scattering_reaction_IDs(
                    i, nuclide, data
                )
            )
            reaction = mcdc["neutron_elastic_scattering_reactions"][reaction_ID]
            reaction_base_ID = reaction["parent_ID"]
            reaction_base = mcdc["neutron_reactions"][reaction_base_ID]
            total += reaction_micro_xs(E, reaction_base, nuclide, data)
            if xi < total:
                elastic_scattering(reaction, particle_container, nuclide, mcdc, data)
                return

    # Capture (only sampled when implicit capture is off)
    if not mcdc["implicit_capture"]["active"]:
        sigma_capture = total_micro_xs(NEUTRON_REACTION_CAPTURE, E, nuclide, data)
        total += sigma_capture
        if xi < total:
            particle["alive"] = False
            return

    # Inelastic scattering
    total += sigma_inelastic
    if xi < total:
        total -= sigma_inelastic

        for i in range(nuclide["N_neutron_inelastic_scattering_reaction"]):
            reaction_ID = int(
                mcdc_get.nuclide.neutron_inelastic_scattering_reaction_IDs(
                    i, nuclide, data
                )
            )
            reaction = mcdc["neutron_inelastic_scattering_reactions"][reaction_ID]
            reaction_base_ID = reaction["parent_ID"]
            reaction_base = mcdc["neutron_reactions"][reaction_base_ID]
            xs = reaction_micro_xs(E, reaction_base, nuclide, data)
            total += xs
            if xi < total:
                inelastic_scattering(reaction, particle_container, nuclide, mcdc, data)
                return

    # Fission (arrive here only if nuclide is fissionable)
    total += sigma_fission
    if xi < total:
        total -= sigma_fission
        for i in range(nuclide["N_neutron_fission_reaction"]):
            reaction_ID = int(
                mcdc_get.nuclide.neutron_fission_reaction_IDs(i, nuclide, data)
            )
            reaction = mcdc["neutron_fission_reactions"][reaction_ID]
            reaction_base_ID = reaction["parent_ID"]
            reaction_base = mcdc["neutron_reactions"][reaction_base_ID]
            total += reaction_micro_xs(E, reaction_base, nuclide, data)
            if xi < total:
                fission(reaction, particle_container, nuclide, mcdc, data)
                return
@njit
def elastic_scattering(reaction, particle_container, nuclide, mcdc, data):
    """Elastic scattering with free-gas target motion at low neutron energy.

    Transforms to the center-of-mass frame (including a sampled thermal
    nucleus velocity when E is within the thermal threshold), samples the
    scattering cosine from the tabulated distribution, and transforms back.
    """
    # Particle attributes
    particle = particle_container[0]
    E = particle["E"]
    ux = particle["ux"]
    uy = particle["uy"]
    uz = particle["uz"]

    # Sample nucleus thermal velocity: above the threshold the target is
    # treated as stationary; otherwise use the free-gas model.
    A = nuclide["atomic_weight_ratio"]
    temperature = nuclide["temperature"]
    if E > THERMAL_THRESHOLD_FACTOR * BOLTZMANN_K * temperature:
        Vx = 0.0
        Vy = 0.0
        Vz = 0.0
    else:
        Vx, Vy, Vz = sample_nucleus_velocity(A, particle_container)

    # =========================================================================
    # COM kinematics
    # =========================================================================

    # Particle speed
    speed = particle_speed(particle_container)

    # Neutron velocity - LAB
    vx = speed * ux
    vy = speed * uy
    vz = speed * uz

    # COM velocity
    COM_x = (vx + A * Vx) / (1.0 + A)
    COM_y = (vy + A * Vy) / (1.0 + A)
    COM_z = (vz + A * Vz) / (1.0 + A)

    # Neutron velocity - COM
    vx = vx - COM_x
    vy = vy - COM_y
    vz = vz - COM_z

    # Neutron speed - COM
    speed = math.sqrt(vx * vx + vy * vy + vz * vz)

    # Neutron initial direction - COM
    ux = vx / speed
    uy = vy / speed
    uz = vz / speed

    # Sample the scattering cosine from the multi-PDF distribution
    multi_table = mcdc["multi_table_distributions"][reaction["mu_table_ID"]]
    mu0 = sample_multi_table(E, particle_container, multi_table, data)

    # Scatter the direction in COM
    azi = 2.0 * PI * rng.lcg(particle_container)
    ux_new, uy_new, uz_new = scatter_direction(ux, uy, uz, mu0, azi)

    # Neutron final velocity - COM
    vx = speed * ux_new
    vy = speed * uy_new
    vz = speed * uz_new

    # =========================================================================
    # COM to LAB
    # =========================================================================

    # Final velocity - LAB
    vx = vx + COM_x
    vy = vy + COM_y
    vz = vz + COM_z

    # Final energy - LAB
    speed = math.sqrt(vx * vx + vy * vy + vz * vz)
    particle["E"] = particle_energy_from_speed(speed)

    # Final direction - LAB
    particle["ux"] = vx / speed
    particle["uy"] = vy / speed
    particle["uz"] = vz / speed


@njit
def sample_nucleus_velocity(A, particle_container):
    """Sample the target-nucleus velocity for free-gas elastic scattering (SVT).

    Rejection-samples the target speed from a Maxwellian and the
    neutron-target cosine, weighting by the relative speed.
    """
    particle = particle_container[0]

    # Particle speed
    speed = particle_speed(particle_container)

    # Maxwellian parameter
    # NOTE(review): the constant bakes in T = 293.6 K although the nuclide
    # carries a `temperature` field used by the caller — confirm whether the
    # temperature should propagate here.
    beta = math.sqrt(2.0659834e-11 * A)
    # The constant above is
    # (1.674927471e-27 kg) / (1.38064852e-19 cm^2 kg s^-2 K^-1) / (293.6 K)/2

    # Sample nuclide speed candidate V_tilda and
    # nuclide-neutron polar cosine candidate mu_tilda via
    # rejection sampling
    y = beta * speed
    while True:
        if rng.lcg(particle_container) < 2.0 / (2.0 + PI_SQRT * y):
            x = math.sqrt(
                -math.log(rng.lcg(particle_container) * rng.lcg(particle_container))
            )
        else:
            cos_val = math.cos(PI_HALF * rng.lcg(particle_container))
            x = math.sqrt(
                -math.log(rng.lcg(particle_container))
                - math.log(rng.lcg(particle_container)) * cos_val * cos_val
            )
        V_tilda = x / beta
        mu_tilda = 2.0 * rng.lcg(particle_container) - 1.0

        # Accept candidate V_tilda and mu_tilda?
        # NOTE(review): the standard SVT acceptance is xi < |v - V| / (v + V)
        # (accept proportionally to the relative speed); breaking on
        # xi > ratio accepts with probability 1 - ratio, which looks inverted
        # — confirm against the reference implementation.
        if rng.lcg(particle_container) > math.sqrt(
            speed * speed + V_tilda * V_tilda - 2.0 * speed * V_tilda * mu_tilda
        ) / (speed + V_tilda):
            break

    # Set nuclide velocity - LAB
    azi = 2.0 * PI * rng.lcg(particle_container)
    ux, uy, uz = scatter_direction(
        particle["ux"], particle["uy"], particle["uz"], mu_tilda, azi
    )
    Vx = ux * V_tilda
    Vy = uy * V_tilda
    Vz = uz * V_tilda

    return Vx, Vy, Vz
@njit
def inelastic_scattering(reaction, particle_container, nuclide, mcdc, data):
    """Inelastic scattering: emit `multiplicity` secondaries from tabulated spectra.

    The incident neutron is killed; for each secondary an angle and energy are
    sampled (possibly correlated), transformed from COM to LAB if needed, and
    banked. The last secondary reuses the incident particle slot.
    """
    # Particle attributes
    particle = particle_container[0]
    E = particle["E"]
    ux = particle["ux"]
    uy = particle["uy"]
    uz = particle["uz"]

    # Kill the current particle
    particle["alive"] = False

    # Number of secondaries and spectra; when each secondary has its own
    # spectrum, use them one-to-one, otherwise sample a spectrum per secondary.
    N = reaction["multiplicity"]
    N_spectrum = reaction["N_spectrum"]
    use_all_spectrum = N == N_spectrum

    # Set up secondary partice container
    particle_container_new = np.zeros(1, type_.particle_data)
    particle_new = particle_container_new[0]

    # Create the secondaries
    for n in range(N):
        # Set default attributes
        particle_module.copy_as_child(particle_container_new, particle_container)

        # ==============================================================================
        # Sample angle (if not energy-correlated)
        # ==============================================================================

        angle_type = reaction["angle_type"]
        if angle_type == ANGLE_ENERGY_CORRELATED:
            # mu is produced together with the energy below
            pass
        elif angle_type == ANGLE_ISOTROPIC:
            mu = sample_isotropic_cosine(particle_container_new)
        elif angle_type == ANGLE_DISTRIBUTED:
            distribution_base = mcdc["distributions"][reaction["mu_ID"]]
            multi_table = mcdc["multi_table_distributions"][
                distribution_base["child_ID"]
            ]
            mu = sample_multi_table(E, particle_container_new, multi_table, data)

        # ==============================================================================
        # Sample energy (also angle if correlated)
        # ==============================================================================

        # Get energy spectrum
        if use_all_spectrum:
            ID = int(
                mcdc_get.neutron_inelastic_scattering_reaction.energy_spectrum_IDs(
                    n, reaction, data
                )
            )
            spectrum_base = mcdc["distributions"][ID]
        else:
            # Discrete sampling of the spectrum by its tabulated probability
            # NOTE(review): if round-off keeps the loop from breaking,
            # spectrum_base stays unassigned — consider a fallback to the
            # last spectrum.
            probability_grid = mcdc_get.neutron_inelastic_scattering_reaction.spectrum_probability_grid_all(
                reaction, data
            )
            probability_idx = find_bin(E, probability_grid)
            xi = rng.lcg(particle_container_new)
            total = 0.0
            for j in range(N_spectrum):
                probability = (
                    mcdc_get.neutron_inelastic_scattering_reaction.spectrum_probability(
                        probability_idx, j, reaction, data
                    )
                )
                total += probability
                if xi < total:
                    ID = int(
                        mcdc_get.neutron_inelastic_scattering_reaction.energy_spectrum_IDs(
                            j, reaction, data
                        )
                    )
                    spectrum_base = mcdc["distributions"][ID]
                    break

        # Sample energy
        if not angle_type == ANGLE_ENERGY_CORRELATED:
            E_new = sample_distribution(
                E, spectrum_base, particle_container_new, mcdc, data, scale=True
            )
        else:
            E_new, mu = sample_correlated_distribution(
                E, spectrum_base, particle_container_new, mcdc, data, scale=True
            )

        # ==============================================================================
        # Frame transformation
        # ==============================================================================

        # Convert COM (E, mu) to LAB using the standard two-body relations
        reaction_base = mcdc["neutron_reactions"][int(reaction["parent_ID"])]
        reference_frame = reaction_base["reference_frame"]
        if reference_frame == REFERENCE_FRAME_COM:
            A = nuclide["atomic_weight_ratio"]
            mu_COM = mu
            E_COM = E_new

            E_new = (
                E_COM + (E + 2 * mu_COM * (A + 1) * math.sqrt(E * E_COM)) / (A + 1) ** 2
            )
            mu = mu_COM * math.sqrt(E_COM / E_new) + math.sqrt(E / E_new) / (A + 1)

        azi = 2.0 * PI * rng.lcg(particle_container_new)
        ux_new, uy_new, uz_new = scatter_direction(ux, uy, uz, mu, azi)

        # Now the secondary angle and energy are finalized
        particle_new["ux"] = ux_new
        particle_new["uy"] = uy_new
        particle_new["uz"] = uz_new
        particle_new["E"] = E_new

        # ==============================================================================
        # Bank the new particle
        # ==============================================================================

        # Keep it if it is the last particle (reuse the incident slot)
        if n == N - 1:
            particle["alive"] = True
            particle["ux"] = particle_new["ux"]
            particle["uy"] = particle_new["uy"]
            particle["uz"] = particle_new["uz"]
            particle["E"] = particle_new["E"]
        else:
            particle_bank_module.bank_active_particle(particle_container_new, mcdc)
@njit
def fission(reaction, particle_container, nuclide, mcdc, data):
    """Fission treatment (WIP: mid-migration to CGMF event-based sampling).

    NOTE(review): this function cannot run as written — see the inline flags.
    It appears to be a work-in-progress coupling to an external `cgmfwrap`
    event generator, saved by a Jupyter autosave (.ipynb_checkpoints).
    """
    settings = mcdc["settings"]
    # Particle properties
    particle = particle_container[0]
    E = particle["E"]
    ux = particle["ux"]
    uy = particle["uy"]
    uz = particle["uz"]
    Z = nuclide["atomic_number"]
    A = nuclide["atomic_weight_ratio"]
    # Kill the current particle
    particle["alive"] = False

    # Adjust production and product weights if weighted emission
    weight_production = 1.0
    weight_product = particle["w"]
    if mcdc["weighted_emission"]["active"]:
        weight_target = mcdc["weighted_emission"]["weight_target"]
        weight_production = particle["w"] / weight_target
        weight_product = weight_target

    # Fission yields
    N_delayed = nuclide["N_neutron_fission_delayed_precursor"]
    # NOTE(review): nu_p is used below but its computation is commented out
    # — NameError at runtime / numba compile failure. Restore this line or
    # obtain nu_p from the CGMF event.
    #nu_p = neutron_fission_prompt_multiplicity(E, nuclide, mcdc, data)
    nu_d = neutron_fission_delayed_multiplicity(E, nuclide, mcdc, data)
    # NOTE(review): A is the atomic *weight ratio* (a float), not the mass
    # number — 1000*Z + A does not form a valid integer ZAID.
    ZAID = 1000*Z+A

    # NOTE(review): cgmfwrap is never imported in this module.
    cgmfwrap.run_event(ZAID,E)

    nu = nu_d + nu_p
    # Get number of secondaries (stochastic rounding of the expected yield)
    N = int(
        math.floor(weight_production * nu / mcdc["k_eff"] + rng.lcg(particle_container))
    )

    # Set up secondary partice container
    particle_container_new = np.zeros(1, type_.particle_data)
    particle_new = particle_container_new[0]

    # Create the secondaries
    for n in range(N):
        # Set default attributes
        particle_module.copy_as_child(particle_container_new, particle_container)

        # Set weight
        particle_new["w"] = weight_product

        # Prompt or delayed?
        prompt = True
        delayed_group = -1
        xi = rng.lcg(particle_container_new)
        total = nu_p
        if xi > total:
            prompt = False
            # Determine delayed group
            for j in range(N_delayed):
                fraction = mcdc_get.nuclide.neutron_fission_delayed_fractions(
                    j, nuclide, data
                )
                total += fraction
                if xi < total:
                    delayed_group = j
                    break

        # ==============================================================================
        # Sample prompt neutron
        # ==============================================================================

        if prompt:
            # NOTE(review): neutron_energies, neutron_dir_cosu, neutorn_dir_cosv
            # (typo for "neutron"), and neutron_dir_cosw are undefined —
            # presumably meant to come from the cgmfwrap.run_event result.
            E_new = neutron_energies[n]
            ux_new = neutron_dir_cosu[n]
            uy_new = neutorn_dir_cosv[n]
            uz_new = neutron_dir_cosw[n]
            # Now the secondary angle and energy are finalized
            particle_new["ux"] = ux_new
            particle_new["uy"] = uy_new
            particle_new["uz"] = uz_new
            particle_new["E"] = E_new

        # ==============================================================================
        # Sample delayed fission neutron
        # ==============================================================================

        else:
            # Sample isotropic angle
            ux_new, uy_new, uz_new = sample_isotropic_direction(particle_container_new)

            # Sample emission time
            # NOTE(review): this reads the delayed *fractions* getter but is
            # used as a decay constant — confirm the intended getter.
            decay_rate = mcdc_get.nuclide.neutron_fission_delayed_fractions(
                delayed_group, nuclide, data
            )
            # (redundant guard: this branch already implies not prompt)
            if not prompt:
                xi = rng.lcg(particle_container_new)
                particle_new["t"] -= math.log(xi) / decay_rate

        # Eigenvalue mode: bank right away
        if settings["eigenvalue_mode"]:
            particle_bank_module.bank_census_particle(particle_container_new, mcdc)
            continue
        # Below is only relevant for fixed-source problem

        # Skip if it's beyond time boundary
        if particle_new["t"] > settings["time_boundary"]:
            continue

        # Check if it hits current or next census times
        hit_current_census = False
        hit_future_census = False
        idx_census = mcdc["idx_census"]
        if settings["N_census"] > 1:
            if particle_new["t"] > mcdc_get.settings.census_time(
                idx_census, settings, data
            ):
                hit_current_census = True
            if particle_new["t"] > mcdc_get.settings.census_time(
                idx_census + 1, settings, data
            ):
                hit_future_census = True

        # Not hitting census --> add to active bank
        if not hit_current_census:
            # Keep it if it is the last particle (reuse the incident slot)
            if n == N - 1:
                particle["alive"] = True
                particle["ux"] = particle_new["ux"]
                particle["uy"] = particle_new["uy"]
                particle["uz"] = particle_new["uz"]
                particle["t"] = particle_new["t"]
                particle["g"] = particle_new["g"]
                particle["E"] = particle_new["E"]
                particle["w"] = particle_new["w"]
            else:
                particle_bank_module.bank_active_particle(particle_container_new, mcdc)

        # Hit future census --> add to future bank
        elif hit_future_census:
            # Particle will participate in the future
            particle_bank_module.bank_future_particle(particle_container_new, mcdc)

        # Hit current census --> add to census bank
        else:
            # Particle will participate after the current census is completed
            particle_bank_module.bank_census_particle(particle_container_new, mcdc)


@njit
def neutron_fission_prompt_multiplicity(E, nuclide, mcdc, data):
    """Tabulated prompt fission neutron multiplicity nu_p(E)."""
    data_base = mcdc["data"][nuclide["neutron_fission_prompt_multiplicity_ID"]]
    return evaluate_data(E, data_base, mcdc, data)


@njit
def neutron_fission_delayed_multiplicity(E, nuclide, mcdc, data):
    """Tabulated delayed fission neutron multiplicity nu_d(E)."""
    data_base = mcdc["data"][nuclide["neutron_fission_delayed_multiplicity_ID"]]
    return evaluate_data(E, data_base, mcdc, data)
@njit
def particle_speed(particle_container, simulation, data):
    """Neutron speed, dispatched on multigroup vs. continuous-energy mode."""
    if simulation["settings"]["neutron_multigroup_mode"]:
        return multigroup.particle_speed(particle_container, simulation, data)
    else:
        return native.particle_speed(particle_container)


# ======================================================================================
# Material properties
# ======================================================================================


@njit
def macro_xs(reaction_type, particle_container, simulation, data):
    """Macroscopic xs, dispatched on multigroup vs. continuous-energy mode."""
    if simulation["settings"]["neutron_multigroup_mode"]:
        return multigroup.macro_xs(reaction_type, particle_container, simulation, data)
    else:
        return native.macro_xs(reaction_type, particle_container, simulation, data)


@njit
def neutron_production_xs(reaction_type, particle_container, simulation, data):
    """Neutron production xs, dispatched on multigroup vs. continuous-energy mode."""
    if simulation["settings"]["neutron_multigroup_mode"]:
        return multigroup.neutron_production_xs(
            reaction_type, particle_container, simulation, data
        )
    else:
        return native.neutron_production_xs(
            reaction_type, particle_container, simulation, data
        )


# ======================================================================================
# Collision
# ======================================================================================


@njit
def collision(particle_container, collision_data_container, program, data):
    """Neutron collision, dispatched on multigroup vs. continuous-energy mode.

    Fix: forward collision_data_container to native.collision as well. The
    original dropped it (with a self-doubting "is this a bug??" comment),
    which is inconsistent with the multigroup branch, with how the other
    species' collision handlers are invoked from the physics interface, and
    with the checkpoint copy of this module — and would silently lose
    collision data (e.g. energy-deposition tallies) in continuous-energy mode.
    """
    simulation = util.access_simulation(program)

    if simulation["settings"]["neutron_multigroup_mode"]:
        multigroup.collision(
            particle_container, collision_data_container, program, data
        )
    else:
        native.collision(particle_container, collision_data_container, program, data)
@njit
def particle_speed(particle_container, simulation, data):
    """Group speed of the particle from its material's MGXS speed table."""
    p = particle_container[0]
    mat = simulation["multigroup_materials"][p["material_ID"]]
    return mcdc_get.multigroup_material.mgxs_speed(p["g"], mat, data)


@njit
def macro_xs(reaction_type, particle_container, simulation, data):
    """Macroscopic multigroup cross section of `reaction_type` at the
    particle's energy group. Unsupported reaction types yield 0.0.
    """
    p = particle_container[0]
    mat = simulation["multigroup_materials"][p["material_ID"]]
    g = p["g"]

    if reaction_type == NEUTRON_REACTION_TOTAL:
        return mcdc_get.multigroup_material.mgxs_total(g, mat, data)
    if reaction_type == NEUTRON_REACTION_CAPTURE:
        return mcdc_get.multigroup_material.mgxs_capture(g, mat, data)
    if reaction_type == NEUTRON_REACTION_ELASTIC_SCATTERING:
        return mcdc_get.multigroup_material.mgxs_scatter(g, mat, data)
    if reaction_type == NEUTRON_REACTION_FISSION:
        return mcdc_get.multigroup_material.mgxs_fission(g, mat, data)
    return 0.0


@njit
def neutron_production_xs(reaction_type, particle_container, simulation, data):
    """Neutron production cross section nu * Sigma for `reaction_type` at the
    particle's group. TOTAL sums scattering and fission production; capture
    and unsupported types yield 0.0.
    """
    p = particle_container[0]
    mat = simulation["multigroup_materials"][p["material_ID"]]
    g = p["g"]

    if reaction_type == NEUTRON_REACTION_TOTAL:
        # Scattering production + fission production
        production = mcdc_get.multigroup_material.mgxs_nu_s(
            g, mat, data
        ) * mcdc_get.multigroup_material.mgxs_scatter(g, mat, data)
        production += mcdc_get.multigroup_material.mgxs_nu_f(
            g, mat, data
        ) * mcdc_get.multigroup_material.mgxs_fission(g, mat, data)
        return production

    if reaction_type == NEUTRON_REACTION_CAPTURE:
        # Capture produces no neutrons
        return 0.0

    if reaction_type == NEUTRON_REACTION_ELASTIC_SCATTERING:
        nu = mcdc_get.multigroup_material.mgxs_nu_s(g, mat, data)
        return nu * mcdc_get.multigroup_material.mgxs_scatter(g, mat, data)

    if reaction_type == NEUTRON_REACTION_FISSION:
        nu = mcdc_get.multigroup_material.mgxs_nu_f(g, mat, data)
        return nu * mcdc_get.multigroup_material.mgxs_fission(g, mat, data)

    if reaction_type == NEUTRON_REACTION_FISSION_PROMPT:
        nu = mcdc_get.multigroup_material.mgxs_nu_p(g, mat, data)
        return nu * mcdc_get.multigroup_material.mgxs_fission(g, mat, data)

    if reaction_type == NEUTRON_REACTION_FISSION_DELAYED:
        nu = mcdc_get.multigroup_material.mgxs_nu_d_total(g, mat, data)
        return nu * mcdc_get.multigroup_material.mgxs_fission(g, mat, data)

    # Unsupported reaction type
    return 0.0


@njit
def collision(particle_container, collision_data_container, program, data):
    """Sample and perform a multigroup neutron collision.

    Applies the implicit-capture weight reduction when active, then samples
    scatter / fission / (analog) capture by inverting the reaction CDF.
    """
    simulation = util.access_simulation(program)
    particle = particle_container[0]

    # Reaction cross sections at the particle's group
    SigmaT = macro_xs(NEUTRON_REACTION_TOTAL, particle_container, simulation, data)
    SigmaS = macro_xs(
        NEUTRON_REACTION_ELASTIC_SCATTERING, particle_container, simulation, data
    )
    SigmaC = macro_xs(NEUTRON_REACTION_CAPTURE, particle_container, simulation, data)
    SigmaF = macro_xs(NEUTRON_REACTION_FISSION, particle_container, simulation, data)

    # Implicit capture: survive capture with reduced weight
    if simulation["implicit_capture"]["active"]:
        particle["w"] *= (SigmaT - SigmaC) / SigmaT
        SigmaT -= SigmaC

    # Invert the reaction CDF: scattering, then fission, else capture
    xi = rng.lcg(particle_container) * SigmaT
    cdf = SigmaS
    if cdf > xi:
        scattering(particle_container, program, data)
        return
    cdf += SigmaF
    if cdf > xi:
        fission(particle_container, program, data)
        return
    particle["alive"] = False
@njit
def scattering(particle_container, program, data):
    """Perform multigroup scattering: kill the parent, emit its secondaries.

    The secondary count is a stochastic rounding of weight_production * nu_s;
    each secondary gets a uniformly sampled scattering cosine, a uniform
    azimuth, and an outgoing group sampled from the chi_s(g -> g') row. The
    last secondary replaces the parent in place instead of being banked.
    """
    simulation = util.access_simulation(program)
    particle = particle_container[0]

    # Incident state
    g = particle["g"]
    ux = particle["ux"]
    uy = particle["uy"]
    uz = particle["uz"]

    material = simulation["multigroup_materials"][particle["material_ID"]]
    G = material["G"]

    # Parent dies; it may be revived as the last secondary below
    particle["alive"] = False

    # Weighted emission: emit at the target weight, scale the expected count
    weight_production = 1.0
    weight_product = particle["w"]
    if simulation["weighted_emission"]["active"]:
        weight_target = simulation["weighted_emission"]["weight_target"]
        weight_production = particle["w"] / weight_target
        weight_product = weight_target

    # Stochastic rounding of the expected number of secondaries
    nu_s = mcdc_get.multigroup_material.mgxs_nu_s(g, material, data)
    N = int(math.floor(weight_production * nu_s + rng.lcg(particle_container)))

    # Scratch container reused for every secondary
    particle_container_new = util.local_array(1, type_.particle_data)
    particle_new = particle_container_new[0]

    for n in range(N):
        particle_module.copy_as_child(particle_container_new, particle_container)
        particle_new["w"] = weight_product

        # Scattering cosine sampled uniformly in [-1, 1), then a uniform
        # azimuthal rotation about the incident direction
        mu0 = 2.0 * rng.lcg(particle_container_new) - 1.0
        azi = 2.0 * PI * rng.lcg(particle_container_new)
        ux_new, uy_new, uz_new = scatter_direction(ux, uy, uz, mu0, azi)
        particle_new["ux"] = ux_new
        particle_new["uy"] = uy_new
        particle_new["uz"] = uz_new

        # Outgoing-group spectrum row, sliced directly from the flat data
        # array (equivalent to mgxs_chi_s_vector(g, material, data))
        stride = material["G"]
        start = material["mgxs_chi_s_offset"] + g * stride
        chi_s = data[start : start + stride]

        # Sample the outgoing group by CDF inversion
        xi = rng.lcg(particle_container_new)
        cdf = 0.0
        for g_out in range(G):
            cdf += chi_s[g_out]
            if cdf > xi:
                break
        particle_new["g"] = g_out

        if n == N - 1:
            # Keep the last secondary in place of the parent
            particle["alive"] = True
            particle["ux"] = particle_new["ux"]
            particle["uy"] = particle_new["uy"]
            particle["uz"] = particle_new["uz"]
            particle["g"] = particle_new["g"]
            particle["E"] = particle_new["E"]
            particle["w"] = particle_new["w"]
        else:
            particle_bank_module.bank_active_particle(particle_container_new, program)
@njit
def fission(particle_container, program, data):
    """Perform multigroup fission: kill the parent and emit its secondaries.

    The secondary count is a stochastic rounding of weight_production * nu /
    k_eff. Each secondary is emitted isotropically with a group drawn from
    the prompt or delayed spectrum; delayed neutrons get an exponential
    emission-time delay. Banking depends on eigenvalue mode and census times.
    """
    simulation = util.access_simulation(program)
    settings = simulation["settings"]

    particle = particle_container[0]
    g = particle["g"]

    material = simulation["multigroup_materials"][particle["material_ID"]]
    G = material["G"]
    J = material["J"]

    # Parent dies; it may be revived as the last secondary below
    particle["alive"] = False

    # Weighted emission: emit at the target weight, scale the expected count
    weight_production = 1.0
    weight_product = particle["w"]
    if simulation["weighted_emission"]["active"]:
        weight_target = simulation["weighted_emission"]["weight_target"]
        weight_production = particle["w"] / weight_target
        weight_product = weight_target

    # Fission yields: total, prompt, and (if precursors exist) delayed vector
    nu = mcdc_get.multigroup_material.mgxs_nu_f(g, material, data)
    nu_p = mcdc_get.multigroup_material.mgxs_nu_p(g, material, data)
    if J > 0:
        # Slice equivalent to mgxs_nu_d_vector(g, material, data)
        stride = material["J"]
        start = material["mgxs_nu_d_offset"] + g * stride
        nu_d = data[start : start + stride]

    # Stochastic rounding of the expected number of secondaries
    N = int(
        math.floor(
            weight_production * nu / simulation["k_eff"] + rng.lcg(particle_container)
        )
    )

    # Scratch container reused for every secondary
    particle_container_new = util.local_array(1, type_.particle_data)
    particle_new = particle_container_new[0]

    for n in range(N):
        particle_module.copy_as_child(particle_container_new, particle_container)
        particle_new["w"] = weight_product

        # Isotropic emission direction
        ux_new, uy_new, uz_new = sample_isotropic_direction(particle_container_new)
        particle_new["ux"] = ux_new
        particle_new["uy"] = uy_new
        particle_new["uz"] = uz_new

        # Prompt or delayed emission?
        xi = rng.lcg(particle_container_new) * nu
        cdf = nu_p
        if xi < cdf:
            prompt = True
            # Prompt spectrum row (equivalent to mgxs_chi_p_vector)
            stride = material["G"]
            start = material["mgxs_chi_p_offset"] + g * stride
            spectrum = data[start : start + stride]
        else:
            prompt = False
            # Pick the delayed precursor group, its spectrum row (equivalent
            # to mgxs_chi_d_vector), and its decay constant.
            # NOTE(review): if floating-point accumulation lets the loop end
            # without a break, `spectrum`/`decay` stay unbound — confirm the
            # CDF is normalized so the break always fires.
            for j in range(J):
                cdf += nu_d[j]
                if xi < cdf:
                    stride = material["G"]
                    start = material["mgxs_chi_d_offset"] + j * stride
                    spectrum = data[start : start + stride]
                    decay = mcdc_get.multigroup_material.mgxs_decay_rate(
                        j, material, data
                    )
                    break

        # Sample the outgoing group by CDF inversion of the spectrum
        xi = rng.lcg(particle_container_new)
        cdf = 0.0
        for g_out in range(G):
            cdf += spectrum[g_out]
            if cdf > xi:
                break
        particle_new["g"] = g_out

        # Delayed neutrons: exponential delay with the precursor decay rate
        if not prompt:
            xi = rng.lcg(particle_container_new)
            particle_new["t"] -= math.log(xi) / decay

        # Eigenvalue mode: every fission product goes to the census bank
        if settings["neutron_eigenvalue_mode"]:
            particle_bank_module.bank_census_particle(particle_container_new, program)
            continue

        # Fixed-source mode below.

        # Discard if emitted beyond the time boundary
        if particle_new["t"] > settings["time_boundary"]:
            continue

        # Does the emission time cross the current or a later census time?
        hit_current_census = False
        hit_future_census = False
        idx_census = simulation["idx_census"]
        if settings["N_census"] > 1:
            if particle_new["t"] > mcdc_get.settings.census_time(
                idx_census, settings, data
            ):
                hit_current_census = True
            if particle_new["t"] > mcdc_get.settings.census_time(
                idx_census + 1, settings, data
            ):
                hit_future_census = True

        if not hit_current_census:
            # Active this census; keep the last secondary in place of parent
            if n == N - 1:
                particle["alive"] = True
                particle["ux"] = particle_new["ux"]
                particle["uy"] = particle_new["uy"]
                particle["uz"] = particle_new["uz"]
                particle["t"] = particle_new["t"]
                particle["g"] = particle_new["g"]
                particle["E"] = particle_new["E"]
                particle["w"] = particle_new["w"]
            else:
                particle_bank_module.bank_active_particle(
                    particle_container_new, program
                )
        elif hit_future_census:
            # Beyond the next census time: participates in a later census
            particle_bank_module.bank_future_particle(particle_container_new, program)
        else:
            # Within the current census window
            particle_bank_module.bank_census_particle(particle_container_new, program)
@njit
def particle_speed(particle_container):
    """Relativistic neutron speed from kinetic energy E (same units as the
    constants LIGHT_SPEED / NEUTRON_MASS from mcdc.constant).
    """
    particle = particle_container[0]
    E = particle["E"]
    mass = NEUTRON_MASS
    return LIGHT_SPEED * math.sqrt(E * (E + 2.0 * mass)) / (E + mass)


@njit
def particle_energy_from_speed(speed):
    """Relativistic kinetic energy from speed (inverse of particle_speed)."""
    beta = speed / LIGHT_SPEED
    gamma = 1.0 / math.sqrt(1.0 - beta * beta)
    mass = NEUTRON_MASS
    return mass * (gamma - 1.0)


@njit
def macro_xs(reaction_type, particle_container, mcdc, data):
    """Macroscopic cross section: sum of N_i * sigma_i(E) over the nuclides
    of the particle's material.
    """
    particle = particle_container[0]
    material = mcdc["native_materials"][particle["material_ID"]]
    E = particle["E"]

    total = 0.0
    for i in range(material["N_nuclide"]):
        nuclide_ID = int(mcdc_get.native_material.nuclide_IDs(i, material, data))
        nuclide = mcdc["nuclides"][nuclide_ID]

        nuclide_density = mcdc_get.native_material.nuclide_densities(i, material, data)
        xs = total_micro_xs(reaction_type, E, nuclide, data)

        total += nuclide_density * xs

    return total


@njit
def total_micro_xs(reaction_type, E, nuclide, data):
    """Microscopic cross section of `reaction_type` at energy E, linearly
    interpolated on the nuclide's energy grid.

    Returns 0.0 for unsupported reaction types. (Fix: previously no branch
    matched an unknown type and xs0/xs1 were read unbound, which raises
    UnboundLocalError in Python and fails typing under numba; multigroup
    macro_xs already returns 0.0 in the same situation.)
    """
    idx, E0, E1 = evaluate_neutron_xs_energy_grid(E, nuclide, data)
    if reaction_type == NEUTRON_REACTION_TOTAL:
        xs0 = mcdc_get.nuclide.neutron_total_xs(idx, nuclide, data)
        xs1 = mcdc_get.nuclide.neutron_total_xs(idx + 1, nuclide, data)
    elif reaction_type == NEUTRON_REACTION_ELASTIC_SCATTERING:
        xs0 = mcdc_get.nuclide.neutron_elastic_xs(idx, nuclide, data)
        xs1 = mcdc_get.nuclide.neutron_elastic_xs(idx + 1, nuclide, data)
    elif reaction_type == NEUTRON_REACTION_CAPTURE:
        xs0 = mcdc_get.nuclide.neutron_capture_xs(idx, nuclide, data)
        xs1 = mcdc_get.nuclide.neutron_capture_xs(idx + 1, nuclide, data)
    elif reaction_type == NEUTRON_REACTION_INELASTIC_SCATTERING:
        xs0 = mcdc_get.nuclide.neutron_inelastic_xs(idx, nuclide, data)
        xs1 = mcdc_get.nuclide.neutron_inelastic_xs(idx + 1, nuclide, data)
    elif reaction_type == NEUTRON_REACTION_FISSION:
        xs0 = mcdc_get.nuclide.neutron_fission_xs(idx, nuclide, data)
        xs1 = mcdc_get.nuclide.neutron_fission_xs(idx + 1, nuclide, data)
    else:
        # Unsupported reaction type
        return 0.0
    return linear_interpolation(E, E0, E1, xs0, xs1)


@njit
def reaction_micro_xs(E, reaction_base, nuclide, data):
    """Microscopic cross section of one specific reaction channel at E.

    The channel's xs table may start partway up the shared energy grid
    (threshold reaction); below the offset the cross section is zero.
    """
    idx, E0, E1 = evaluate_neutron_xs_energy_grid(E, nuclide, data)

    # Threshold handling: the channel's table starts at `offset` on the grid
    offset = reaction_base["xs_offset_"]
    if idx < offset:
        return 0.0
    idx -= offset

    xs0 = mcdc_get.neutron_reaction.xs(idx, reaction_base, data)
    xs1 = mcdc_get.neutron_reaction.xs(idx + 1, reaction_base, data)
    return linear_interpolation(E, E0, E1, xs0, xs1)
@njit
def neutron_production_xs(reaction_type, particle_container, mcdc, data):
    """Macroscopic neutron-production cross section nu * Sigma.

    TOTAL sums the elastic, inelastic, and fission production channels via
    recursion. Capture produces nothing. Unsupported reaction types return
    the -1.0 sentinel (preserved from the original; callers should treat
    negative values as errors).
    """
    particle = particle_container[0]
    material_base = mcdc["materials"][particle["material_ID"]]
    material = mcdc["native_materials"][material_base["child_ID"]]

    if reaction_type == NEUTRON_REACTION_TOTAL:
        production = neutron_production_xs(
            NEUTRON_REACTION_ELASTIC_SCATTERING, particle_container, mcdc, data
        )
        production += neutron_production_xs(
            NEUTRON_REACTION_INELASTIC_SCATTERING, particle_container, mcdc, data
        )
        production += neutron_production_xs(
            NEUTRON_REACTION_FISSION, particle_container, mcdc, data
        )
        return production

    if reaction_type == NEUTRON_REACTION_ELASTIC_SCATTERING:
        # Elastic emits exactly one neutron: production equals the macro xs
        return macro_xs(reaction_type, particle_container, mcdc, data)

    if reaction_type == NEUTRON_REACTION_CAPTURE:
        # Capture produces no neutrons
        return 0.0

    if reaction_type == NEUTRON_REACTION_INELASTIC_SCATTERING:
        E = particle["E"]
        production = 0.0
        for i in range(material["N_nuclide"]):
            nuclide_ID = int(mcdc_get.native_material.nuclide_IDs(i, material, data))
            nuclide = mcdc["nuclides"][nuclide_ID]
            nuclide_density = mcdc_get.native_material.nuclide_densities(
                i, material, data
            )

            # Sum nu * sigma over the nuclide's inelastic channels
            for j in range(nuclide["N_neutron_inelastic_scattering_reaction"]):
                reaction_ID = int(
                    mcdc_get.nuclide.neutron_inelastic_scattering_reaction_IDs(
                        j, nuclide, data
                    )
                )
                reaction_base = mcdc["neutron_reactions"][reaction_ID]
                reaction = mcdc["neutron_inelastic_scattering_reactions"][
                    reaction_base["child_ID"]
                ]
                xs = reaction_micro_xs(E, reaction_base, nuclide, data)
                production += nuclide_density * reaction["multiplicity"] * xs

        return production

    if reaction_type == NEUTRON_REACTION_FISSION:
        if not material_base["fissionable"]:
            return 0.0

        E = particle["E"]
        production = 0.0
        for i in range(material["N_nuclide"]):
            nuclide_ID = int(mcdc_get.native_material.nuclide_IDs(i, material, data))
            nuclide = mcdc["nuclides"][nuclide_ID]
            if not nuclide["fissionable"]:
                continue

            nuclide_density = mcdc_get.native_material.nuclide_densities(
                i, material, data
            )

            # Sum (nu_p + nu_d) * sigma_f over the nuclide's fission channels
            for j in range(nuclide["N_neutron_fission_reaction"]):
                reaction_ID = int(
                    mcdc_get.nuclide.neutron_fission_reaction_IDs(j, nuclide, data)
                )
                reaction_base = mcdc["neutron_reactions"][reaction_ID]
                xs = reaction_micro_xs(E, reaction_base, nuclide, data)
                nu = neutron_fission_prompt_multiplicity(
                    E, nuclide, mcdc, data
                ) + neutron_fission_delayed_multiplicity(E, nuclide, mcdc, data)
                production += nuclide_density * nu * xs

        return production

    # Unsupported reaction type: negative sentinel
    return -1.0
@njit
def collision(particle_container, mcdc, data):
    """Sample the colliding nuclide and reaction channel for a native-physics
    neutron, then perform the sampled reaction.
    """
    particle = particle_container[0]
    material = mcdc["native_materials"][particle["material_ID"]]
    E = particle["E"]

    # ------------------------------------------------------------------
    # Sample the colliding nuclide, proportional to N_i * sigmaT_i
    # ------------------------------------------------------------------
    SigmaT = macro_xs(NEUTRON_REACTION_TOTAL, particle_container, mcdc, data)

    # Implicit capture: survive capture with reduced weight
    if mcdc["implicit_capture"]["active"]:
        SigmaC = macro_xs(NEUTRON_REACTION_CAPTURE, particle_container, mcdc, data)
        particle["w"] *= (SigmaT - SigmaC) / SigmaT
        SigmaT -= SigmaC

    xi = rng.lcg(particle_container) * SigmaT
    cdf = 0.0
    for i in range(material["N_nuclide"]):
        nuclide_ID = int(mcdc_get.native_material.nuclide_IDs(i, material, data))
        nuclide = mcdc["nuclides"][nuclide_ID]

        nuclide_density = mcdc_get.native_material.nuclide_densities(i, material, data)
        sigmaT = total_micro_xs(NEUTRON_REACTION_TOTAL, E, nuclide, data)

        if mcdc["implicit_capture"]["active"]:
            # NOTE(review): the weight was already reduced at the material
            # level above, and is multiplied again here for every nuclide
            # visited before the break — looks like a double weight
            # reduction; confirm the intended implicit-capture treatment.
            sigmaC = total_micro_xs(NEUTRON_REACTION_CAPTURE, E, nuclide, data)
            particle["w"] *= (sigmaT - sigmaC) / sigmaT
            sigmaT -= sigmaC

        cdf += nuclide_density * sigmaT
        if cdf > xi:
            break

    # ------------------------------------------------------------------
    # Sample and perform the reaction on the chosen nuclide
    # ------------------------------------------------------------------
    sigma_elastic = total_micro_xs(
        NEUTRON_REACTION_ELASTIC_SCATTERING, E, nuclide, data
    )
    sigma_inelastic = total_micro_xs(
        NEUTRON_REACTION_INELASTIC_SCATTERING, E, nuclide, data
    )
    sigma_fission = total_micro_xs(NEUTRON_REACTION_FISSION, E, nuclide, data)

    xi = rng.lcg(particle_container) * sigmaT

    # Elastic scattering: select the individual elastic channel
    cdf = sigma_elastic
    if xi < cdf:
        cdf -= sigma_elastic
        for i in range(nuclide["N_neutron_elastic_scattering_reaction"]):
            reaction_ID = int(
                mcdc_get.nuclide.neutron_elastic_scattering_reaction_IDs(
                    i, nuclide, data
                )
            )
            reaction = mcdc["neutron_elastic_scattering_reactions"][reaction_ID]
            reaction_base = mcdc["neutron_reactions"][reaction["parent_ID"]]
            cdf += reaction_micro_xs(E, reaction_base, nuclide, data)
            if xi < cdf:
                elastic_scattering(reaction, particle_container, nuclide, mcdc, data)
                return

    # Analog capture (skipped when implicit capture is active)
    if not mcdc["implicit_capture"]["active"]:
        sigma_capture = total_micro_xs(NEUTRON_REACTION_CAPTURE, E, nuclide, data)
        cdf += sigma_capture
        if xi < cdf:
            particle["alive"] = False
            return

    # Inelastic scattering: select the individual inelastic channel
    cdf += sigma_inelastic
    if xi < cdf:
        cdf -= sigma_inelastic
        for i in range(nuclide["N_neutron_inelastic_scattering_reaction"]):
            reaction_ID = int(
                mcdc_get.nuclide.neutron_inelastic_scattering_reaction_IDs(
                    i, nuclide, data
                )
            )
            reaction = mcdc["neutron_inelastic_scattering_reactions"][reaction_ID]
            reaction_base = mcdc["neutron_reactions"][reaction["parent_ID"]]
            cdf += reaction_micro_xs(E, reaction_base, nuclide, data)
            if xi < cdf:
                inelastic_scattering(reaction, particle_container, nuclide, mcdc, data)
                return

    # Fission: arrive here only if the nuclide is fissionable
    cdf += sigma_fission
    if xi < cdf:
        cdf -= sigma_fission
        for i in range(nuclide["N_neutron_fission_reaction"]):
            reaction_ID = int(
                mcdc_get.nuclide.neutron_fission_reaction_IDs(i, nuclide, data)
            )
            reaction = mcdc["neutron_fission_reactions"][reaction_ID]
            reaction_base = mcdc["neutron_reactions"][reaction["parent_ID"]]
            cdf += reaction_micro_xs(E, reaction_base, nuclide, data)
            if xi < cdf:
                fission(reaction, particle_container, nuclide, mcdc, data)
                return
@njit
def elastic_scattering(reaction, particle_container, nuclide, mcdc, data):
    """Elastic scattering with target thermal motion.

    Kinematics run in the center-of-mass frame: a target thermal velocity is
    sampled when the neutron energy is below the thermal threshold, the
    scattering cosine comes from the reaction's tabulated distribution, and
    the outgoing energy/direction are transformed back to the lab frame.
    """
    particle = particle_container[0]
    E = particle["E"]
    ux = particle["ux"]
    uy = particle["uy"]
    uz = particle["uz"]

    # Target thermal motion: neglected well above the thermal region
    A = nuclide["atomic_weight_ratio"]
    temperature = nuclide["temperature"]
    if E > THERMAL_THRESHOLD_FACTOR * BOLTZMANN_K * temperature:
        Vx = 0.0
        Vy = 0.0
        Vz = 0.0
    else:
        Vx, Vy, Vz = sample_nucleus_velocity(A, particle_container)

    # ------------------------------------------------------------------
    # LAB -> COM
    # ------------------------------------------------------------------
    speed = particle_speed(particle_container)

    # Neutron velocity in the lab frame
    vx = speed * ux
    vy = speed * uy
    vz = speed * uz

    # Center-of-mass velocity
    COM_x = (vx + A * Vx) / (1.0 + A)
    COM_y = (vy + A * Vy) / (1.0 + A)
    COM_z = (vz + A * Vz) / (1.0 + A)

    # Neutron velocity, speed, and direction in COM
    vx -= COM_x
    vy -= COM_y
    vz -= COM_z
    speed = math.sqrt(vx * vx + vy * vy + vz * vz)
    ux = vx / speed
    uy = vy / speed
    uz = vz / speed

    # Scattering cosine from the tabulated multi-PDF distribution
    multi_table = mcdc["multi_table_distributions"][reaction["mu_table_ID"]]
    mu0 = sample_multi_table(E, particle_container, multi_table, data)

    # Rotate by mu0 with a uniform azimuth (still in COM)
    azi = 2.0 * PI * rng.lcg(particle_container)
    ux_new, uy_new, uz_new = scatter_direction(ux, uy, uz, mu0, azi)

    # Final COM velocity (speed is preserved in elastic COM scattering)
    vx = speed * ux_new
    vy = speed * uy_new
    vz = speed * uz_new

    # ------------------------------------------------------------------
    # COM -> LAB
    # ------------------------------------------------------------------
    vx += COM_x
    vy += COM_y
    vz += COM_z

    speed = math.sqrt(vx * vx + vy * vy + vz * vz)
    particle["E"] = particle_energy_from_speed(speed)
    particle["ux"] = vx / speed
    particle["uy"] = vy / speed
    particle["uz"] = vz / speed
@njit
def sample_nucleus_velocity(A, particle_container):
    """Sample a target-nucleus thermal velocity (free-gas model) by rejection
    sampling and return its lab-frame components (Vx, Vy, Vz).
    """
    particle = particle_container[0]

    speed = particle_speed(particle_container)

    # Maxwellian parameter; the constant bakes in a fixed temperature:
    # (1.674927471e-27 kg) / (1.38064852e-19 cm^2 kg s^-2 K^-1) / (293.6 K) / 2
    beta = math.sqrt(2.0659834e-11 * A)

    # Rejection-sample the target speed candidate and the target-neutron
    # polar cosine candidate
    y = beta * speed
    while True:
        if rng.lcg(particle_container) < 2.0 / (2.0 + PI_SQRT * y):
            x = math.sqrt(
                -math.log(rng.lcg(particle_container) * rng.lcg(particle_container))
            )
        else:
            cos_val = math.cos(PI_HALF * rng.lcg(particle_container))
            x = math.sqrt(
                -math.log(rng.lcg(particle_container))
                - math.log(rng.lcg(particle_container)) * cos_val * cos_val
            )
        V_cand = x / beta
        mu_cand = 2.0 * rng.lcg(particle_container) - 1.0

        # Acceptance test based on the neutron-target relative speed.
        # NOTE(review): this breaks (accepts) when xi is GREATER than
        # v_rel / (v + V); the usual free-gas acceptance is xi < v_rel /
        # (v + V) — confirm the comparison direction is intentional.
        if rng.lcg(particle_container) > math.sqrt(
            speed * speed + V_cand * V_cand - 2.0 * speed * V_cand * mu_cand
        ) / (speed + V_cand):
            break

    # Rotate the neutron direction by the accepted cosine to get the target
    # direction, then scale by the accepted speed
    azi = 2.0 * PI * rng.lcg(particle_container)
    ux, uy, uz = scatter_direction(
        particle["ux"], particle["uy"], particle["uz"], mu_cand, azi
    )
    return ux * V_cand, uy * V_cand, uz * V_cand
@njit
def inelastic_scattering(reaction, particle_container, nuclide, mcdc, data):
    """Inelastic scattering: kill the parent, emit `multiplicity` secondaries.

    Each secondary's cosine is isotropic, tabulated, or energy-correlated per
    the reaction's angle_type. The outgoing energy comes from one of the
    reaction's spectra: one spectrum per secondary when the counts match,
    otherwise a spectrum is chosen from incident-energy-dependent
    probabilities. COM results are transformed to the lab frame. The last
    secondary replaces the parent in place instead of being banked.
    """
    particle = particle_container[0]
    E = particle["E"]
    ux = particle["ux"]
    uy = particle["uy"]
    uz = particle["uz"]

    # Parent dies; it may be revived as the last secondary below
    particle["alive"] = False

    N = reaction["multiplicity"]
    N_spectrum = reaction["N_spectrum"]
    # One dedicated spectrum per secondary, or a shared probability set
    use_all_spectrum = N == N_spectrum

    # Scratch container reused for every secondary
    particle_container_new = np.zeros(1, type_.particle_data)
    particle_new = particle_container_new[0]

    for n in range(N):
        particle_module.copy_as_child(particle_container_new, particle_container)

        # --------------------------------------------------------------
        # Sample the scattering cosine (unless correlated with energy)
        # --------------------------------------------------------------
        angle_type = reaction["angle_type"]
        if angle_type == ANGLE_ENERGY_CORRELATED:
            pass  # sampled jointly with the energy below
        elif angle_type == ANGLE_ISOTROPIC:
            mu = sample_isotropic_cosine(particle_container_new)
        elif angle_type == ANGLE_DISTRIBUTED:
            distribution_base = mcdc["distributions"][reaction["mu_ID"]]
            multi_table = mcdc["multi_table_distributions"][
                distribution_base["child_ID"]
            ]
            mu = sample_multi_table(E, particle_container_new, multi_table, data)

        # --------------------------------------------------------------
        # Choose the energy spectrum
        # --------------------------------------------------------------
        if use_all_spectrum:
            ID = int(
                mcdc_get.neutron_inelastic_scattering_reaction.energy_spectrum_IDs(
                    n, reaction, data
                )
            )
            spectrum_base = mcdc["distributions"][ID]
        else:
            # Spectrum probabilities tabulated on an incident-energy grid.
            # NOTE(review): if the probability CDF under-sums due to
            # round-off and the loop ends without a break, `spectrum_base`
            # stays unbound — confirm normalization.
            probability_grid = mcdc_get.neutron_inelastic_scattering_reaction.spectrum_probability_grid_all(
                reaction, data
            )
            probability_idx = find_bin(E, probability_grid)
            xi = rng.lcg(particle_container_new)
            cdf = 0.0
            for j in range(N_spectrum):
                cdf += mcdc_get.neutron_inelastic_scattering_reaction.spectrum_probability(
                    probability_idx, j, reaction, data
                )
                if xi < cdf:
                    ID = int(
                        mcdc_get.neutron_inelastic_scattering_reaction.energy_spectrum_IDs(
                            j, reaction, data
                        )
                    )
                    spectrum_base = mcdc["distributions"][ID]
                    break

        # --------------------------------------------------------------
        # Sample the outgoing energy (and cosine, if correlated)
        # --------------------------------------------------------------
        if angle_type != ANGLE_ENERGY_CORRELATED:
            E_new = sample_distribution(
                E, spectrum_base, particle_container_new, mcdc, data
            )
        else:
            E_new, mu = sample_correlated_distribution(
                E, spectrum_base, particle_container_new, mcdc, data
            )

        # --------------------------------------------------------------
        # COM -> LAB transformation when the reaction is tabulated in COM
        # --------------------------------------------------------------
        reaction_base = mcdc["neutron_reactions"][int(reaction["parent_ID"])]
        if reaction_base["reference_frame"] == REFERENCE_FRAME_COM:
            A = nuclide["atomic_weight_ratio"]
            mu_COM = mu
            E_COM = E_new
            E_new = (
                E_COM
                + (E + 2 * mu_COM * (A + 1) * math.sqrt(E * E_COM)) / (A + 1) ** 2
            )
            mu = mu_COM * math.sqrt(E_COM / E_new) + math.sqrt(E / E_new) / (A + 1)

        # Rotate the incident direction by the final lab cosine
        azi = 2.0 * PI * rng.lcg(particle_container_new)
        ux_new, uy_new, uz_new = scatter_direction(ux, uy, uz, mu, azi)

        particle_new["ux"] = ux_new
        particle_new["uy"] = uy_new
        particle_new["uz"] = uz_new
        particle_new["E"] = E_new

        # --------------------------------------------------------------
        # Bank; keep the last secondary in place of the parent
        # --------------------------------------------------------------
        if n == N - 1:
            particle["alive"] = True
            particle["ux"] = particle_new["ux"]
            particle["uy"] = particle_new["uy"]
            particle["uz"] = particle_new["uz"]
            particle["E"] = particle_new["E"]
        else:
            particle_bank_module.bank_active_particle(particle_container_new, mcdc)
= nuclide["N_neutron_fission_delayed_precursor"] + evt = cgmfwrap.run_event(ZAID,E*1e-6) # produces nu_n + + nu_d = neutron_fission_delayed_multiplicity(E, nuclide, mcdc, data) + nu_p = int(evt.nu_n) + + nu = nu_d + nu_p + + + # Set up secondary particle container + particle_container_new = np.zeros(1, type_.particle_data) + particle_new = particle_container_new[0] + + # Deal with Prompt and Delayed separately. CGMF takes care of all prompt business. + + + #print(particle_bank_module.get_bank_size(bank_active)) + + for i in range(nu_p): + # uses cmgf sampled energies and directions, set weight + particle_module.copy_as_child(particle_container_new, particle_container) # <-- contains time + particle_new["w"] = weight_product + + E_new = evt.neutron_energies[i]*1e6 + ux_new = evt.neutron_dir_cosu[i] + uy_new = evt.neutron_dir_cosv[i] + uz_new = evt.neutron_dir_cosw[i] + + particle_new["ux"] = ux_new + particle_new["uy"] = uy_new + particle_new["uz"] = uz_new + particle_new["E"] = E_new + + if settings["neutron_eigenvalue_mode"]: + + particle_bank_module.bank_census_particle(particle_container_new, mcdc) + + continue + + # Below is only relevant for fixed-source problem + + # Skip if it's beyond time boundary + if particle_new["t"] > settings["time_boundary"]: + continue + + # Check if it hits current or next census times + hit_current_census = False + hit_future_census = False + idx_census = mcdc["idx_census"] + if settings["N_census"] > 1: + if particle_new["t"] > mcdc_get.settings.census_time( + idx_census, settings, data + ): + hit_current_census = True + if particle_new["t"] > mcdc_get.settings.census_time( + idx_census + 1, settings, data + ): + hit_future_census = True + + # Not hitting census --> add to active bank + if not hit_current_census: + particle_bank_module.bank_active_particle(particle_container_new, mcdc) + #s += 1 # debug + # Hit future census --> add to future bank + elif hit_future_census: + # Particle will participate in the future + 
particle_bank_module.bank_future_particle(particle_container_new, mcdc) + + # Hit current census --> add to census bank + else: + # Particle will participate after the current census is completed + particle_bank_module.bank_census_particle(particle_container_new, mcdc) + + #print('prompt',particle_bank_module.get_bank_size(simulation["bank_active"])) + +# ============================================================================== + # Sample delayed fission neutron, separate from prompt neutrons. + # ============================================================================== + + #xi = rng.lcg(particle_container) + #N_delayed = 1 if xi < nu_d else 0 # ask about this !! + N_delayed = np.random.poisson(nu_d) + N_groups = nuclide["N_neutron_fission_delayed_precursor"] # number of precursor groups + + + # compute total delayed neutron fraction + beta_total = 0.0 + for j in range(N_groups): + beta_total += mcdc_get.nuclide.neutron_fission_delayed_fractions( + j, nuclide, data + ) + + #print('nu_d',nu_d) + #print('nu_p',nu_p) + for n in range(N_delayed): + # Set default attributes + particle_module.copy_as_child(particle_container_new, particle_container) + + # Set weight + particle_new["w"] = weight_product + + # assign group + xi = rng.lcg(particle_container_new) + + P_j = 0.0 + + for j in range(N_groups): + beta_j = mcdc_get.nuclide.neutron_fission_delayed_fractions( + j, nuclide, data + ) + + P_j += beta_j / beta_total + + if xi < P_j: + delayed_group = j + break + + + # Sample isotropic angle + ux_new, uy_new, uz_new = sample_isotropic_direction(particle_container_new) + + # Sample emission time + decay_rate = mcdc_get.nuclide.neutron_fission_delayed_fractions( + delayed_group, nuclide, data + ) + + xi = rng.lcg(particle_container_new) + particle_new["t"] -= math.log(xi) / decay_rate + + # Eigenvalue mode: bank right away + if settings["neutron_eigenvalue_mode"]: + + particle_bank_module.bank_census_particle(particle_container_new, mcdc) + continue + # Below is 
only relevant for fixed-source problem + + # Skip if it's beyond time boundary + if particle_new["t"] > settings["time_boundary"]: + continue + + # Check if it hits current or next census times + hit_current_census = False + hit_future_census = False + idx_census = mcdc["idx_census"] + if settings["N_census"] > 1: + if particle_new["t"] > mcdc_get.settings.census_time( + idx_census, settings, data + ): + hit_current_census = True + if particle_new["t"] > mcdc_get.settings.census_time( + idx_census + 1, settings, data + ): + hit_future_census = True + + # Not hitting census --> add to active bank + if not hit_current_census: + particle_bank_module.bank_active_particle(particle_container_new, mcdc) + + # Hit future census --> add to future bank + elif hit_future_census: + # Particle will participate in the future + particle_bank_module.bank_future_particle(particle_container_new, mcdc) + + # Hit current census --> add to census bank + else: + # Particle will participate after the current census is completed + particle_bank_module.bank_census_particle(particle_container_new, mcdc) + + + #print('delayed',particle_bank_module.get_bank_size(simulation["bank_active"])) + + +@njit +def neutron_fission_prompt_multiplicity(E, nuclide, mcdc, data): + data_base = mcdc["data"][nuclide["neutron_fission_prompt_multiplicity_ID"]] + return evaluate_data(E, data_base, mcdc, data) + + +@njit +def neutron_fission_delayed_multiplicity(E, nuclide, mcdc, data): + data_base = mcdc["data"][nuclide["neutron_fission_delayed_multiplicity_ID"]] + return evaluate_data(E, data_base, mcdc, data) diff --git a/mcdc/mcdc/transport/physics/util.py b/mcdc/mcdc/transport/physics/util.py new file mode 100644 index 000000000..3475a1510 --- /dev/null +++ b/mcdc/mcdc/transport/physics/util.py @@ -0,0 +1,57 @@ +import math + +from numba import njit + +#### + +import mcdc.mcdc_get as mcdc_get + +from mcdc.transport.util import find_bin + + +@njit +def evaluate_neutron_xs_energy_grid(e, nuclide, data): + 
offset = nuclide["neutron_xs_energy_grid_offset"] + length = nuclide["neutron_xs_energy_grid_length"] + energy_grid = data[offset : offset + length] + # Above is equivalent to: energy_grid = mcdc_get.nuclide.neutron_xs_energy_grid_all(nuclide, data) + + idx = find_bin(e, energy_grid) + e0 = energy_grid[idx] + e1 = energy_grid[idx + 1] + return idx, e0, e1 + + +@njit +def evaluate_electron_xs_energy_grid(e, element, data): + energy_grid = mcdc_get.element.electron_xs_energy_grid_all(element, data) + idx = find_bin(e, energy_grid) + e0 = energy_grid[idx] + e1 = energy_grid[idx + 1] + return idx, e0, e1 + + +@njit +def scatter_direction(ux, uy, uz, mu0, azi): + cos_azi = math.cos(azi) + sin_azi = math.sin(azi) + Ac = (1.0 - mu0**2) ** 0.5 + + if uz != 1.0: + B = (1.0 - uz**2) ** 0.5 + C = Ac / B + + ux_new = ux * mu0 + (ux * uz * cos_azi - uy * sin_azi) * C + uy_new = uy * mu0 + (uy * uz * cos_azi + ux * sin_azi) * C + uz_new = uz * mu0 - cos_azi * Ac * B + + # If dir = 0i + 0j + k, interchange z and y in the scattering formula + else: + B = (1.0 - uy**2) ** 0.5 + C = Ac / B + + ux_new = ux * mu0 + (ux * uy * cos_azi - uz * sin_azi) * C + uz_new = uz * mu0 + (uz * uy * cos_azi + ux * sin_azi) * C + uy_new = uy * mu0 - cos_azi * Ac * B + + return ux_new, uy_new, uz_new diff --git a/mcdc/mcdc/transport/rng.py b/mcdc/mcdc/transport/rng.py new file mode 100644 index 000000000..4a8415e1e --- /dev/null +++ b/mcdc/mcdc/transport/rng.py @@ -0,0 +1,86 @@ +import numba as nb +import numpy as np + +from numba import uint64, njit + +# ====================================================================================== +# Random number generator +# LCG with hash seed-split +# ====================================================================================== + +# LCG parameters +RNG_G = nb.uint64(2806196910506780709) +RNG_C = nb.uint64(1) +RNG_MOD_MASK = nb.uint64(0x7FFFFFFFFFFFFFFF) +RNG_MOD = nb.uint64(0x8000000000000000) + +# Splitter seeds +SEED_SPLIT_CENSUS = 
nb.uint64(0x43454D654E54) +SEED_SPLIT_SOURCE = nb.uint64(0x43616D696C6C65) +SEED_SPLIT_SOURCE_PRECURSOR = nb.uint64(0x546F6464) +SEED_SPLIT_PARTICLE = nb.uint64(0) +SEED_SPLIT_UQ = nb.uint64(0x5368656261) + + +@njit +def wrapping_mul(a, b): + return a * b + + +@njit +def wrapping_add(a, b): + return a + b + + +def wrapping_mul_python(a, b): + a = uint64(a) + b = uint64(b) + with np.errstate(all="ignore"): + return a * b + + +def wrapping_add_python(a, b): + a = uint64(a) + b = uint64(b) + with np.errstate(all="ignore"): + return a + b + + +@njit +def split_seed(key, seed): + """ + murmur_hash64a + + If called from non-jitted function, may need to recast the argument key with numba.uint64 + """ + multiplier = uint64(0xC6A4A7935BD1E995) + length = uint64(8) + rotator = uint64(47) + key = uint64(key) + seed = uint64(seed) + + hash_value = uint64(seed) ^ wrapping_mul(length, multiplier) + + key = wrapping_mul(key, multiplier) + key ^= key >> rotator + key = wrapping_mul(key, multiplier) + hash_value ^= key + hash_value = wrapping_mul(hash_value, multiplier) + + hash_value ^= hash_value >> rotator + hash_value = wrapping_mul(hash_value, multiplier) + hash_value ^= hash_value >> rotator + return hash_value + + +@njit +def lcg_(seed): + seed = uint64(seed) + return wrapping_add(wrapping_mul(RNG_G, seed), RNG_C) & RNG_MOD_MASK + + +@njit +def lcg(state_container): + state = state_container[0] + state["rng_seed"] = lcg_(state["rng_seed"]) + return state["rng_seed"] / RNG_MOD diff --git a/mcdc/mcdc/transport/simulation.py b/mcdc/mcdc/transport/simulation.py new file mode 100644 index 000000000..6f2e3f06f --- /dev/null +++ b/mcdc/mcdc/transport/simulation.py @@ -0,0 +1,474 @@ +import numpy as np + +from numba import njit, objmode, uint64 + +#### + +import mcdc.mcdc_get as mcdc_get +import mcdc.numba_types as type_ +import mcdc.output as output_module +import mcdc.transport.geometry as geometry +import mcdc.transport.mpi as mpi +import mcdc.transport.particle as 
def fixed_source_simulation(simulation_container, data):
    """Run a fixed-source simulation: batches x time censuses x source loop."""
    # Ensure `mcdc` exist for the lifetime of the program by intentionally
    # leaking their memory
    # adapt.leak(simulation_container)
    simulation = simulation_container[0]

    # Get some settings
    settings = simulation["settings"]
    N_batch = settings["N_batch"]
    N_particle = settings["N_particle"]
    N_census = settings["N_census"]
    use_census_based_tally = settings["use_census_based_tally"]

    # Loop over batches
    for idx_batch in range(N_batch):
        simulation["idx_batch"] = idx_batch
        seed_batch = rng.split_seed(uint64(idx_batch), settings["rng_seed"])

        # Distribute work
        mpi.distribute_work(N_particle, simulation)

        # Print multi-batch header
        if N_batch > 1:
            with objmode():
                print_header_batch(idx_batch, N_batch)

        # Loop over time censuses
        for idx_census in range(N_census):
            simulation["idx_census"] = idx_census
            # NOTE(review): seed_census does not depend on idx_census, so
            # every census window derives the same source seed -- confirm
            # whether idx_census should enter the split.
            seed_census = rng.split_seed(uint64(seed_batch), rng.SEED_SPLIT_CENSUS)

            # Reset tally time filters if census-based tally is used
            if use_census_based_tally:
                tally_module.filter.set_census_based_time_grid(simulation, data)

            # Accordingly promote future particles to censused particles
            if particle_bank_module.get_bank_size(simulation["bank_future"]) > 0:
                particle_bank_module.promote_future_particles(simulation, data)

            # Loop over source particles
            seed_source = rng.split_seed(uint64(seed_census), rng.SEED_SPLIT_SOURCE)
            source_loop(uint64(seed_source), simulation, data)

            # Manage particle banks: population control and work rebalance
            particle_bank_module.manage_particle_banks(simulation)

            # Time census-based tally closeout
            if use_census_based_tally:
                tally_module.closeout.reduce(simulation, data)
                tally_module.closeout.accumulate(simulation, data)
                if simulation["mpi_master"]:
                    with objmode():
                        output_module.generate_census_based_tally(simulation, data)
                tally_module.closeout.reset_sum_bins(simulation, data)

            # Terminate census loop if all banks are empty
            if (
                idx_census > 0
                and particle_bank_module.total_size(simulation["bank_source"]) == 0
                and particle_bank_module.total_size(simulation["bank_census"]) == 0
                and particle_bank_module.total_size(simulation["bank_future"]) == 0
            ):
                break

        # Multi-batch closeout
        if N_batch > 1:
            # Reset banks
            particle_bank_module.set_bank_size(simulation["bank_active"], 0)
            particle_bank_module.set_bank_size(simulation["bank_census"], 0)
            particle_bank_module.set_bank_size(simulation["bank_source"], 0)
            particle_bank_module.set_bank_size(simulation["bank_future"], 0)

            if not use_census_based_tally:
                # Tally history closeout
                tally_module.closeout.reduce(simulation, data)
                tally_module.closeout.accumulate(simulation, data)

    # Tally closeout
    if not use_census_based_tally:
        tally_module.closeout.finalize(simulation, data)


def eigenvalue_simulation(simulation_container, data):
    """Run a k-eigenvalue power-iteration simulation."""
    # Ensure `mcdc` exist for the lifetime of the program
    # by intentionally leaking their memory
    # adapt.leak(simulation_container)
    simulation = simulation_container[0]

    # Get some settings
    settings = simulation["settings"]
    N_inactive = settings["N_inactive"]
    N_cycle = settings["N_cycle"]
    N_particle = settings["N_particle"]

    # Distribute work
    mpi.distribute_work(N_particle, simulation)

    # Loop over power iteration cycles
    for idx_cycle in range(N_cycle):
        simulation["idx_cycle"] = idx_cycle
        seed_cycle = rng.split_seed(uint64(idx_cycle), settings["rng_seed"])

        # Loop over source particles
        source_loop(uint64(seed_cycle), simulation, data)

        # Tally "history" closeout
        tally_module.closeout.eigenvalue_cycle(simulation, data)
        if simulation["cycle_active"]:
            tally_module.closeout.reduce(simulation, data)
            tally_module.closeout.accumulate(simulation, data)

        # Manage particle banks: population control and work rebalance
        particle_bank_module.manage_particle_banks(simulation)

        # Print progress
        with objmode():
            print_progress_eigenvalue(simulation, data)

        # Entering active cycle?
        simulation["idx_cycle"] += 1
        if simulation["idx_cycle"] >= N_inactive:
            simulation["cycle_active"] = True

    # Tally closeout
    tally_module.closeout.finalize(simulation, data)
    tally_module.closeout.eigenvalue_simulation(simulation)
# Loop over power iteration cycles + for idx_cycle in range(N_cycle): + simulation["idx_cycle"] = idx_cycle + seed_cycle = rng.split_seed(uint64(idx_cycle), settings["rng_seed"]) + + # Loop over source particles + source_loop(uint64(seed_cycle), simulation, data) + + # Tally "history" closeout + tally_module.closeout.eigenvalue_cycle(simulation, data) + if simulation["cycle_active"]: + tally_module.closeout.reduce(simulation, data) + tally_module.closeout.accumulate(simulation, data) + + # Manage particle banks: population control and work rebalance + particle_bank_module.manage_particle_banks(simulation) + + # Print progress + with objmode(): + print_progress_eigenvalue(simulation, data) + + # Entering active cycle? + simulation["idx_cycle"] += 1 + if simulation["idx_cycle"] >= N_inactive: + simulation["cycle_active"] = True + + # Tally closeout + tally_module.closeout.finalize(simulation, data) + tally_module.closeout.eigenvalue_simulation(simulation) + + +# ============================================================================= +# Source loop +# ============================================================================= + + +@njit +def source_loop(seed, simulation, data): + # Progress bar indicator + N_prog = 0 + + # Loop over particle sources + work_start = simulation["mpi_work_start"] + work_size = simulation["mpi_work_size"] + + for idx_work in range(work_size): + simulation["idx_work"] = work_start + idx_work + generate_source_particle(work_start, idx_work, seed, simulation, data) + + # Run the source particle and its secondaries + exhaust_active_bank(simulation, data) + + source_closeout(simulation, idx_work, N_prog, data) + + +@njit +def generate_source_particle(work_start, idx_work, seed, program, data): + """Get a source particle and put into one of the banks""" + simulation = util.access_simulation(program) + settings = simulation["settings"] + + # Get from fixed-source? 
+ if particle_bank_module.get_bank_size(simulation["bank_source"]) == 0: + particle_container = util.local_array(1, type_.particle_data) + particle = particle_container[0] + + # Sample source + seed_work = rng.split_seed(work_start + idx_work, seed) + source_particle(particle_container, seed_work, simulation, data) + + # Get from source bank + else: + particle_container = simulation["bank_source"]["particle_data"][ + idx_work : (idx_work + 1) + ] + particle = particle_container[0] + + # Skip if beyond time boundary + if particle["t"] > settings["time_boundary"]: + return + + # Check if it is beyond current or next census times + hit_census = False + hit_next_census = False + idx_census = simulation["idx_census"] + + if idx_census < settings["N_census"] - 1: + if particle["t"] > mcdc_get.settings.census_time( + idx_census + 1, settings, data + ): + hit_census = True + hit_next_census = True + elif particle["t"] > mcdc_get.settings.census_time(idx_census, settings, data): + hit_census = True + + # Put into the right bank + if not hit_census: + particle_bank_module.bank_active_particle(particle_container, program) + elif not hit_next_census: + # Particle will participate after the current census + particle_bank_module.bank_census_particle(particle_container, program) + else: + # Particle will participate in the future + particle_bank_module.bank_future_particle(particle_container, program) + + +@njit +def exhaust_active_bank(simulation, data): + particle_container = util.local_array(1, type_.particle) + particle = particle_container[0] + + # Loop until active bank is exhausted + while particle_bank_module.get_bank_size(simulation["bank_active"]) > 0: + # Get particle from active bank + particle_bank_module.pop_particle(particle_container, simulation["bank_active"]) + + # Particle loop + particle_loop(particle_container, simulation, data) + + +@njit +def source_closeout(simulation, idx_work, N_prog, data): + # Tally history closeout for one-batch fixed-source 
simulation + if ( + not simulation["settings"]["neutron_eigenvalue_mode"] + and simulation["settings"]["N_batch"] == 1 + ): + if not simulation["settings"]["use_census_based_tally"]: + tally_module.closeout.accumulate(simulation, data) + + # Progress printout + percent = (idx_work + 1.0) / simulation["mpi_work_size"] + if simulation["settings"]["use_progress_bar"] and int(percent * 100.0) > N_prog: + N_prog += 1 + with objmode(): + print_progress(percent, simulation) + + +# ====================================================================================== +# Particle loop +# ====================================================================================== + + +@njit +def particle_loop(particle_container, simulation, data): + particle = particle_container[0] + + while particle["alive"]: + step_particle(particle_container, simulation, data) + + +@njit +def step_particle(particle_container, program, data): + simulation = util.access_simulation(program) + particle = particle_container[0] + + # Determine and move to event + move_to_event(particle_container, simulation, data) + + # Execute events + if particle["event"] == EVENT_LOST: + return + + # Collision + if particle["event"] & EVENT_COLLISION: + collision_data_container = np.zeros(1, type_.collision_data) + + # Execute the physics + physics.collision(particle_container, collision_data_container, program, data) + + # Score collision tallies + if simulation["cycle_active"]: + # Cell tallies + cell = simulation["cells"][particle["cell_ID"]] + for i in range(cell["N_tally"]): + tally_base_ID = int(mcdc_get.cell.tally_IDs(i, cell, data)) + tally_base = simulation["tallies"][tally_base_ID] + + # Skip non-collision tallies + if tally_base["child_type"] != TALLY_COLLISION: + continue + + tally = simulation["collision_tallies"][tally_base["child_ID"]] + tally_module.score.collision_tally( + particle_container, + collision_data_container, + tally, + simulation, + data, + ) + + # Other collision tallies + for i in 
@njit
def move_to_event(particle_container, simulation, data):
    """Find the nearest event, score tracklength tallies, and move the particle.

    Sets particle["event"] (bitwise flags; coincident events within
    COINCIDENCE_TOLERANCE are combined) and advances the particle by the
    winning distance.
    """
    settings = simulation["settings"]

    # ==================================================================
    # Preparation (as needed)
    # ==================================================================
    particle = particle_container[0]

    # Multigroup preparation
    # In MG mode, particle speed is material-dependent.
    if settings["neutron_multigroup_mode"]:
        # If material is not identified yet, locate the particle
        if particle["material_ID"] == -1:
            if not geometry.locate_particle(particle_container, simulation, data):
                # Particle is lost
                particle["event"] = EVENT_LOST
                return

    # ==================================================================
    # Geometry inspection
    # ==================================================================
    # - Set particle top cell and material IDs (if not lost)
    # - Set surface ID (if surface hit)
    # - Set particle boundary event (surface or lattice crossing, or lost)
    # - Return distance to boundary (surface or lattice)

    d_boundary = geometry.inspect_geometry(particle_container, simulation, data)

    # Particle is lost?
    if particle["event"] == EVENT_LOST:
        return

    # ==================================================================
    # Get distances to other events
    # ==================================================================

    speed = physics.particle_speed(particle_container, simulation, data)

    # Distance to time boundary
    d_time_boundary = speed * (settings["time_boundary"] - particle["t"])

    # Distance to census time
    idx = simulation["idx_census"]
    d_time_census = speed * (
        mcdc_get.settings.census_time(idx, settings, data) - particle["t"]
    )

    # Distance to next collision
    d_collision = physics.collision_distance(particle_container, simulation, data)

    # ==================================================================
    # Determine event(s)
    # ==================================================================
    # TODO: Make a function to better maintain the repeating operation

    distance = d_boundary

    # Check distance to collision
    if d_collision < distance - COINCIDENCE_TOLERANCE:
        distance = d_collision
        particle["event"] = EVENT_COLLISION
        particle["surface_ID"] = -1
    elif geometry.check_coincidence(d_collision, distance):
        # Coincident with the current winner: add the flag
        particle["event"] += EVENT_COLLISION

    # Check distance to time census
    if d_time_census < distance - COINCIDENCE_TOLERANCE:
        distance = d_time_census
        particle["event"] = EVENT_TIME_CENSUS
        particle["surface_ID"] = -1
    elif geometry.check_coincidence(d_time_census, distance):
        particle["event"] += EVENT_TIME_CENSUS

    # Check distance to time boundary (exclusive event)
    if d_time_boundary < distance + COINCIDENCE_TOLERANCE:
        distance = d_time_boundary
        particle["event"] = EVENT_TIME_BOUNDARY
        particle["surface_ID"] = -1

    # ==================================================================
    # Move particle
    # ==================================================================

    # Score tracklength tallies
    if simulation["cycle_active"]:
        # Cell tallies
        cell = simulation["cells"][particle["cell_ID"]]
        for i in range(cell["N_tally"]):
            tally_base_ID = int(mcdc_get.cell.tally_IDs(i, cell, data))
            tally_base = simulation["tallies"][tally_base_ID]

            # Skip non-tracklength tallies
            if tally_base["child_type"] != TALLY_TRACKLENGTH:
                continue

            tally = simulation["tracklength_tallies"][tally_base["child_ID"]]
            tally_module.score.tracklength_tally(
                particle_container, distance, tally, simulation, data
            )

        # Other tracklength tallies
        for i in range(simulation["N_tracklength_tally"]):
            tally = simulation["tracklength_tallies"][i]

            # Skip cell tallies (already scored above)
            if tally["spatial_filter_type"] == SPATIAL_FILTER_CELL:
                continue

            tally_module.score.tracklength_tally(
                particle_container, distance, tally, simulation, data
            )

    # Eigenvalue tally is scored every cycle (k updates need inactive cycles
    # too). NOTE(review): indentation reconstructed from a mangled patch --
    # confirm this sits outside the cycle_active block.
    if settings["neutron_eigenvalue_mode"]:
        tally_module.score.eigenvalue_tally(
            particle_container, distance, simulation, data
        )

    # Move particle
    particle_module.move(particle_container, distance, simulation, data)
@njit
def source_particle(particle_container, seed, simulation, data):
    """Sample a new source particle (position, direction, energy, time, weight).

    The particle's RNG seed is set from `seed` so that every attribute draw
    is reproducible for a given (work index, batch/census) split.
    """
    particle = particle_container[0]
    particle["rng_seed"] = seed

    # Sample source
    # TODO: use cdf and binary search instead
    # NOTE(review): if the probabilities sum to less than 1 and xi exceeds
    # the total, the loop falls through and the last source is used -- verify
    # the probabilities are normalized upstream.
    xi = rng.lcg(particle_container)
    tot = 0.0
    for source in simulation["sources"]:
        tot += source["probability"]
        if tot >= xi:
            break

    # Position
    if source["point_source"]:
        x = source["point"][0]
        y = source["point"][1]
        z = source["point"][2]
    else:
        x = sample_uniform(source["x"][0], source["x"][1], particle_container)
        y = sample_uniform(source["y"][0], source["y"][1], particle_container)
        z = sample_uniform(source["z"][0], source["z"][1], particle_container)

    # Direction
    if source["isotropic_direction"]:
        ux, uy, uz = sample_isotropic_direction(particle_container)
    elif source["white_direction"]:
        rx = source["direction"][0]
        ry = source["direction"][1]
        rz = source["direction"][2]
        ux, uy, uz = sample_white_direction(rx, ry, rz, particle_container)
    elif source["mono_direction"]:
        ux = source["direction"][0]
        uy = source["direction"][1]
        uz = source["direction"][2]
    else:
        ux, uy, uz = sample_direction(
            source["polar_cosine"],
            source["azimuthal"],
            source["direction"],
            particle_container,
        )

    # Energy (multigroup mode samples a group g; continuous mode samples E)
    if simulation["settings"]["neutron_multigroup_mode"]:
        E = 0.0
        if source["mono_energetic"]:
            g = source["energy_group"]
        else:
            ID = source["energy_group_pmf_ID"]
            pmf = simulation["pmf_distributions"][ID]
            g = sample_pmf(pmf, particle_container, data)
    else:
        g = 0
        if source["mono_energetic"]:
            E = source["energy"]
        else:
            ID = source["energy_pdf_ID"]
            table = simulation["tabulated_distributions"][ID]
            E = sample_tabulated(table, particle_container, data)

    # Time
    if source["discrete_time"]:
        t = source["time"]
    else:
        t = sample_uniform(
            source["time_range"][0], source["time_range"][1], particle_container
        )

    # Motion translation
    if source["moving"]:
        # Get moving interval index wrt the given time
        time_grid = data[
            source["move_time_grid_offset"] : (
                source["move_time_grid_offset"] + source["N_move_grid"]
            )
        ]
        # Above is equivalent to:
        # time_grid = mcdc_get.source.move_time_grid_all(source, data)

        tolerance = COINCIDENCE_TOLERANCE_TIME
        go_lower = False
        idx = find_bin_with_rules(t, time_grid, tolerance, go_lower)

        # Coinciding cases: snap to the next interval
        if abs(time_grid[idx + 1] - t) < COINCIDENCE_TOLERANCE:
            idx += 1

        # Source move translations
        start = source["move_translations_offset"] + idx * 3
        trans_0 = data[start : start + 3]
        # Above is equivalent to:
        # trans_0 = mcdc_get.source.move_translations_vector(idx, source, data)

        # Source move velocities
        start = source["move_velocities_offset"] + idx * 3
        V = data[start : start + 3]
        # Above is equivalent to:
        # V = mcdc_get.source.move_velocities_vector(idx, source, data)

        # Source move time grid
        time_0 = mcdc_get.source.move_time_grid(idx, source, data)

        # Translate the particle along the interval's motion
        t_local = t - time_0
        x += trans_0[0] + V[0] * t_local
        y += trans_0[1] + V[1] * t_local
        z += trans_0[2] + V[2] * t_local

    # Make and return particle
    particle["x"] = x
    particle["y"] = y
    particle["z"] = z
    particle["t"] = t
    particle["ux"] = ux
    particle["uy"] = uy
    particle["uz"] = uz
    particle["g"] = g
    particle["E"] = E
    particle["w"] = 1.0
    particle["particle_type"] = source["particle_type"]
a/mcdc/mcdc/transport/tally/__init__.py b/mcdc/mcdc/transport/tally/__init__.py new file mode 100644 index 000000000..000e1cc2a --- /dev/null +++ b/mcdc/mcdc/transport/tally/__init__.py @@ -0,0 +1,3 @@ +import mcdc.transport.tally.filter as filter +import mcdc.transport.tally.score as score +import mcdc.transport.tally.closeout as closeout diff --git a/mcdc/mcdc/transport/tally/closeout.py b/mcdc/mcdc/transport/tally/closeout.py new file mode 100644 index 000000000..96f2cd66c --- /dev/null +++ b/mcdc/mcdc/transport/tally/closeout.py @@ -0,0 +1,319 @@ +import math +import numpy as np + +from numba import literal_unroll, njit, objmode +from mpi4py import MPI + +#### + +import mcdc.mcdc_set as mcdc_set +import mcdc.transport.particle_bank as particle_bank_module + +from mcdc.constant import ( + GYRATION_RADIUS_ALL, + GYRATION_RADIUS_INFINITE_X, + GYRATION_RADIUS_INFINITE_Y, + GYRATION_RADIUS_INFINITE_Z, + GYRATION_RADIUS_ONLY_X, + GYRATION_RADIUS_ONLY_Y, + GYRATION_RADIUS_ONLY_Z, +) +from mcdc.print_ import print_structure + +# ====================================================================================== +# Reduce tally bins +# ====================================================================================== + + +@njit +def reduce(simulation, data): + for tally in simulation["tallies"]: + _reduce(tally, simulation, data) + + +@njit +def _reduce(tally, simulation, data): + N = tally["bin_length"] + start = tally["bin_offset"] + end = start + N + + # Normalize + N_particle = simulation["settings"]["N_particle"] + for i in range(N): + data[start + i] /= N_particle + + # MPI Reduce + buff = np.zeros(N) + with objmode(): + MPI.COMM_WORLD.Reduce(data[start:end], buff, MPI.SUM, 0) + data[start:end] = buff + + +# ====================================================================================== +# Accumulate tally bins +# ====================================================================================== + + +@njit +def accumulate(simulation, data): + for 
tally in simulation["tallies"]:
        _accumulate(tally, data)


@njit
def _accumulate(tally, data):
    """Fold one tally's per-history score bins into its running sum and
    sum-of-squares accumulators in `data`, then zero the score bins for
    the next history/batch."""
    N_bin = tally["bin_length"]
    offset_bin = tally["bin_offset"]
    offset_sum = tally["bin_sum_offset"]
    offset_sum_square = tally["bin_sum_square_offset"]

    # Note: Three separate loops are employed to avoid cache miss due to potentially
    # large N_bin

    # Sum of score
    for i in range(N_bin):
        score = data[offset_bin + i]
        data[offset_sum + i] += score

    # Sum of score squared
    for i in range(N_bin):
        score = data[offset_bin + i]
        data[offset_sum_square + i] += score * score

    # Reset score bin
    for i in range(N_bin):
        data[offset_bin + i] = 0.0


# ======================================================================================
# Finalize
# ======================================================================================


@njit
def finalize(simulation, data):
    """Finalize statistics (mean and standard deviation) for every tally."""
    for tally in simulation["tallies"]:
        _finalize(tally, simulation, data)


@njit
def _finalize(tally, simulation, data):
    """Convert a tally's accumulated sum / sum-of-squares into the sample mean
    and its standard deviation, stored in place in `data`.

    The effective sample count N_history is chosen in order of precedence:
    number of batches (if N_batch > 1), number of active cycles (eigenvalue
    mode), or number of histories (fixed-source, single batch) — in which
    case the sums are first MPI-reduced onto rank 0.
    """
    N_history = simulation["settings"]["N_particle"]
    N_batch = simulation["settings"]["N_batch"]
    N_bin = tally["bin_length"]
    sum_start = tally["bin_sum_offset"]
    sum_sq_start = tally["bin_sum_square_offset"]
    sum_end = sum_start + N_bin
    sum_sq_end = sum_sq_start + N_bin

    if N_batch > 1:
        N_history = N_batch

    elif simulation["settings"]["neutron_eigenvalue_mode"]:
        N_history = simulation["settings"]["N_active"]

    else:
        # MPI Reduce
        # objmode exits numba-compiled code so that mpi4py (pure Python
        # interface) can be called.
        buff = np.zeros(N_bin)
        buff_sq = np.zeros(N_bin)
        with objmode():
            MPI.COMM_WORLD.Reduce(data[sum_start:sum_end], buff, MPI.SUM, 0)
            MPI.COMM_WORLD.Reduce(data[sum_sq_start:sum_sq_end], buff_sq, MPI.SUM, 0)
        data[sum_start:sum_end] = buff
        data[sum_sq_start:sum_sq_end] = buff_sq

    # Calculate and store statistics
    # sum --> mean
    # sum_sq --> standard deviation
    # NOTE(review): if N_history == 1 the (N_history - 1) divisor below is
    # zero, and a radicand more negative than the 1e-16 tolerance would make
    # math.sqrt fail — presumably callers guarantee N_history > 1; verify.
    N_bin = tally["bin_length"]
    offset_sum = tally["bin_sum_offset"]
    offset_sum_square = tally["bin_sum_square_offset"]
    for i in range(N_bin):
        data[offset_sum + i] = data[offset_sum + i] / N_history
        radicand = (
            data[offset_sum_square + i] / N_history - np.square(data[offset_sum + i])
        ) / (N_history - 1)

        # Check for round-off error (TODO: Check why this is needed.)
        if abs(radicand) < 1e-16:
            data[offset_sum_square + i] = 0.0
        else:
            data[offset_sum_square + i] = math.sqrt(radicand)


# ======================================================================================
# Reset sum bins
# ======================================================================================


@njit
def reset_sum_bins(simulation, data):
    """Zero the sum and sum-of-squares accumulators of every tally."""
    for tally in simulation["tallies"]:
        _reset_sum_bins(tally, data)


@njit
def _reset_sum_bins(tally, data):
    """Zero one tally's sum and sum-of-squares accumulators in `data`."""
    N_bin = tally["bin_length"]
    offset_sum = tally["bin_sum_offset"]
    offset_sum_square = tally["bin_sum_square_offset"]

    for i in range(N_bin):
        data[offset_sum + i] = 0.0
        data[offset_sum_square + i] = 0.0


# ======================================================================================
# Eigenvalue
# ======================================================================================


@njit
def eigenvalue_cycle(simulation, data):
    """End-of-cycle eigenvalue bookkeeping: MPI-reduce the global tallies,
    update k_eff, accumulate running cycle statistics, and (optionally)
    compute the gyration radius of the census bank."""
    idx_cycle = simulation["idx_cycle"]
    N_particle = simulation["settings"]["N_particle"]

    # MPI Allreduce
    buff_nuSigmaF = np.zeros(1, np.float64)
    buff_n = np.zeros(1, np.float64)
    buff_nmax = np.zeros(1, np.float64)
    buff_C = np.zeros(1, np.float64)
    buff_Cmax = np.zeros(1, np.float64)
    with objmode():
        MPI.COMM_WORLD.Allreduce(
            np.array(simulation["eigenvalue_tally_nuSigmaF"]), buff_nuSigmaF, MPI.SUM
        )
        # Density tallies are only accumulated during active cycles.
        if simulation["cycle_active"]:
            MPI.COMM_WORLD.Allreduce(
                np.array(simulation["eigenvalue_tally_n"]), buff_n, MPI.SUM
            )
            MPI.COMM_WORLD.Allreduce(
                np.array([simulation["n_max"]]), buff_nmax, MPI.MAX
            )
            MPI.COMM_WORLD.Allreduce(
                np.array(simulation["eigenvalue_tally_C"]), buff_C, MPI.SUM
            )
            MPI.COMM_WORLD.Allreduce(
                np.array([simulation["C_max"]]), buff_Cmax, MPI.MAX
            )

    # Update and store k_eff
    simulation["k_eff"] = buff_nuSigmaF[0] / N_particle
    mcdc_set.simulation.k_cycle(idx_cycle, simulation, data, value=simulation["k_eff"])

    # Normalize other eigenvalue/global tallies
    tally_n = buff_n[0] / N_particle
    tally_C = buff_C[0] / N_particle

    # Maximum densities
    simulation["n_max"] = buff_nmax[0]
    simulation["C_max"] = buff_Cmax[0]

    # Accumulate running average
    if simulation["cycle_active"]:
        simulation["k_avg"] += simulation["k_eff"]
        simulation["k_sdv"] += simulation["k_eff"] * simulation["k_eff"]
        simulation["n_avg"] += tally_n
        simulation["n_sdv"] += tally_n * tally_n
        simulation["C_avg"] += tally_C
        simulation["C_sdv"] += tally_C * tally_C

        # N = number of active cycles completed so far (current one included)
        N = 1 + simulation["idx_cycle"] - simulation["settings"]["N_inactive"]
        simulation["k_avg_running"] = simulation["k_avg"] / N
        if N == 1:
            # A single sample has no defined standard deviation
            simulation["k_sdv_running"] = 0.0
        else:
            simulation["k_sdv_running"] = math.sqrt(
                (simulation["k_sdv"] / N - simulation["k_avg_running"] ** 2) / (N - 1)
            )

    # Reset accumulators
    simulation["eigenvalue_tally_nuSigmaF"][0] = 0.0
    simulation["eigenvalue_tally_n"][0] = 0.0
    simulation["eigenvalue_tally_C"][0] = 0.0

    # =====================================================================
    # Gyration radius
    # =====================================================================

    if simulation["settings"]["use_gyration_radius"]:
        # Center of mass (weight-averaged position of the census bank)
        N_local = particle_bank_module.get_bank_size(simulation["bank_census"])
        total_local = np.zeros(4, np.float64)  # [x,y,z,W]
        total = np.zeros(4, np.float64)
        for i in range(N_local):
            P = simulation["bank_census"]["particle_data"][i]
            total_local[0] += P["x"] * P["w"]
            total_local[1] += P["y"] * P["w"]
            total_local[2] += P["z"] * P["w"]
            total_local[3] += P["w"]
        # MPI Allreduce
        with objmode():
            MPI.COMM_WORLD.Allreduce(total_local, total, MPI.SUM)
        # COM
        # NOTE(review): assumes total census weight W > 0 (non-empty global
        # bank) — presumably guaranteed in eigenvalue mode; verify.
        W = total[3]
        com_x = total[0] / W
        com_y = total[1] / W
        com_z = total[2] / W

        # Distance RMS
        # The branch selects which coordinates contribute, depending on the
        # (in)finite/1D nature of the problem declared in the settings.
        rms_local = np.zeros(1, np.float64)
        rms = np.zeros(1, np.float64)
        gr_type = simulation["settings"]["gyration_radius_type"]
        if gr_type == GYRATION_RADIUS_ALL:
            for i in range(N_local):
                P = simulation["bank_census"]["particle_data"][i]
                rms_local[0] += (
                    (P["x"] - com_x) ** 2
                    + (P["y"] - com_y) ** 2
                    + (P["z"] - com_z) ** 2
                ) * P["w"]
        elif gr_type == GYRATION_RADIUS_INFINITE_X:
            for i in range(N_local):
                P = simulation["bank_census"]["particle_data"][i]
                rms_local[0] += ((P["y"] - com_y) ** 2 + (P["z"] - com_z) ** 2) * P["w"]
        elif gr_type == GYRATION_RADIUS_INFINITE_Y:
            for i in range(N_local):
                P = simulation["bank_census"]["particle_data"][i]
                rms_local[0] += ((P["x"] - com_x) ** 2 + (P["z"] - com_z) ** 2) * P["w"]
        elif gr_type == GYRATION_RADIUS_INFINITE_Z:
            for i in range(N_local):
                P = simulation["bank_census"]["particle_data"][i]
                rms_local[0] += ((P["x"] - com_x) ** 2 + (P["y"] - com_y) ** 2) * P["w"]
        elif gr_type == GYRATION_RADIUS_ONLY_X:
            for i in range(N_local):
                P = simulation["bank_census"]["particle_data"][i]
                rms_local[0] += ((P["x"] - com_x) ** 2) * P["w"]
        elif gr_type == GYRATION_RADIUS_ONLY_Y:
            for i in range(N_local):
                P = simulation["bank_census"]["particle_data"][i]
                rms_local[0] += ((P["y"] - com_y) ** 2) * P["w"]
        elif gr_type == GYRATION_RADIUS_ONLY_Z:
            for i in range(N_local):
                P = simulation["bank_census"]["particle_data"][i]
                rms_local[0] += ((P["z"] - com_z) ** 2) * P["w"]

        # MPI Allreduce
        with objmode():
            MPI.COMM_WORLD.Allreduce(rms_local, rms, MPI.SUM)
        # NOTE(review): `rms` is rebound from a 1-element array to a float here
        rms = math.sqrt(rms[0] / W)

        # Gyration radius
        mcdc_set.simulation.gyration_radius(idx_cycle, simulation, data, value=rms)


@njit
def eigenvalue_simulation(simulation):
    """End-of-simulation statistics for the cycle-averaged neutron and
    precursor densities: turn the accumulated sums into mean and standard
    deviation over the N_active active cycles."""
    N = simulation["settings"]["N_active"]
    simulation["n_avg"] /= N
    simulation["C_avg"] /= N
    if N > 1:
        simulation["n_sdv"] = math.sqrt(
            (simulation["n_sdv"] / N - simulation["n_avg"] **
2) / (N - 1)
        )
        simulation["C_sdv"] = math.sqrt(
            (simulation["C_sdv"] / N - simulation["C_avg"] ** 2) / (N - 1)
        )
    else:
        # A single active cycle gives no spread estimate
        simulation["n_sdv"] = 0.0
        simulation["C_sdv"] = 0.0
diff --git a/mcdc/mcdc/transport/tally/filter.py b/mcdc/mcdc/transport/tally/filter.py
new file mode 100644 index 000000000..a743f785f --- /dev/null +++ b/mcdc/mcdc/transport/tally/filter.py @@ -0,0 +1,127 @@
import math

from numba import literal_unroll, njit
# NOTE(review): `literal_unroll` appears unused in this module — confirm
# before removing.

####

import mcdc.mcdc_get as mcdc_get
import mcdc.mcdc_set as mcdc_set

from mcdc.constant import (
    COINCIDENCE_TOLERANCE_DIRECTION,
    COINCIDENCE_TOLERANCE_ENERGY,
    COINCIDENCE_TOLERANCE_TIME,
)
from mcdc.transport.util import find_bin_with_tolerance, find_bin_with_rules


@njit
def get_filter_indices(particle_container, tally, data, MG_mode):
    """Return the (mu, azi, energy, time) filter-bin indices of the particle
    for the given tally. Filters that the tally does not use yield index 0;
    an out-of-range value yields -1 from the underlying bin search."""
    i_mu, i_azi, i_energy, i_time = 0, 0, 0, 0

    if tally["filter_direction"]:
        i_mu, i_azi = get_direction_index(particle_container, tally, data)

    if tally["filter_energy"]:
        i_energy = get_energy_index(particle_container, tally, data, MG_mode)

    if tally["filter_time"]:
        i_time = get_time_index(particle_container, tally, data)

    return i_mu, i_azi, i_energy, i_time


@njit
def get_direction_index(particle_container, tally, data):
    """Return the (polar, azimuthal) direction-filter bin indices of the
    particle's flight direction (-1 for a component outside the grid)."""
    particle = particle_container[0]

    # Particle properties
    ux = particle["ux"]
    uy = particle["uy"]
    uz = particle["uz"]

    # Polar reference
    nx = tally["polar_reference"][0]
    ny = tally["polar_reference"][1]
    nz = tally["polar_reference"][2]

    # TODO: Rotate direction based on the polar reference
    if nz != 1.0:
        pass

    # With the default z-axis polar reference, mu is simply uz.
    mu = uz
    # NOTE(review): if ux == uy == 0 (flight exactly along z) the division
    # below is 0/0 — confirm callers cannot hit this with a direction filter.
    azi = math.acos(ux / math.sqrt(ux * ux + uy * uy))
    if uy < 0.0:
        azi *= -1

    tolerance = COINCIDENCE_TOLERANCE_DIRECTION

    grid_mu = data[tally["mu_offset"] : (tally["mu_offset"] + tally["mu_length"])]
    # Above is equivalent to: grid_mu = mcdc_get.tally.mu_all(tally, data)
    grid_azi = data[tally["azi_offset"] : (tally["azi_offset"] + tally["azi_length"])]
    # Above is equivalent to: grid_azi = mcdc_get.tally.azi_all(tally, data)

    i_mu = find_bin_with_tolerance(mu, grid_mu, tolerance)
    i_azi = find_bin_with_tolerance(azi, grid_azi, tolerance)
    return i_mu, i_azi


@njit
def get_energy_index(particle_container, tally, data, neutron_multigroup_mode):
    """Return the energy-filter bin index of the particle (-1 if outside the
    grid). In multigroup mode the group index `g` is binned instead of the
    continuous energy `E`."""
    particle = particle_container[0]

    if neutron_multigroup_mode:
        E = particle["g"]
    else:
        E = particle["E"]

    tolerance = COINCIDENCE_TOLERANCE_ENERGY
    grid_energy = data[
        tally["energy_offset"] : (tally["energy_offset"] + tally["energy_length"])
    ]
    # Above is equivalent to: grid_energy = mcdc_get.tally.energy_all(tally, data)

    return find_bin_with_tolerance(E, grid_energy, tolerance)


@njit
def get_time_index(particle_container, tally, data):
    """Return the time-filter bin index of the particle's current time
    (-1 if outside the grid). Ties on a grid edge resolve to the upper bin
    (go_lower=False)."""
    particle = particle_container[0]

    # Particle properties
    time = particle["t"]

    grid_time = data[
        tally["time_offset"] : (tally["time_offset"] + tally["time_length"])
    ]
    # Above is equivalent to: grid_time = mcdc_get.tally.time_all(tally, data)

    tolerance = COINCIDENCE_TOLERANCE_TIME
    go_lower = False
    return find_bin_with_rules(time, grid_time, tolerance, go_lower)


@njit
def set_census_based_time_grid(simulation, data):
    """Set every tally's time grid to a uniform subdivision of the current
    census interval into `census_tally_frequency` bins."""
    settings = simulation["settings"]
    tally_frequency = settings["census_tally_frequency"]
    idx_census = simulation["idx_census"]

    # Starting time: zero for the first census interval, otherwise the
    # previous census time.
    if idx_census == 0:
        t_start = 0.0
    else:
        t_start = mcdc_get.settings.census_time(idx_census - 1, settings, data)

    # Ending time
    t_end = mcdc_get.settings.census_time(idx_census, settings, data)

    # Time grid width
    dt = (t_end - t_start) / tally_frequency

    # Set the time grid to all tallies (edges built cumulatively from t_start)
    for tally in simulation["tallies"]:
        mcdc_set.tally.time(0, tally, data, t_start)
        for j in range(tally_frequency):
            t_next = mcdc_get.tally.time(j, tally, data) + dt
            mcdc_set.tally.time(j + 1, tally, data, t_next)
diff --git a/mcdc/mcdc/transport/tally/score.py b/mcdc/mcdc/transport/tally/score.py
new file mode
100644 index 000000000..924c03386 --- /dev/null +++ b/mcdc/mcdc/transport/tally/score.py @@ -0,0 +1,492 @@
from numba import njit

####

import mcdc.mcdc_get as mcdc_get
import mcdc.transport.mesh as mesh_module
import mcdc.transport.physics as physics
import mcdc.transport.util as util

from mcdc.constant import (
    AXIS_T,
    AXIS_X,
    AXIS_Y,
    AXIS_Z,
    COINCIDENCE_TOLERANCE,
    COINCIDENCE_TOLERANCE_TIME,
    INF,
    NEUTRON_REACTION_CAPTURE,
    NEUTRON_REACTION_FISSION,
    NEUTRON_REACTION_TOTAL,
    SCORE_FLUX,
    SCORE_DENSITY,
    SCORE_COLLISION,
    SCORE_CAPTURE,
    SCORE_FISSION,
    SCORE_NET_CURRENT,
    SCORE_ENERGY_DEPOSITION,
    SPATIAL_FILTER_MESH,
)
from mcdc.transport.geometry.surface import get_normal_component
from mcdc.transport.tally.filter import get_filter_indices

# ======================================================================================
# Surface tally
# ======================================================================================


@njit
def surface_tally(particle_container, surface, tally, simulation, data):
    """Score surface-crossing estimators (currently net current) for a
    particle crossing `surface`. Scores accumulate into the parent tally's
    bins in `data`."""
    particle = particle_container[0]
    tally_base = simulation["tallies"][tally["parent_ID"]]

    # Get filter indices
    MG_mode = simulation["settings"]["neutron_multigroup_mode"]
    i_mu, i_azi, i_energy, i_time = get_filter_indices(
        particle_container, tally_base, data, MG_mode
    )

    # No score if outside non-changing phase-space bins
    if i_mu == -1 or i_azi == -1 or i_energy == -1 or i_time == -1:
        return

    # Tally index
    idx_base = (
        tally_base["bin_offset"]
        + i_mu * tally_base["stride_mu"]
        + i_azi * tally_base["stride_azi"]
        + i_energy * tally_base["stride_energy"]
        + i_time * tally_base["stride_time"]
    )

    # Flux estimate: weight divided by |cosine to the surface normal|
    speed = physics.particle_speed(particle_container, simulation, data)
    mu = get_normal_component(particle_container, speed, surface, data)
    flux = particle["w"] / abs(mu)

    # Score
    for i_score in range(tally_base["scores_length"]):
        score_type = mcdc_get.tally.scores(i_score, tally_base, data)
        score = 0.0
        if score_type == SCORE_NET_CURRENT:
            # NOTE(review): this re-fetches the surface from the particle's
            # surface_ID and recomputes mu, shadowing the `surface` argument
            # and the mu computed above — presumably equivalent; verify.
            surface = simulation["surfaces"][particle["surface_ID"]]
            mu = get_normal_component(particle_container, speed, surface, data)
            score = flux * mu
        util.atomic_add(data, idx_base + i_score, score)


# ======================================================================================
# Collision tally
# ======================================================================================


@njit
def collision_tally(
    particle_container, collision_data_container, tally, simulation, data
):
    """Score collision estimators (currently energy deposition) at the
    particle's collision site, with optional mesh spatial filtering."""
    particle = particle_container[0]
    collision_data = collision_data_container[0]
    tally_base = simulation["tallies"][tally["parent_ID"]]

    # Get filter indices
    MG_mode = simulation["settings"]["neutron_multigroup_mode"]
    i_mu, i_azi, i_energy, i_time = get_filter_indices(
        particle_container, tally_base, data, MG_mode
    )

    # No score if outside non-changing phase-space bins
    if i_mu == -1 or i_azi == -1 or i_energy == -1 or i_time == -1:
        return

    # Mesh tally indices if needed
    i_x, i_y, i_z = 0, 0, 0
    mesh_tally = tally["spatial_filter_type"] == SPATIAL_FILTER_MESH
    if mesh_tally:
        mesh = simulation["meshes"][tally["spatial_filter_ID"]]
        i_x, i_y, i_z = mesh_module.get_indices(
            particle_container, mesh, simulation, data
        )

        # No score outside mesh bins
        if i_x == -1 or i_y == -1 or i_z == -1:
            return

    # Tally index
    idx_base = (
        tally_base["bin_offset"]
        + i_mu * tally_base["stride_mu"]
        + i_azi * tally_base["stride_azi"]
        + i_energy * tally_base["stride_energy"]
        + i_time * tally_base["stride_time"]
    )
    if mesh_tally:
        idx_base += (
            +i_x * tally["mesh_stride_x"]
            + i_y * tally["mesh_stride_y"]
            + i_z * tally["mesh_stride_z"]
        )

    # Score
    for i_score in range(tally_base["scores_length"]):
        score_type = mcdc_get.tally.scores(i_score, tally_base, data)
        score = 0.0
        if score_type == SCORE_ENERGY_DEPOSITION:
            score = collision_data["energy_deposition"]
        util.atomic_add(data, idx_base + i_score, score)


# ======================================================================================
# Tracklength tally
# ======================================================================================


@njit
def tracklength_tally(particle_container, distance, tally, simulation, data):
    """Score track-length estimators over a flight of length `distance`,
    sweeping the track segment-by-segment across time-grid and (optionally)
    mesh-grid crossings. Each segment scores `segment length * weight` worth
    of flux-like quantities into the parent tally's bins."""
    particle = particle_container[0]
    tally_base = simulation["tallies"][tally["parent_ID"]]

    # Get filter indices
    MG_mode = simulation["settings"]["neutron_multigroup_mode"]
    i_mu, i_azi, i_energy, i_time = get_filter_indices(
        particle_container, tally_base, data, MG_mode
    )

    # No score if outside non-changing phase-space bins
    # (time is handled separately below, since the track can cross time bins)
    if i_mu == -1 or i_azi == -1 or i_energy == -1:
        return

    # Particle/track properties; ut is time per unit distance (1/speed)
    x = particle["x"]
    y = particle["y"]
    z = particle["z"]
    t = particle["t"]
    ux = particle["ux"]
    uy = particle["uy"]
    uz = particle["uz"]
    ut = 1.0 / physics.particle_speed(particle_container, simulation, data)
    x_final = x + ux * distance
    y_final = y + uy * distance
    z_final = z + uz * distance
    t_final = t + ut * distance

    # No score if particle does not cross the time bins
    t_min = mcdc_get.tally.time(0, tally_base, data)
    t_max = mcdc_get.tally.time_last(tally_base, data)
    if (
        t_final < t_min + COINCIDENCE_TOLERANCE_TIME
        or t > t_max - COINCIDENCE_TOLERANCE_TIME
    ):
        return

    # Get the appropriate time index if the filter starts in the future
    if t < t_min + COINCIDENCE_TOLERANCE_TIME:
        i_time = 0

    # ==============================
    # Mesh tally preparation [START]
    # - Get mesh bin indices
    # - Return if it's outside mesh grid

    # Flag if it's a mesh tally
    mesh_tally = tally["spatial_filter_type"] == SPATIAL_FILTER_MESH

    # Mesh axis indices
    i_x, i_y, i_z = 0, 0, 0
    if mesh_tally:
        mesh = simulation["meshes"][tally["spatial_filter_ID"]]

        # Mesh axis indices
        i_x, i_y, i_z = mesh_module.get_indices(
            particle_container, mesh, simulation, data
        )

        # No score if particle does not cross the mesh bins
        # Also get the appropriate index if needed
        # (each axis checked against the direction of travel)
        x_min = mesh_module.get_x(0, mesh, simulation, data)
        x_max = mesh_module.get_x(mesh["Nx"], mesh, simulation, data)
        if ux > 0.0:
            if (
                x_final < x_min + COINCIDENCE_TOLERANCE
                or x > x_max - COINCIDENCE_TOLERANCE
            ):
                return
            if x < x_min + COINCIDENCE_TOLERANCE:
                i_x = 0
        else:
            if (
                x < x_min + COINCIDENCE_TOLERANCE
                or x_final > x_max - COINCIDENCE_TOLERANCE
            ):
                return
            if x > x_max - COINCIDENCE_TOLERANCE:
                i_x = mesh["Nx"]
        #
        y_min = mesh_module.get_y(0, mesh, simulation, data)
        y_max = mesh_module.get_y(mesh["Ny"], mesh, simulation, data)
        if uy > 0.0:
            if (
                y_final < y_min + COINCIDENCE_TOLERANCE
                or y > y_max - COINCIDENCE_TOLERANCE
            ):
                return
            if y < y_min + COINCIDENCE_TOLERANCE:
                i_y = 0
        else:
            if (
                y < y_min + COINCIDENCE_TOLERANCE
                or y_final > y_max - COINCIDENCE_TOLERANCE
            ):
                return
            if y > y_max - COINCIDENCE_TOLERANCE:
                i_y = mesh["Ny"]
        #
        z_min = mesh_module.get_z(0, mesh, simulation, data)
        z_max = mesh_module.get_z(mesh["Nz"], mesh, simulation, data)
        if uz > 0.0:
            if (
                z_final < z_min + COINCIDENCE_TOLERANCE
                or z > z_max - COINCIDENCE_TOLERANCE
            ):
                return
            if z < z_min + COINCIDENCE_TOLERANCE:
                i_z = 0
        else:
            if (
                z < z_min + COINCIDENCE_TOLERANCE
                or z_final > z_max - COINCIDENCE_TOLERANCE
            ):
                return
            if z > z_max - COINCIDENCE_TOLERANCE:
                i_z = mesh["Nz"]

    # Mesh tally preparation [END]
    # ============================

    # Tally base index
    idx_base = (
        tally_base["bin_offset"]
        + i_mu * tally_base["stride_mu"]
        + i_azi * tally_base["stride_azi"]
        + i_energy * tally_base["stride_energy"]
        + i_time * tally_base["stride_time"]
    )
    if mesh_tally:
        idx_base += (
            i_x * tally["mesh_stride_x"]
            + i_y * tally["mesh_stride_y"]
            + i_z * tally["mesh_stride_z"]
        )

    # Sweep through the distance
    distance_swept = 0.0
    while distance_swept < distance - COINCIDENCE_TOLERANCE:
        # The next time grid
        t_next = mcdc_get.tally.time(i_time + 1, tally_base, data)

        # Get the distance to score in this segment
        if t_final < t_next - COINCIDENCE_TOLERANCE_TIME:
            distance_scored = distance - distance_swept
        else:
            distance_scored = (t_next - t) / ut

        # ===========================================
        # Mesh tally grid crossing evaluation [START]
        # - Determine smaller distance and which
        #   axis is crossed

        axis_crossed = AXIS_T
        if mesh_tally:
            mesh = simulation["meshes"][tally["spatial_filter_ID"]]

            # x-direction
            if ux == 0.0:
                dx = INF
            else:
                if ux > 0.0:
                    x_next = mesh_module.get_x(i_x + 1, mesh, simulation, data)
                    x_next = min(x_next, x_final)
                else:
                    x_next = mesh_module.get_x(i_x, mesh, simulation, data)
                    x_next = max(x_next, x_final)
                dx = (x_next - x) / ux
                if dx <= distance_scored:
                    axis_crossed = AXIS_X
                    distance_scored = dx

            # y-direction
            if uy == 0.0:
                dy = INF
            else:
                if uy > 0.0:
                    y_next = mesh_module.get_y(i_y + 1, mesh, simulation, data)
                    y_next = min(y_next, y_final)
                else:
                    y_next = mesh_module.get_y(i_y, mesh, simulation, data)
                    y_next = max(y_next, y_final)
                dy = (y_next - y) / uy
                if dy <= distance_scored:
                    axis_crossed = AXIS_Y
                    distance_scored = dy

            # z-direction
            if uz == 0.0:
                dz = INF
            else:
                if uz > 0.0:
                    z_next = mesh_module.get_z(i_z + 1, mesh, simulation, data)
                    z_next = min(z_next, z_final)
                else:
                    z_next = mesh_module.get_z(i_z, mesh, simulation, data)
                    z_next = max(z_next, z_final)
                dz = (z_next - z) / uz
                if dz <= distance_scored:
                    axis_crossed = AXIS_Z
                    distance_scored = dz

        # Mesh tally grid crossing evaluation [END]
        # =========================================

        # Score the segment: track-length estimator is length * weight
        flux = distance_scored * particle["w"]
        for i_score in range(tally_base["scores_length"]):
            score_type = mcdc_get.tally.scores(i_score, tally_base, data)
            score = 0.0
            if score_type == SCORE_FLUX:
                score = flux
            elif score_type == SCORE_DENSITY:
                speed = physics.particle_speed(particle_container, simulation, data)
                score = flux / speed
            elif score_type == SCORE_COLLISION:
                score = flux * physics.macro_xs(
                    NEUTRON_REACTION_TOTAL, particle_container, simulation, data
                )
            elif score_type == SCORE_CAPTURE:
                score = flux * physics.macro_xs(
                    NEUTRON_REACTION_CAPTURE, particle_container, simulation, data
                )
            elif score_type == SCORE_FISSION:
                score = flux * physics.macro_xs(
                    NEUTRON_REACTION_FISSION, particle_container, simulation, data
                )
            util.atomic_add(data, idx_base + i_score, score)

        # Accumulate distance swept
        distance_swept += distance_scored

        # Move the 4D position
        if mesh_tally:
            x += distance_scored * ux
            y += distance_scored * uy
            z += distance_scored * uz
        t += distance_scored * ut

        # Increment index and check if out of bounds
        if axis_crossed == AXIS_T:
            i_time += 1
            idx_base += tally_base["stride_time"]
            if i_time == tally_base["time_length"] - 1:
                return
        elif mesh_tally:
            mesh = simulation["meshes"][tally["spatial_filter_ID"]]
            if axis_crossed == AXIS_X:
                if ux > 0.0:
                    i_x += 1
                    if i_x == mesh["Nx"]:
                        return
                    idx_base += tally["mesh_stride_x"]
                else:
                    i_x -= 1
                    if i_x == -1:
                        return
                    idx_base -= tally["mesh_stride_x"]
            elif axis_crossed == AXIS_Y:
                if uy > 0.0:
                    i_y += 1
                    if i_y == mesh["Ny"]:
                        return
                    idx_base += tally["mesh_stride_y"]
                else:
                    i_y -= 1
                    if i_y == -1:
                        return
                    idx_base -= tally["mesh_stride_y"]
            elif axis_crossed == AXIS_Z:
                if uz > 0.0:
                    i_z += 1
                    if i_z == mesh["Nz"]:
                        return
                    idx_base += tally["mesh_stride_z"]
                else:
                    i_z -= 1
                    if i_z == -1:
                        return
                    idx_base -= tally["mesh_stride_z"]


# =============================================================================
# Eigenvalue tally
# =============================================================================


@njit
def eigenvalue_tally(particle_container, distance, simulation, data):
    particle =
particle_container[0]
    # Track-length estimator weight for global (eigenvalue) tallies
    flux = distance * particle["w"]

    # Get nu-fission
    nuSigmaF = physics.neutron_production_xs(
        NEUTRON_REACTION_FISSION, particle_container, simulation, data
    )

    # Fission production (needed even during inactive cycle)
    util.atomic_add(simulation["eigenvalue_tally_nuSigmaF"], 0, flux * nuSigmaF)

    # Done, if inactive
    if not simulation["cycle_active"]:
        return

    # ==================================================================================
    # Neutron density
    # ==================================================================================

    v = physics.particle_speed(particle_container, simulation, data)
    n_density = flux / v
    util.atomic_add(simulation["eigenvalue_tally_n"], 0, n_density)

    # Maximum neutron density
    if simulation["n_max"] < n_density:
        simulation["n_max"] = n_density

    # ==================================================================================
    # TODO: Delayed neutron precursor density
    # ==================================================================================
    # NOTE(review): everything after this `return` is intentionally dead code
    # kept as a TODO sketch; it references names not defined in this module
    # (J, P, material, get_nu_group, NU_FISSION_DELAYED) and will not run.
    return
    # Get the decay-weighted multiplicity
    total = 0.0
    if simulation["settings"]["neutron_multigroup_mode"]:
        g = particle["g"]
        for j in range(J):
            nu_d = mcdc_get.material.mgxs_nu_d(g, j, material, data)
            decay = mcdc_get.material.mgxs_decay_rate(j, material, data)
            total += nu_d / decay
    else:
        E = P["E"]
        for i in range(material["N_nuclide"]):
            ID_nuclide = material["nuclide_IDs"][i]
            nuclide = simulation["nuclides"][ID_nuclide]
            if not nuclide["fissionable"]:
                continue
            for j in range(J):
                nu_d = get_nu_group(NU_FISSION_DELAYED, nuclide, E, j)
                decay = nuclide["ce_decay"][j]
                total += nu_d / decay

    SigmaF = physics.macro_xs(
        NEUTRON_REACTION_FISSION, particle_container, simulation, data
    )
    C_density = flux * total * SigmaF / simulation["k_eff"]
    util.atomic_add(simulation["eigenvalue_tally_C"], 0, C_density)

    # Maximum precursor density
    if simulation["C_max"] < C_density:
        simulation["C_max"] = C_density
diff --git a/mcdc/mcdc/transport/technique.py b/mcdc/mcdc/transport/technique.py
new file mode 100644 index 000000000..fe629af33 --- /dev/null +++ b/mcdc/mcdc/transport/technique.py @@ -0,0 +1,83 @@
import numpy as np
import math

from numba import njit

####

import mcdc.numba_types as type_
import mcdc.transport.particle as particle_module
import mcdc.transport.particle_bank as particle_bank_module
import mcdc.transport.rng as rng
import mcdc.transport.util as util

# ======================================================================================
# Weight Roulette
# ======================================================================================


@njit
def weight_roulette(particle_container, simulation):
    """Russian-roulette a low-weight particle: below the weight threshold it
    either survives with its weight raised to the target (probability
    w / w_target) or is killed, preserving the expected weight."""
    particle = particle_container[0]
    if particle["w"] < simulation["weight_roulette"]["weight_threshold"]:
        w_target = simulation["weight_roulette"]["weight_target"]
        survival_probability = particle["w"] / w_target
        if rng.lcg(particle_container) < survival_probability:
            particle["w"] = w_target
        else:
            particle["alive"] = False


# ======================================================================================
# Population Control
# ======================================================================================


@njit
def population_control(simulation):
    """Uniform Splitting-Roulette technique

    Resize the census bank population of N particles toward the target M
    (= settings N_particle) by splitting (sn = N/M < 1 per-particle copies
    rolled up probabilistically) and rescaling weights by ws = N/M, writing
    the survivors into the source bank.
    """

    bank_census = simulation["bank_census"]
    M = simulation["settings"]["N_particle"]
    bank_source = simulation["bank_source"]

    # Scan the bank (global prefix scan across MPI ranks)
    idx_start, N_local, N = particle_bank_module.bank_scanning(bank_census, simulation)
    # NOTE(review): idx_end appears unused below — confirm before removing.
    idx_end = idx_start + N_local

    # Abort if census bank is empty
    if N == 0:
        return

    # Weight scaling
    ws = float(N) / float(M)

    # Splitting Number
    sn = 1.0 / ws

    # Reusable record for emitting split copies
    P_rec_arr = util.local_array(1, type_.particle_data)
    P_rec = P_rec_arr[0]

    # Perform split-roulette to all particles in local bank
    particle_bank_module.set_bank_size(bank_source, 0)
    for idx in range(N_local):
        # Weight of the surviving particles
        w = bank_census["particle_data"][idx]["w"]
        w_survive = w * ws

        # Determine number of guaranteed splits
        N_split = math.floor(sn)

        # Survive the russian roulette?
        # (fractional part of sn is the probability of one extra copy)
        xi = rng.lcg(bank_census["particle_data"][idx : idx + 1])
        if xi < sn - N_split:
            N_split += 1

        # Split the particle
        for i in range(N_split):
            particle_module.copy_as_child(
                P_rec_arr, bank_census["particle_data"][idx : idx + 1]
            )
            # Set weight
            P_rec["w"] = w_survive
            particle_bank_module.bank_source_particle(P_rec_arr, simulation)
diff --git a/mcdc/mcdc/transport/util.py b/mcdc/mcdc/transport/util.py
new file mode 100644 index 000000000..4627841d0 --- /dev/null +++ b/mcdc/mcdc/transport/util.py @@ -0,0 +1,152 @@
import math
import numpy as np

from numba import njit
from typing import Sequence
# NOTE(review): `Sequence` is only referenced in docstrings — confirm before
# removing.


@njit
def find_bin_with_rules(value, grid, epsilon, go_lower):
    """
    Return the bin index i for which grid[i] <= value < grid[i+1], with optional
    epsilon tolerance and tie-breaking toward the lower/upper bin.

    Parameters
    ----------
    value : float
        Query point.
    grid : Sequence[float]
        Monotonically increasing bin edges of length N_grid = N_bin + 1.
    epsilon : float
        Tolerance to treat values as being exactly on a grid edge if
        |value - grid[k]| <= epsilon.
    go_lower : bool
        Tie-breaking rule when value is at/within epsilon of a grid edge:
        - True  -> tie to the lower/left bin
        - False -> tie to the upper/right bin

    Edge behavior (with epsilon)
    ----------------------------
    - Interior edges (grid[k], 0 < k < N_grid - 1):
        * go_lower=True  -> bin k-1
        * go_lower=False -> bin k
    - First edge (grid[0]):
        * If inside or exactly at grid[0] within epsilon:
            - go_lower=True  -> -1 (treat as outside left)
            - go_lower=False -> 0 (first bin)
    - Last edge (grid[-1]):
        * If exactly at/within epsilon:
            - go_lower=True  -> last bin (N_bin-1)
            - go_lower=False -> -1 (outside right)
    - Beyond first/last edge by more than epsilon: return -1.

    Notes
    -----
    - With epsilon=0 and go_lower=True, this reduces to the standard
      left-closed/right-open binning (grid[i] <= value < grid[i+1]).
    - Scalar-only implementation (no NumPy required).
    """
    n = len(grid)

    # Fast reject beyond tolerance band
    if value < grid[0] - epsilon or value > grid[-1] + epsilon:
        return -1

    # Base binary search (strict left-closed / right-open, no epsilon)
    low, high = 0, n - 1  # search over edge indices
    if value < grid[0] or value >= grid[-1]:
        base = -1
    else:
        while high - low > 1:
            mid = (low + high) // 2
            if value < grid[mid]:
                high = mid
            else:
                low = mid
        base = low  # provisional bin: [grid[low], grid[low+1])

    # Tie-breaking near edges (epsilon band)
    if base == -1:
        # Near first edge?
        if abs(value - grid[0]) <= epsilon:
            return -1 if go_lower else 0
        # Near last edge?
        if abs(value - grid[-1]) <= epsilon:
            return (n - 2) if go_lower else -1
        return -1

    idx = base

    # Check left edge of this bin
    if abs(value - grid[idx]) <= epsilon:
        if idx == 0:
            return -1 if go_lower else 0
        return (idx - 1) if go_lower else idx

    # Check right edge of this bin
    right_edge = grid[idx + 1]
    if abs(value - right_edge) <= epsilon:
        if idx + 1 == n - 1:  # last grid point
            return (n - 2) if go_lower else -1
        return idx if go_lower else (idx + 1)

    # Strict interior
    return idx


@njit
def find_bin(value, grid):
    """Standard left-closed/right-open binning: grid[i] <= value < grid[i+1]."""
    tolerance = 0.0
    go_lower = True
    return find_bin_with_rules(value, grid, tolerance, go_lower)


@njit
def find_bin_with_tolerance(value, grid, tolerance):
    """Binning with an epsilon edge band; edge ties go to the lower bin."""
    go_lower = True
    return find_bin_with_rules(value, grid, tolerance, go_lower)


# ======================================================================================
# Interpolation
# ======================================================================================


@njit
def linear_interpolation(x, x1, x2, y1, y2):
    """Linearly interpolate y(x) between the points (x1, y1) and (x2, y2)."""
    return y1 + (x - x1) * (y2 - y1) / (x2 - x1)


@njit
def log_interpolation(x, x1, x2, y1, y2):
    """Log-log interpolate y(x) between (x1, y1) and (x2, y2); requires all
    of x, x1, x2, y1, y2 to be positive."""
    # Convert to logs
    lx1, lx2 = math.log(x1), math.log(x2)
    ly1, ly2 = math.log(y1), math.log(y2)

    # Slope in log–log space
    m = (ly2 - ly1) / (lx2 - lx1)

    # Interpolate log(y)
    ly = ly1 + m * (math.log(x) - lx1)

    return math.exp(ly)


# ======================================================================================
# Framework utilities
# ======================================================================================


@njit
def atomic_add(array, idx, value):
    # NOTE(review): a plain += — not actually atomic; presumably a hook for a
    # parallel/GPU backend where this becomes a real atomic. Verify before
    # relying on it under threading.
    array[idx] += value


@njit
def local_array(shape, dtype):
    """Allocate a zero-initialized array (abstraction point for backends)."""
    return np.zeros(shape, dtype=dtype)


@njit
def access_simulation(program):
    """Identity on CPU; framework hook for accessing the simulation state."""
    return program
diff --git a/mcdc/mcdc/util.py b/mcdc/mcdc/util.py
new file mode 100644 index 000000000..445967f23 --- /dev/null +++ b/mcdc/mcdc/util.py @@ -0,0 +1,26 @@
def flatten(lst):
    """
    Recursively flattens a nested list of arbitrary depth.

    Parameters
    ----------
    lst : list
        A (possibly nested) list, e.g. [1, [2, [3, 4]], 5].

    Yields
    ------
    element
        Each non-list element contained in `lst`, in depth-first order.

    Examples
    --------
    >>> list(flatten([1, [2, [3, 4]], 5]))
    [1, 2, 3, 4, 5]
    """
    for item in lst:
        if isinstance(item, list):
            # If the current item is a list, recursively flatten it
            yield from flatten(item)
        else:
            # Otherwise, yield the item directly
            yield item
diff --git a/mcdc/mcdc/visualize.py b/mcdc/mcdc/visualize.py
new file mode 100644 index 000000000..25640041a --- /dev/null +++ b/mcdc/mcdc/visualize.py @@ -0,0 +1,262 @@
from numba import njit
import numpy as np
from mcdc.main import preparation

# Cache of the (simulation, data) pair from preparation(), so repeated
# visualize() calls skip re-running model setup.
_visualize_cache = None


def visualize(
    vis_type,
    x=0.0,
    y=0.0,
    z=0.0,
    pixels=(100, 100),
    colors=None,
    time=[0.0],
    save_as=None,
):
    """
    2D visualization of the created model

    Parameters
    ----------
    vis_type : {'xy', 'yx', 'xz', 'zx', 'yz', 'zy'}
        Axis plane to visualize
    x : float or array_like
        Plane x-position (float) for 'yz' plot. Range of x-axis for 'xy' or 'xz' plot.
    y : float or array_like
        Plane y-position (float) for 'xz' plot. Range of y-axis for 'xy' or 'yz' plot.
    z : float or array_like
        Plane z-position (float) for 'xy' plot. Range of z-axis for 'xz' or 'yz' plot.
+ time : array_like + Times at which the geometry snapshots are taken + pixels : array_like + Number of respective pixels in the two axes in vis_plane + colors : array_like + List of pairs of material and its color + """ + import matplotlib.pyplot as plt + import numpy as np + import sys + + from matplotlib import colors as mpl_colors + + # Use cached preparation if available + global _visualize_cache + if _visualize_cache is None: + _visualize_cache = preparation() + simulation_container, data = _visualize_cache + simulation = simulation_container[0] + + # ================================================================================== + # Numba-compiled functions + # ================================================================================== + + from mcdc.transport.geometry.interface import locate_particle + + @njit(cache=True) + def _compute_material_row( + first_coord, + second_midpoint, + first_key_idx, + second_key_idx, + reference_key_idx, + reference_val, + time_val, + particle_arr, + simulation, + data, + ): + """ + Compute material IDs for a single row of pixels using numba. + + Parameters + ---------- + first_coord : float + First axis coordinate + second_midpoint : np.ndarray + Midpoints along the second axis + first_key_idx : int + Index for first axis (0=x, 1=y, 2=z) + second_key_idx : int + Index for second axis (0=x, 1=y, 2=z) + reference_key_idx : int + Index for reference axis (0=x, 1=y, 2=z) + reference_val : float + Value for the reference (slice) coordinate + time_val : float + Time value for the visualization + particle_arr : np.ndarray + Particle array of size (1,) used for particle lookup. 
+ simulation : structured array + MCDC simulation data + data : structured array + Additional simulation data + """ + n_second = len(second_midpoint) + row_materials = np.empty(n_second, dtype=np.int32) + + particle = particle_arr[0] + + # Set time and energy + particle["t"] = time_val + particle["g"] = 0 + particle["E"] = 1e6 + particle["ux"] = 0.0 + particle["uy"] = 0.0 + particle["uz"] = 1.0 + + # Set reference coordinate + if reference_key_idx == 0: + particle["x"] = reference_val + elif reference_key_idx == 1: + particle["y"] = reference_val + else: + particle["z"] = reference_val + + # Set first axis coordinate + if first_key_idx == 0: + particle["x"] = first_coord + elif first_key_idx == 1: + particle["y"] = first_coord + else: + particle["z"] = first_coord + + for j in range(n_second): + # Set second axis coordinate + if second_key_idx == 0: + particle["x"] = second_midpoint[j] + elif second_key_idx == 1: + particle["y"] = second_midpoint[j] + else: + particle["z"] = second_midpoint[j] + + # Reset IDs for fresh lookup + particle["cell_ID"] = -1 + particle["material_ID"] = -1 + + if locate_particle(particle_arr, simulation, data): + row_materials[j] = particle["material_ID"] + else: + row_materials[j] = -1 + + return row_materials + + import mcdc.numba_types as type_ + + # Color assignment for materials (by material ID) + if colors is not None: + new_colors = {} + for item in colors.items(): + new_colors[item[0].ID] = mpl_colors.to_rgb(item[1]) + colors = new_colors + else: + colors = {} + for i in range(len(simulation["materials"])): + colors[i] = plt.cm.Set1(i)[:-1] + WHITE = mpl_colors.to_rgb("white") + + # Set reference axis + for axis in ["x", "y", "z"]: + if axis not in vis_type: + reference_key = axis + + if reference_key == "x": + reference = x + elif reference_key == "y": + reference = y + elif reference_key == "z": + reference = z + + # Set first and second axes + first_key = vis_type[0] + second_key = vis_type[1] + + if first_key == "x": + first = 
x + elif first_key == "y": + first = y + elif first_key == "z": + first = z + + if second_key == "x": + second = x + elif second_key == "y": + second = y + elif second_key == "z": + second = z + + # Axis pixels grids and midpoints + first_grid = np.linspace(first[0], first[1], pixels[0] + 1) + first_midpoint = 0.5 * (first_grid[1:] + first_grid[:-1]) + + second_grid = np.linspace(second[0], second[1], pixels[1] + 1) + second_midpoint = 0.5 * (second_grid[1:] + second_grid[:-1]) + + # Map axis keys to indices for the numba function + key_to_idx = {"x": 0, "y": 1, "z": 2} + first_idx = key_to_idx[first_key] + second_idx = key_to_idx[second_key] + ref_idx = key_to_idx[reference_key] + + # Create Color Palette for fast lookup + max_id = max(colors.keys()) + palette = np.zeros((max_id + 1, 3)) + for mat_id, col in colors.items(): + palette[mat_id] = col + + for t in time: + # Create particle array and material IDs grid + particle_arr = np.zeros(1, dtype=type_.particle) + material_ids = np.empty((pixels[0], pixels[1]), dtype=np.int32) + last_progress = -1 + + # Loop over rows with progress bar + for i in range(pixels[0]): + row_materials = _compute_material_row( + first_midpoint[i], + second_midpoint, + first_idx, + second_idx, + ref_idx, + reference, + t, + particle_arr, + simulation, + data, + ) + material_ids[i, :] = row_materials + + # Update progress bar + percent = (i + 1) / pixels[0] + if int(percent * 100) > last_progress: + last_progress = int(percent * 100) + sys.stdout.write( + "\r Visualizing: [%-28s] %d%%" + % ("=" * int(percent * 28), percent * 100) + ) + sys.stdout.flush() + + sys.stdout.write("\n") + sys.stdout.flush() + + # Color Mapping + pixel_data = np.full(pixels + (3,), WHITE) + valid_mask = material_ids >= 0 + pixel_data[valid_mask] = palette[ + material_ids[valid_mask] + ] # Apply colors using palette lookup + + pixel_data = np.transpose(pixel_data, (1, 0, 2)) + plt.imshow(pixel_data, origin="lower", extent=first + second) + plt.xlabel(first_key + 
" [cm]") + plt.ylabel(second_key + " [cm]") + plt.title(reference_key + " = %.2f cm" % reference + ", time = %.2f s" % t) + if save_as is not None: + if len(time) > 1: + plt.savefig(f"{save_as}_{t:03}.png") + else: + plt.savefig(save_as + ".png") + plt.clf() + else: + plt.show() diff --git a/mcdc/pyproject.toml b/mcdc/pyproject.toml new file mode 100644 index 000000000..6e7c63049 --- /dev/null +++ b/mcdc/pyproject.toml @@ -0,0 +1,99 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "mcdc" +version = "0.12.0" + +authors = [ + { name = "Ilham Variansyah", email = "variansi@oregonstate.edu" }, + { name = "Sam Pasmann", email = "spasmann@nd.edu" }, + { name = "Joanna Morgan", email = "morgan83@llnl.gov" }, + { name = "Kayla Clements", email = "clemekay@oregonstate.edu" }, + { name = "Braxton Cuneo", email = "bcuneo@seattleu.edu" }, + { name = "Caleb Shaw" }, + { name = "Rohan Pankaj" }, + { name = "Alexander Mote" }, + { name = "Ethan Lame" }, + { name = "Benjamin Whewell" }, + { name = "Ryan G. McClarren" }, + { name = "Todd S. Palmer" }, + { name = "Lizhong Chen" }, + { name = "Dmitriy Y. Anistratov" }, + { name = "C. T. Kelley" }, + { name = "Camille J. Palmer" }, + { name = "Kyle E. Niemeyer" }, +] + +maintainers = [ + { name = "Ilham Variansyah", email = "variansi@oregonstate.edu" }, + { name = "Braxton Cuneo", email = "bcuneo@seattleu.edu" }, + { name = "Kayla Clements", email = "clemekay@oregonstate.edu" }, + { name = "Joanna Piper Morgan", email = "morgan83@llnl.gov" }, + { name = "Kyle E. 
Niemeyer", email = "kyle.niemeyer@oregonstate.edu" }, +] + +description = "MC/DC (Monte Carlo Dynamic Code): a performant, scalable, and machine-portable Python-based Monte Carlo neutron transport package" +readme = "README.md" +requires-python = ">=3.10" +license = { file = "LICENSE" } + +keywords = [ + "Monte Carlo", + "nuclear engineering", + "neutron transport", + "HPC", + "GPU", + "numba", + "mpi4py", +] + +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: BSD License", + "Intended Audience :: Science/Research", + "Typing :: Typed", + "Natural Language :: English", + "Operating System :: Unix", + "Operating System :: MacOS", +] + +dependencies = [ + "numba>=0.60.0", + "numpy>=2.0.0", + "scipy", + "matplotlib", + "mpi4py>=3.1.4", + "h5py", + "colorama", + "sympy", +] + +[project.optional-dependencies] +docs = [ + "sphinx==7.2.6", + "furo", + "sphinx_toolbox", +] + +dev = [ + "black", + "pre-commit", + "pytest", +] + +[project.urls] +Homepage = "https://cement-psaap.github.io/" +Repository = "https://github.com/CEMeNT-PSAAP/MCDC" +Documentation = "https://mcdc.readthedocs.io/en/latest/" +Issues = "https://github.com/CEMeNT-PSAAP/MCDC/issues" + +[tool.black] +force-exclude = ''' +( + mcdc/numba_types\.py + | mcdc/mcdc_get/ + | mcdc/mcdc_set/ +) +''' diff --git a/mcdc/test/regression/.gitignore b/mcdc/test/regression/.gitignore new file mode 100644 index 000000000..de1abf13b --- /dev/null +++ b/mcdc/test/regression/.gitignore @@ -0,0 +1 @@ +mcdc-regression_test_data diff --git a/mcdc/test/regression/README.md b/mcdc/test/regression/README.md new file mode 100644 index 000000000..b9f5e4436 --- /dev/null +++ b/mcdc/test/regression/README.md @@ -0,0 +1,33 @@ +# MC/DC - Regression Test + +To run all tests: + +```bash +python run.py +``` + +To run a specific test (with wildcard `*` support): + +```bash +python run.py --name= +``` + +To run in Numba mode: + +```bash +python run.py --mode=numba +``` + +To run in multiple MPI ranks 
(currently support `mpiexec` and `srun`): + +```bash +python run.py --mpiexec= +``` + +To add a new test: + +1. Create a folder. The name of the folder will be the test name. +2. Add the input file named `input.py`. +3. Add the answer key file named `answer.h5`. +4. Make sure that the number of particles run is large enough for a good test. +5. If the test runs longer than 10 seconds in serial Python mode, consider decreasing the number of particles. diff --git a/mcdc/test/regression/azurv1/answer.h5 b/mcdc/test/regression/azurv1/answer.h5 new file mode 100644 index 000000000..1049577e5 Binary files /dev/null and b/mcdc/test/regression/azurv1/answer.h5 differ diff --git a/mcdc/test/regression/azurv1/input.py b/mcdc/test/regression/azurv1/input.py new file mode 100644 index 000000000..80eb28d39 --- /dev/null +++ b/mcdc/test/regression/azurv1/input.py @@ -0,0 +1,51 @@ +import numpy as np +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== +# Infinite medium with isotropic plane surface at the center +# Based on Ganapol LA-UR-01-1854 (AZURV1 benchmark) +# Effective scattering ratio c = 1.1 + +# Set materials +m = mcdc.MaterialMG( + capture=np.array([1.0 / 3.0]), + scatter=np.array([[1.0 / 3.0]]), + fission=np.array([1.0 / 3.0]), + nu_p=np.array([2.3]), +) + +# Set surfaces +s1 = mcdc.Surface.PlaneX(x=-1e10, boundary_condition="reflective") +s2 = mcdc.Surface.PlaneX(x=1e10, boundary_condition="reflective") + +# Set cells +mcdc.Cell(region=+s1 & -s2, fill=m) + +# ====================================================================================== +# Set source +# ====================================================================================== +# Isotropic pulse at x=t=0 + +mcdc.Source( + position=[0.0, 0.0, 0.0], + isotropic=True, + energy_group=0, + time=0.0, +) + +# 
====================================================================================== +# Set tallies, settings, and run MC/DC +# ====================================================================================== + +# Tallies +mesh = mcdc.MeshStructured(x=np.linspace(-20.5, 20.5, 202)) +mcdc.Tally(mesh=mesh, scores=["flux"], time=np.linspace(0.0, 20.0, 21)) + +# Settings +mcdc.settings.N_particle = 60 +mcdc.settings.N_batch = 2 + +# Run +mcdc.run() diff --git a/mcdc/test/regression/azurv1_census/answer.h5 b/mcdc/test/regression/azurv1_census/answer.h5 new file mode 100644 index 000000000..93af801c8 Binary files /dev/null and b/mcdc/test/regression/azurv1_census/answer.h5 differ diff --git a/mcdc/test/regression/azurv1_census/input.py b/mcdc/test/regression/azurv1_census/input.py new file mode 100644 index 000000000..9a8098887 --- /dev/null +++ b/mcdc/test/regression/azurv1_census/input.py @@ -0,0 +1,57 @@ +import numpy as np +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== +# Infinite medium with isotropic plane surface at the center +# Based on Ganapol LA-UR-01-1854 (AZURV1 benchmark) +# Effective scattering ratio c = 1.1 + +# Set materials +m = mcdc.MaterialMG( + capture=np.array([1.0 / 3.0]), + scatter=np.array([[1.0 / 3.0]]), + fission=np.array([1.0 / 3.0]), + nu_p=np.array([2.3]), +) + +# Set surfaces +s1 = mcdc.Surface.PlaneX(x=-1e10, boundary_condition="reflective") +s2 = mcdc.Surface.PlaneX(x=1e10, boundary_condition="reflective") + +# Set cells +mcdc.Cell(region=+s1 & -s2, fill=m) + +# ====================================================================================== +# Set source +# ====================================================================================== +# Isotropic pulse at x=t=0 + +mcdc.Source( + position=[0.0, 0.0, 0.0], + isotropic=True, + energy_group=0, + time=0.0, +) + 
+# ====================================================================================== +# Set tallies, settings, techniques, and run MC/DC +# ====================================================================================== + +# Tallies +mesh = mcdc.MeshStructured(x=np.linspace(-20.5, 20.5, 202)) +mcdc.Tally(mesh=mesh, scores=["flux"], time=np.linspace(0.0, 20.0, 21)) + +# Settings +mcdc.settings.N_particle = 50 +mcdc.settings.N_batch = 2 +mcdc.settings.census_bank_buffer_ratio = 5.0 +mcdc.settings.source_bank_buffer_ratio = 5.0 +mcdc.settings.set_time_census(np.linspace(0.0, 20.0, 21)[1:-1]) + +# Tecniques +mcdc.simulation.population_control() + +# Run +mcdc.run() diff --git a/mcdc/test/regression/azurv1_census_tally/answer.h5 b/mcdc/test/regression/azurv1_census_tally/answer.h5 new file mode 100644 index 000000000..d5a91a70b Binary files /dev/null and b/mcdc/test/regression/azurv1_census_tally/answer.h5 differ diff --git a/mcdc/test/regression/azurv1_census_tally/input.py b/mcdc/test/regression/azurv1_census_tally/input.py new file mode 100644 index 000000000..ab15e958d --- /dev/null +++ b/mcdc/test/regression/azurv1_census_tally/input.py @@ -0,0 +1,60 @@ +import numpy as np +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== +# Infinite medium with isotropic plane surface at the center +# Based on Ganapol LA-UR-01-1854 (AZURV1 benchmark) +# Effective scattering ratio c = 1.1 + +# Set materials +m = mcdc.MaterialMG( + capture=np.array([1.0 / 3.0]), + scatter=np.array([[1.0 / 3.0]]), + fission=np.array([1.0 / 3.0]), + nu_p=np.array([2.3]), +) + +# Set surfaces +s1 = mcdc.Surface.PlaneX(x=-1e10, boundary_condition="reflective") +s2 = mcdc.Surface.PlaneX(x=1e10, boundary_condition="reflective") + +# Set cells +mcdc.Cell(region=+s1 & -s2, fill=m) + +# 
====================================================================================== +# Set source +# ====================================================================================== +# Isotropic pulse at x=t=0 + +mcdc.Source( + position=[0.0, 0.0, 0.0], + isotropic=True, + energy_group=0, + time=0.0, +) + +# ====================================================================================== +# Set tallies, settings, techniques, and run MC/DC +# ====================================================================================== + +# Tallies +mesh = mcdc.MeshStructured(x=np.linspace(-20.5, 20.5, 202)) +mcdc.Tally(mesh=mesh, scores=["flux"], time=np.linspace(0.0, 20.0, 21)) + +# Settings +mcdc.settings.N_particle = 50 +mcdc.settings.N_batch = 2 +mcdc.settings.census_bank_buffer_ratio = 5.0 +mcdc.settings.source_bank_buffer_ratio = 5.0 +mcdc.settings.set_time_census(np.linspace(0.0, 20.0, 21)[1:], tally_frequency=5) + +# Tecniques +mcdc.simulation.population_control() + +# Run +mcdc.run() + +# Post-processing +mcdc.recombine_tallies() diff --git a/mcdc/test/regression/c5g7_2d_k_eigenvalue/.ipynb_checkpoints/input-checkpoint.py b/mcdc/test/regression/c5g7_2d_k_eigenvalue/.ipynb_checkpoints/input-checkpoint.py new file mode 100644 index 000000000..2b8a87d3c --- /dev/null +++ b/mcdc/test/regression/c5g7_2d_k_eigenvalue/.ipynb_checkpoints/input-checkpoint.py @@ -0,0 +1,207 @@ +import h5py +import numpy as np + +import mcdc + +# ============================================================================= +# Materials +# ============================================================================= + +# Load material data +lib = h5py.File("c5g7_xs.h5", "r") + + +# Materials +def set_mat(mat): + return mcdc.MaterialMG( + capture=mat["capture"][:], + scatter=mat["scatter"][:], + fission=mat["fission"][:], + nu_p=mat["nu_p"][:], + nu_d=mat["nu_d"][:], + chi_p=mat["chi_p"][:], + chi_d=mat["chi_d"][:], + speed=mat["speed"][:], + decay_rate=mat["decay"][:], + ) + + 
+# Set the material +mat_uo2 = set_mat(lib["uo2"]) # Fuel: UO2 +mat_mox43 = set_mat(lib["mox43"]) # Fuel: MOX 4.3% +mat_mox7 = set_mat(lib["mox7"]) # Fuel: MOX 7.0% +mat_mox87 = set_mat(lib["mox87"]) # Fuel: MOX 8.7% +mat_gt = set_mat(lib["gt"]) # Guide tube +mat_fc = set_mat(lib["fc"]) # Fission chamber +mat_cr = set_mat(lib["cr"]) # Control rod +mat_mod = set_mat(lib["mod"]) # Moderator + +# ============================================================================= +# Pin cells +# ============================================================================= + +pitch = 1.26 +radius = 0.54 + +# Surfaces +cy = mcdc.Surface.CylinderZ(center=[0.0, 0.0], radius=radius) + +# Cells +uo2 = mcdc.Cell(region=-cy, fill=mat_uo2) +mox4 = mcdc.Cell(region=-cy, fill=mat_mox43) +mox7 = mcdc.Cell(region=-cy, fill=mat_mox7) +mox8 = mcdc.Cell(region=-cy, fill=mat_mox87) +gt = mcdc.Cell(region=-cy, fill=mat_gt) +fc = mcdc.Cell(region=-cy, fill=mat_fc) +cr = mcdc.Cell(region=-cy, fill=mat_cr) +mod = mcdc.Cell(region=+cy, fill=mat_mod) +modi = mcdc.Cell(region=-cy, fill=mat_mod) # For all-water lattice + +# Universes +u = mcdc.Universe(cells=[uo2, mod]) +l = mcdc.Universe(cells=[mox4, mod]) +m = mcdc.Universe(cells=[mox7, mod]) +n = mcdc.Universe(cells=[mox8, mod]) +g = mcdc.Universe(cells=[gt, mod]) +f = mcdc.Universe(cells=[fc, mod]) +c = mcdc.Universe(cells=[cr, mod]) +w = mcdc.Universe(cells=[modi, mod]) + +# ============================================================================= +# Assemblies +# ============================================================================= + +# Lattices +lattice_uo2 = mcdc.Lattice( + x=[-pitch * 17 / 2, pitch, 17], + y=[-pitch * 17 / 2, pitch, 17], + universes=[ + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, g, u, u, g, u, u, g, u, u, u, u, u], + [u, u, u, g, u, u, u, u, u, u, u, u, u, g, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, 
u, g, u, u, g, u, u, g, u, u, g, u, u, g, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, g, u, u, g, u, u, f, u, u, g, u, u, g, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, g, u, u, g, u, u, g, u, u, g, u, u, g, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, g, u, u, u, u, u, u, u, u, u, g, u, u, u], + [u, u, u, u, u, g, u, u, g, u, u, g, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + ], +) + +lattice_mox = mcdc.Lattice( + x=[-pitch * 17 / 2, pitch, 17], + y=[-pitch * 17 / 2, pitch, 17], + universes=[ + [l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l], + [l, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, l], + [l, m, m, m, m, g, m, m, g, m, m, g, m, m, m, m, l], + [l, m, m, g, m, n, n, n, n, n, n, n, m, g, m, m, l], + [l, m, m, m, n, n, n, n, n, n, n, n, n, m, m, m, l], + [l, m, g, n, n, g, n, n, g, n, n, g, n, n, g, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, g, n, n, g, n, n, f, n, n, g, n, n, g, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, g, n, n, g, n, n, g, n, n, g, n, n, g, m, l], + [l, m, m, m, n, n, n, n, n, n, n, n, n, m, m, m, l], + [l, m, m, g, m, n, n, n, n, n, n, n, m, g, m, m, l], + [l, m, m, m, m, g, m, m, g, m, m, g, m, m, m, m, l], + [l, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, l], + [l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l], + ], +) + +lattice_mod = mcdc.Lattice( + x=[-pitch * 17 / 2, pitch * 17, 1], + y=[-pitch * 17 / 2, pitch * 17, 1], + universes=[[w]], +) + +# Assembly cells +# Surfaces +x0 = mcdc.Surface.PlaneX(x=-pitch * 17 / 2) +x1 = mcdc.Surface.PlaneX(x=pitch * 17 / 2) +y0 = 
mcdc.Surface.PlaneY(y=-pitch * 17 / 2) +y1 = mcdc.Surface.PlaneY(y=pitch * 17 / 2) +# Cells +assembly_uo2 = mcdc.Cell(region=+x0 & -x1 & +y0 & -y1, fill=lattice_uo2) +assembly_mox = mcdc.Cell(region=+x0 & -x1 & +y0 & -y1, fill=lattice_mox) +assembly_mod = mcdc.Cell(region=+x0 & -x1 & +y0 & -y1, fill=lattice_mod) + +# Set assemblies in their respective universes +u_ = mcdc.Universe(cells=[assembly_uo2]) +m_ = mcdc.Universe(cells=[assembly_mox]) +w_ = mcdc.Universe(cells=[assembly_mod]) + +# ============================================================================= +# Root universe: core +# ============================================================================= + +# Lattice +lattice_core = mcdc.Lattice( + x=[-pitch * 17 * 3 / 2, pitch * 17, 3], + y=[-pitch * 17 * 3 / 2, pitch * 17, 3], + universes=[[u_, m_, w_], [m_, u_, w_], [w_, w_, w_]], +) + +# Core cell +# Surfaces +x0_ = mcdc.Surface.PlaneX(x=0.0, boundary_condition="reflective") +x1_ = mcdc.Surface.PlaneX(x=pitch * 17 * 3, boundary_condition="vacuum") +y0_ = mcdc.Surface.PlaneY(y=-pitch * 17 * 3, boundary_condition="vacuum") +y1_ = mcdc.Surface.PlaneY(y=0.0, boundary_condition="reflective") +# Cell +core = mcdc.Cell( + region=+x0_ & -x1_ & +y0_ & -y1_, + fill=lattice_core, + translation=[pitch * 17 * 3 / 2, -pitch * 17 * 3 / 2, 0.0], +) + +# Root universe +mcdc.simulation.set_root_universe(cells=[core]) + +# ============================================================================= +# Set source +# ============================================================================= + +mcdc.Source( + x=[0.0, pitch * 17 * 2], + y=[-pitch * 17 * 2, 0.0], + isotropic=True, + energy_group=6, +) + +# ============================================================================= +# Set tallies, settings, techniques, and run MC/DC +# ============================================================================= + +# Tallies +mesh = mcdc.MeshStructured( + x=np.linspace(0.0, pitch * 17 * 3, 17 * 3 + 1), + 
y=np.linspace(-pitch * 17 * 3, 0.0, 17 * 3 + 1), +) +mcdc.Tally(mesh=mesh, scores=["flux"]) + +# Settings +mcdc.settings.N_particle = 20 +mcdc.settings.census_bank_buffer_ratio = 4.0 +mcdc.settings.source_bank_buffer_ratio = 3.0 +mcdc.settings.set_eigenmode(N_inactive=1, N_active=2, gyration_radius="infinite-z") + +# Techniques +mcdc.simulation.population_control() + +# Run +mcdc.settings.set_eigenmode(N_inactive=1, N_active=2, gyration_radius="infinite-z") + + +mcdc.run() diff --git a/mcdc/test/regression/c5g7_2d_k_eigenvalue/answer.h5 b/mcdc/test/regression/c5g7_2d_k_eigenvalue/answer.h5 new file mode 100644 index 000000000..47d3e5bd1 Binary files /dev/null and b/mcdc/test/regression/c5g7_2d_k_eigenvalue/answer.h5 differ diff --git a/mcdc/test/regression/c5g7_2d_k_eigenvalue/c5g7_xs.h5 b/mcdc/test/regression/c5g7_2d_k_eigenvalue/c5g7_xs.h5 new file mode 100644 index 000000000..218b300f5 Binary files /dev/null and b/mcdc/test/regression/c5g7_2d_k_eigenvalue/c5g7_xs.h5 differ diff --git a/mcdc/test/regression/c5g7_2d_k_eigenvalue/input.py b/mcdc/test/regression/c5g7_2d_k_eigenvalue/input.py new file mode 100644 index 000000000..2b8a87d3c --- /dev/null +++ b/mcdc/test/regression/c5g7_2d_k_eigenvalue/input.py @@ -0,0 +1,207 @@ +import h5py +import numpy as np + +import mcdc + +# ============================================================================= +# Materials +# ============================================================================= + +# Load material data +lib = h5py.File("c5g7_xs.h5", "r") + + +# Materials +def set_mat(mat): + return mcdc.MaterialMG( + capture=mat["capture"][:], + scatter=mat["scatter"][:], + fission=mat["fission"][:], + nu_p=mat["nu_p"][:], + nu_d=mat["nu_d"][:], + chi_p=mat["chi_p"][:], + chi_d=mat["chi_d"][:], + speed=mat["speed"][:], + decay_rate=mat["decay"][:], + ) + + +# Set the material +mat_uo2 = set_mat(lib["uo2"]) # Fuel: UO2 +mat_mox43 = set_mat(lib["mox43"]) # Fuel: MOX 4.3% +mat_mox7 = set_mat(lib["mox7"]) # Fuel: MOX 
7.0% +mat_mox87 = set_mat(lib["mox87"]) # Fuel: MOX 8.7% +mat_gt = set_mat(lib["gt"]) # Guide tube +mat_fc = set_mat(lib["fc"]) # Fission chamber +mat_cr = set_mat(lib["cr"]) # Control rod +mat_mod = set_mat(lib["mod"]) # Moderator + +# ============================================================================= +# Pin cells +# ============================================================================= + +pitch = 1.26 +radius = 0.54 + +# Surfaces +cy = mcdc.Surface.CylinderZ(center=[0.0, 0.0], radius=radius) + +# Cells +uo2 = mcdc.Cell(region=-cy, fill=mat_uo2) +mox4 = mcdc.Cell(region=-cy, fill=mat_mox43) +mox7 = mcdc.Cell(region=-cy, fill=mat_mox7) +mox8 = mcdc.Cell(region=-cy, fill=mat_mox87) +gt = mcdc.Cell(region=-cy, fill=mat_gt) +fc = mcdc.Cell(region=-cy, fill=mat_fc) +cr = mcdc.Cell(region=-cy, fill=mat_cr) +mod = mcdc.Cell(region=+cy, fill=mat_mod) +modi = mcdc.Cell(region=-cy, fill=mat_mod) # For all-water lattice + +# Universes +u = mcdc.Universe(cells=[uo2, mod]) +l = mcdc.Universe(cells=[mox4, mod]) +m = mcdc.Universe(cells=[mox7, mod]) +n = mcdc.Universe(cells=[mox8, mod]) +g = mcdc.Universe(cells=[gt, mod]) +f = mcdc.Universe(cells=[fc, mod]) +c = mcdc.Universe(cells=[cr, mod]) +w = mcdc.Universe(cells=[modi, mod]) + +# ============================================================================= +# Assemblies +# ============================================================================= + +# Lattices +lattice_uo2 = mcdc.Lattice( + x=[-pitch * 17 / 2, pitch, 17], + y=[-pitch * 17 / 2, pitch, 17], + universes=[ + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, g, u, u, g, u, u, g, u, u, u, u, u], + [u, u, u, g, u, u, u, u, u, u, u, u, u, g, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, g, u, u, g, u, u, g, u, u, g, u, u, g, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + 
[u, u, g, u, u, g, u, u, f, u, u, g, u, u, g, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, g, u, u, g, u, u, g, u, u, g, u, u, g, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, g, u, u, u, u, u, u, u, u, u, g, u, u, u], + [u, u, u, u, u, g, u, u, g, u, u, g, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + [u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u, u], + ], +) + +lattice_mox = mcdc.Lattice( + x=[-pitch * 17 / 2, pitch, 17], + y=[-pitch * 17 / 2, pitch, 17], + universes=[ + [l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l], + [l, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, l], + [l, m, m, m, m, g, m, m, g, m, m, g, m, m, m, m, l], + [l, m, m, g, m, n, n, n, n, n, n, n, m, g, m, m, l], + [l, m, m, m, n, n, n, n, n, n, n, n, n, m, m, m, l], + [l, m, g, n, n, g, n, n, g, n, n, g, n, n, g, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, g, n, n, g, n, n, f, n, n, g, n, n, g, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, m, n, n, n, n, n, n, n, n, n, n, n, m, m, l], + [l, m, g, n, n, g, n, n, g, n, n, g, n, n, g, m, l], + [l, m, m, m, n, n, n, n, n, n, n, n, n, m, m, m, l], + [l, m, m, g, m, n, n, n, n, n, n, n, m, g, m, m, l], + [l, m, m, m, m, g, m, m, g, m, m, g, m, m, m, m, l], + [l, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, l], + [l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l, l], + ], +) + +lattice_mod = mcdc.Lattice( + x=[-pitch * 17 / 2, pitch * 17, 1], + y=[-pitch * 17 / 2, pitch * 17, 1], + universes=[[w]], +) + +# Assembly cells +# Surfaces +x0 = mcdc.Surface.PlaneX(x=-pitch * 17 / 2) +x1 = mcdc.Surface.PlaneX(x=pitch * 17 / 2) +y0 = mcdc.Surface.PlaneY(y=-pitch * 17 / 2) +y1 = mcdc.Surface.PlaneY(y=pitch * 17 / 2) +# Cells +assembly_uo2 = mcdc.Cell(region=+x0 & -x1 & +y0 & -y1, fill=lattice_uo2) +assembly_mox = 
mcdc.Cell(region=+x0 & -x1 & +y0 & -y1, fill=lattice_mox) +assembly_mod = mcdc.Cell(region=+x0 & -x1 & +y0 & -y1, fill=lattice_mod) + +# Set assemblies in their respective universes +u_ = mcdc.Universe(cells=[assembly_uo2]) +m_ = mcdc.Universe(cells=[assembly_mox]) +w_ = mcdc.Universe(cells=[assembly_mod]) + +# ============================================================================= +# Root universe: core +# ============================================================================= + +# Lattice +lattice_core = mcdc.Lattice( + x=[-pitch * 17 * 3 / 2, pitch * 17, 3], + y=[-pitch * 17 * 3 / 2, pitch * 17, 3], + universes=[[u_, m_, w_], [m_, u_, w_], [w_, w_, w_]], +) + +# Core cell +# Surfaces +x0_ = mcdc.Surface.PlaneX(x=0.0, boundary_condition="reflective") +x1_ = mcdc.Surface.PlaneX(x=pitch * 17 * 3, boundary_condition="vacuum") +y0_ = mcdc.Surface.PlaneY(y=-pitch * 17 * 3, boundary_condition="vacuum") +y1_ = mcdc.Surface.PlaneY(y=0.0, boundary_condition="reflective") +# Cell +core = mcdc.Cell( + region=+x0_ & -x1_ & +y0_ & -y1_, + fill=lattice_core, + translation=[pitch * 17 * 3 / 2, -pitch * 17 * 3 / 2, 0.0], +) + +# Root universe +mcdc.simulation.set_root_universe(cells=[core]) + +# ============================================================================= +# Set source +# ============================================================================= + +mcdc.Source( + x=[0.0, pitch * 17 * 2], + y=[-pitch * 17 * 2, 0.0], + isotropic=True, + energy_group=6, +) + +# ============================================================================= +# Set tallies, settings, techniques, and run MC/DC +# ============================================================================= + +# Tallies +mesh = mcdc.MeshStructured( + x=np.linspace(0.0, pitch * 17 * 3, 17 * 3 + 1), + y=np.linspace(-pitch * 17 * 3, 0.0, 17 * 3 + 1), +) +mcdc.Tally(mesh=mesh, scores=["flux"]) + +# Settings +mcdc.settings.N_particle = 20 +mcdc.settings.census_bank_buffer_ratio = 4.0 
+mcdc.settings.source_bank_buffer_ratio = 3.0 +mcdc.settings.set_eigenmode(N_inactive=1, N_active=2, gyration_radius="infinite-z") + +# Techniques +mcdc.simulation.population_control() + +# Run +mcdc.settings.set_eigenmode(N_inactive=1, N_active=2, gyration_radius="infinite-z") + + +mcdc.run() diff --git a/mcdc/test/regression/cgmf_sphere/.ipynb_checkpoints/input-checkpoint.py b/mcdc/test/regression/cgmf_sphere/.ipynb_checkpoints/input-checkpoint.py new file mode 100644 index 000000000..6a8d49eba --- /dev/null +++ b/mcdc/test/regression/cgmf_sphere/.ipynb_checkpoints/input-checkpoint.py @@ -0,0 +1,53 @@ +## MCNP vs MCDC examples +# U-235 Sphere +import mcdc +import numpy as np + +#=========== +# Set Model +#=========== + +U_235 = mcdc.Material(name='U_235',nuclide_composition={'U235',1.0}) + +# Set Surfaces +# +sphere = mcdc.Surface.Sphere(center=[0, 0, 0], radius=10, boundary_condition="vacuum") +inside_sphere = -sphere +sphere_cell = mcdc.Cell(region=inside_sphere, fill=U_235) + +# Set Source +mcdc.Source(x=[0.0, 2.5], isotropic=True, energy=14e6) # energy in ev + +# set mesh: circular slice in x-z plane +theta = np.linspace(0,pi,50) +phi = 0 +r = np.linspace(0,10,50) + +x = r * np.cos(phi)*np.sin(theta) +y = 0 +z = r*np.cos(theta) + +mesh = np.meshgrid(x,z) + +E_1 = np.linspace(1e-4,1,100) # thermal energy axis +E_2 = np.linspace(200,1e5,1000) +E_3 = np.linspace(1.1e5,14e6,1000) +E_axis = [] +E_axis.append(E_1,E_2,E_3) + +# tallies + +mcdc.Tally( + mesh=mesh, + scores=["flux"], + energy=E_axis +) + +# whole sphere +mcdc.Tally(cell=sphere_cell, scores=["fission"],energy=E_axis) + +# Settings +mcdc.settings.N_batch = 1 +mcdc.settings.N_particle = 10e7 + +mcdc.run() diff --git a/mcdc/test/regression/cgmf_sphere/input.py b/mcdc/test/regression/cgmf_sphere/input.py new file mode 100644 index 000000000..cd6e12c87 --- /dev/null +++ b/mcdc/test/regression/cgmf_sphere/input.py @@ -0,0 +1,56 @@ +## MCNP vs MCDC examples +# U-235 Sphere +import mcdc +import numpy as np + 
+#=========== +# Set Model +#=========== + +U_235 = mcdc.Material(nuclide_composition={"U235": 0.048807514}) + +# Set Surfaces +# +sphere = mcdc.Surface.Sphere(center=[0, 0, 0], radius=2.0, boundary_condition="vacuum") + +inside_sphere = -sphere +sphere_cell = mcdc.Cell(region=inside_sphere, fill=U_235) + +# Set Source +ENERGY = 14e6 +energy = np.array([[ENERGY - 1, ENERGY + 1], [0.5,0.5]]) +mcdc.Source(position=[0,0,0], isotropic=True, energy=energy) # energy in ev + + +r = np.linspace(0,5,100) +theta = np.linspace(0,np.pi,100) +phi = np.linspace(0,2*np.pi,100) + +x = r *np.cos(theta) * np.sin(phi) +y = r *np.sin(theta) * np.sin(phi) +z = r * np.cos(theta) +mesh = np.meshgrid(x,y,z) + +#E_1 = np.linspace(1e-4,1,100) # thermal energy axis +#E_2 = np.linspace(200,1e5,1000) +#E_3 = np.linspace(1.1e5,14e6,1000) +#E_axis = np.concatenate([E_1, E_2, E_3]) + +E_1 = np.linspace(1e-10,1e-6,10) # thermal energy axis +E_2 = np.linspace(2e-4,1e-1,10) +E_3 = np.linspace(1.1e-1,14,10) +E_axis = np.concatenate([E_1, E_2, E_3]) + +# tallies + +# whole sphere +mcdc.Tally(cell=sphere_cell, scores=["flux"],energy=E_axis) + +# Settings +N = 1500 + +#mcdc.settings.N_batch = 1 +mcdc.settings.N_particle = N +mcdc.settings.active_bank_buffer = 1000 + +mcdc.run() diff --git a/mcdc/test/regression/cooper2/answer.h5 b/mcdc/test/regression/cooper2/answer.h5 new file mode 100644 index 000000000..4a9e36dcc Binary files /dev/null and b/mcdc/test/regression/cooper2/answer.h5 differ diff --git a/mcdc/test/regression/cooper2/input.py b/mcdc/test/regression/cooper2/input.py new file mode 100644 index 000000000..4bab6d5f9 --- /dev/null +++ b/mcdc/test/regression/cooper2/input.py @@ -0,0 +1,62 @@ +import numpy as np +import mcdc + +# ============================================================================= +# Set model +# ============================================================================= +# A shielding problem based on Problem 2 of [Coper NSE 2001] +# 
https://ans.tandfonline.com/action/showCitFormats?doi=10.13182/NSE00-34 + +# Set materials +SigmaT = 5.0 +c = 0.8 +m_barrier = mcdc.MaterialMG( + capture=np.array([SigmaT]), scatter=np.array([[SigmaT * c]]) +) +SigmaT = 1.0 +m_room = mcdc.MaterialMG(capture=np.array([SigmaT]), scatter=np.array([[SigmaT * c]])) + +# Set surfaces +sx1 = mcdc.Surface.PlaneX(x=0.0, boundary_condition="reflective") +sx2 = mcdc.Surface.PlaneX(x=2.0) +sx3 = mcdc.Surface.PlaneX(x=2.4) +sx4 = mcdc.Surface.PlaneX(x=4.0, boundary_condition="vacuum") +sy1 = mcdc.Surface.PlaneY(y=0.0, boundary_condition="reflective") +sy2 = mcdc.Surface.PlaneY(y=2.0) +sy3 = mcdc.Surface.PlaneY(y=4.0, boundary_condition="vacuum") + +# Set cells +mcdc.Cell(region=+sx1 & -sx2 & +sy1 & -sy2, fill=m_room) +mcdc.Cell(region=+sx1 & -sx4 & +sy2 & -sy3, fill=m_room) +mcdc.Cell(region=+sx3 & -sx4 & +sy1 & -sy2, fill=m_room) +mcdc.Cell(region=+sx2 & -sx3 & +sy1 & -sy2, fill=m_barrier) + +# ============================================================================= +# Set source +# ============================================================================= + +mcdc.Source( + x=[0.0, 1.0], + y=[0.0, 1.0], + isotropic=True, + energy_group=0, + time=0.0, +) + +# ============================================================================= +# Set tallies, settings, techniques, and run MC/DC +# ============================================================================= + +# Tallies +mesh = mcdc.MeshUniform(x=(0.0, 0.1, 40), y=(0.0, 0.1, 40)) +mcdc.Tally(mesh=mesh, scores=["flux"]) + +# Settings +mcdc.settings.N_particle = 50 +mcdc.settings.N_batch = 2 + +# Techniques +mcdc.simulation.implicit_capture() + +# Run +mcdc.run() diff --git a/mcdc/test/regression/fuel_array_packaged/answer.h5 b/mcdc/test/regression/fuel_array_packaged/answer.h5 new file mode 100644 index 000000000..e522be61a Binary files /dev/null and b/mcdc/test/regression/fuel_array_packaged/answer.h5 differ diff --git 
a/mcdc/test/regression/fuel_array_packaged/input.py b/mcdc/test/regression/fuel_array_packaged/input.py new file mode 100644 index 000000000..551123012 --- /dev/null +++ b/mcdc/test/regression/fuel_array_packaged/input.py @@ -0,0 +1,108 @@ +import numpy as np +import mcdc + +# ====================================================================================== +# Materials +# ====================================================================================== + +fuel = mcdc.MaterialMG( + capture=np.array([0.45]), + fission=np.array([0.55]), + nu_p=np.array([2.5]), +) + +cover = mcdc.MaterialMG( + capture=np.array([0.05]), + scatter=np.array([[0.95]]), +) + +water = mcdc.MaterialMG( + capture=np.array([0.02]), + scatter=np.array([[0.08]]), +) + +# ====================================================================================== +# The assembly +# ====================================================================================== + +# Surfaces +cylinder_z = mcdc.Surface.CylinderZ(center=[0.0, 0.0], radius=1.0) +cylinder_x = mcdc.Surface.CylinderX(center=[0.0, 0.0], radius=1.0) + +top_z = mcdc.Surface.PlaneZ(z=2.5) +bot_z = mcdc.Surface.PlaneZ(z=-2.5) +top_x = mcdc.Surface.PlaneX(x=2.5) +bot_x = mcdc.Surface.PlaneX(x=-2.5) + +sphere = mcdc.Surface.Sphere(center=[0.0, 0.0, 0.0], radius=3.0) + +# Cells +pellet_z = -cylinder_z & +bot_z & -top_z +pellet_x = -cylinder_x & +bot_x & -top_x +shooting_star = pellet_z | pellet_x +fuel_shooting_star = mcdc.Cell(region=shooting_star, fill=fuel) +cover_sphere = mcdc.Cell(region=-sphere & ~shooting_star, fill=cover) +water_tank = mcdc.Cell(region=+sphere, fill=water) + +# ====================================================================================== +# Copy the assembly via universe cells +# ====================================================================================== + +# Set the universe +assembly = mcdc.Universe(cells=[fuel_shooting_star, cover_sphere, water_tank]) + +# Set container cell surfaces 
+min_x = mcdc.Surface.PlaneX(x=-10.0, boundary_condition="vacuum") +mid_x = mcdc.Surface.PlaneX(x=0.0) +max_x = mcdc.Surface.PlaneX(x=10.0, boundary_condition="vacuum") +min_y = mcdc.Surface.PlaneY(y=-5.0, boundary_condition="vacuum") +max_y = mcdc.Surface.PlaneY(y=5.0, boundary_condition="vacuum") +min_z = mcdc.Surface.PlaneZ(z=-5.0, boundary_condition="vacuum") +max_z = mcdc.Surface.PlaneZ(z=5.0, boundary_condition="vacuum") + +# Make copies via universe cells +container_left = +min_y & -max_y & +min_z & -max_z & +min_x & -mid_x +container_right = +min_y & -max_y & +min_z & -max_z & +mid_x & -max_x +assembly_left = mcdc.Cell(region=container_left, fill=assembly, translation=[-5, 0, 0]) +assembly_right = mcdc.Cell( + region=container_right, fill=assembly, translation=[+5, 0, 0], rotation=[0, 10, 0] +) + +# Root universe +mcdc.simulation.set_root_universe(cells=[assembly_left, assembly_right]) + +# ====================================================================================== +# Set source +# ====================================================================================== + +mcdc.Source(x=[-0.1, 0.1], isotropic=True, energy_group=0) + +# ====================================================================================== +# Set tallies, settings, and run MC/DC +# ====================================================================================== + +# Tallies +mesh = mcdc.MeshStructured( + x=np.linspace(-10, 10, 201), + z=np.linspace(-5, 5, 101), +) +mcdc.Tally(mesh=mesh, scores=["fission"]) + +# Settings +mcdc.settings.N_particle = 100 +mcdc.settings.N_batch = 2 +mcdc.settings.active_bank_buffer = 1000 + +# Run (or visualize) +visualize = False +if not visualize: + mcdc.run() +else: + colors = { + fuel: "red", + cover: "gray", + water: "blue", + } + mcdc.visualize( + "xz", y=0.0, x=[-11.0, 11.0], z=[-6, 6], pixels=(400, 400), colors=colors + ) diff --git a/mcdc/test/regression/fuel_array_packaged/process.py 
b/mcdc/test/regression/fuel_array_packaged/process.py new file mode 100644 index 000000000..d7b71c261 --- /dev/null +++ b/mcdc/test/regression/fuel_array_packaged/process.py @@ -0,0 +1,29 @@ +import matplotlib.pyplot as plt +import h5py, sys +import numpy as np + +# Load result +with h5py.File(sys.argv[1], "r") as f: + x = f["tallies/mesh_tally_0/grid/x"][:] + z = f["tallies/mesh_tally_0/grid/z"][:] + dx = [x[1:] - x[:-1]][-1] + x_mid = 0.5 * (x[:-1] + x[1:]) + dz = [z[1:] - z[:-1]][-1] + z_mid = 0.5 * (z[:-1] + z[1:]) + + phi = f["tallies/mesh_tally_0/fission/mean"][:] + phi_sd = f["tallies/mesh_tally_0/fission/sdev"][:] + + +# Plot result +X, Y = np.meshgrid(z_mid, x_mid) +Z = phi +plt.pcolormesh(X, Y, Z) +plt.gca().set_aspect("equal") +plt.show() + +X, Y = np.meshgrid(z_mid, x_mid) +Z = phi_sd +plt.pcolormesh(X, Y, Z) +plt.gca().set_aspect("equal") +plt.show() diff --git a/mcdc/test/regression/inf_shem361/SHEM-361.npz b/mcdc/test/regression/inf_shem361/SHEM-361.npz new file mode 100644 index 000000000..6ba2c1f4b Binary files /dev/null and b/mcdc/test/regression/inf_shem361/SHEM-361.npz differ diff --git a/mcdc/test/regression/inf_shem361/answer.h5 b/mcdc/test/regression/inf_shem361/answer.h5 new file mode 100644 index 000000000..9614ab9c5 Binary files /dev/null and b/mcdc/test/regression/inf_shem361/answer.h5 differ diff --git a/mcdc/test/regression/inf_shem361/input.py b/mcdc/test/regression/inf_shem361/input.py new file mode 100644 index 000000000..60b97b20c --- /dev/null +++ b/mcdc/test/regression/inf_shem361/input.py @@ -0,0 +1,61 @@ +import numpy as np +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== +# The infinite homogenous medium is modeled with reflecting slab + +# Load material data +with np.load("SHEM-361.npz") as data: + SigmaC = data["SigmaC"] * 1.5 # /cm + SigmaS = data["SigmaS"] + SigmaF = 
data["SigmaF"] + nu_p = data["nu_p"] + nu_d = data["nu_d"] + chi_p = data["chi_p"] + chi_d = data["chi_d"] + G = data["G"] + speed = data["v"] + lamd = data["lamd"] + +# Set material +m = mcdc.MaterialMG( + capture=SigmaC, + scatter=SigmaS, + fission=SigmaF, + nu_p=nu_p, + chi_p=chi_p, + nu_d=nu_d, + chi_d=chi_d, +) + +# Set surfaces +s1 = mcdc.Surface.PlaneX(x=-1e10, boundary_condition="reflective") +s2 = mcdc.Surface.PlaneX(x=1e10, boundary_condition="reflective") + +# Set cells +c = mcdc.Cell(region=+s1 & -s2, fill=m) + +# ====================================================================================== +# Set source +# ====================================================================================== + +mcdc.Source( + position=(0.0, 0.0, 0.0), isotropic=True, energy_group=np.array([[360], [1.0]]) +) + +# ====================================================================================== +# Set tallies, settings, and run MC/DC +# ====================================================================================== + +# Tallies +mcdc.Tally(scores=["flux"], energy="all_groups") + +# Swttings +mcdc.settings.N_particle = 25 +mcdc.settings.N_batch = 2 +mcdc.settings.active_bank_buffer = 1000 + +# Run +mcdc.run() diff --git a/mcdc/test/regression/inf_shem361_k_eigenvalue/.ipynb_checkpoints/input-checkpoint.py b/mcdc/test/regression/inf_shem361_k_eigenvalue/.ipynb_checkpoints/input-checkpoint.py new file mode 100644 index 000000000..e57e0b691 --- /dev/null +++ b/mcdc/test/regression/inf_shem361_k_eigenvalue/.ipynb_checkpoints/input-checkpoint.py @@ -0,0 +1,64 @@ +import numpy as np + +import mcdc + +# ============================================================================= +# Set model +# ============================================================================= +# The infinite homogenous medium is modeled with reflecting slab + +# Load material data +with np.load("SHEM-361.npz") as data: + SigmaC = data["SigmaC"] # /cm + SigmaS = data["SigmaS"] + 
SigmaF = data["SigmaF"] + nu_p = data["nu_p"] + nu_d = data["nu_d"] + chi_p = data["chi_p"] + chi_d = data["chi_d"] + G = data["G"] + +# Set material +m = mcdc.MaterialMG( + capture=SigmaC, + scatter=SigmaS, + fission=SigmaF, + nu_p=nu_p, + chi_p=chi_p, + nu_d=nu_d, + chi_d=chi_d, +) + +# Set surfaces +s1 = mcdc.Surface.PlaneX(x=-1e10, boundary_condition="reflective") +s2 = mcdc.Surface.PlaneX(x=1e10, boundary_condition="reflective") + +# Set cells +c = mcdc.Cell(region=+s1 & -s2, fill=m) + +# ============================================================================= +# Set initial source +# ============================================================================= + +mcdc.Source( + position=(0.0, 0.0, 0.0), isotropic=True, energy_group=np.array([[360], [1.0]]) +) + +# ============================================================================= +# Set tallies, settings, techniques, and run MC/DC +# ============================================================================= + +# Tallies +mcdc.Tally(scores=["flux"], energy="all_groups") + +# Settings +mcdc.settings.N_particle = 70 +mcdc.settings.source_bank_buffer_ratio = 2.0 +mcdc.settings.census_bank_buffer_ratio = 3.0 +mcdc.settings.set_eigenmode(N_inactive=1, N_active=2) + +# Techniques +mcdc.simulation.population_control() + +# Run +mcdc.run() diff --git a/mcdc/test/regression/inf_shem361_k_eigenvalue/SHEM-361.npz b/mcdc/test/regression/inf_shem361_k_eigenvalue/SHEM-361.npz new file mode 100644 index 000000000..6ba2c1f4b Binary files /dev/null and b/mcdc/test/regression/inf_shem361_k_eigenvalue/SHEM-361.npz differ diff --git a/mcdc/test/regression/inf_shem361_k_eigenvalue/answer.h5 b/mcdc/test/regression/inf_shem361_k_eigenvalue/answer.h5 new file mode 100644 index 000000000..ea8a91eec Binary files /dev/null and b/mcdc/test/regression/inf_shem361_k_eigenvalue/answer.h5 differ diff --git a/mcdc/test/regression/inf_shem361_k_eigenvalue/input.py b/mcdc/test/regression/inf_shem361_k_eigenvalue/input.py new 
file mode 100644 index 000000000..e57e0b691 --- /dev/null +++ b/mcdc/test/regression/inf_shem361_k_eigenvalue/input.py @@ -0,0 +1,64 @@ +import numpy as np + +import mcdc + +# ============================================================================= +# Set model +# ============================================================================= +# The infinite homogenous medium is modeled with reflecting slab + +# Load material data +with np.load("SHEM-361.npz") as data: + SigmaC = data["SigmaC"] # /cm + SigmaS = data["SigmaS"] + SigmaF = data["SigmaF"] + nu_p = data["nu_p"] + nu_d = data["nu_d"] + chi_p = data["chi_p"] + chi_d = data["chi_d"] + G = data["G"] + +# Set material +m = mcdc.MaterialMG( + capture=SigmaC, + scatter=SigmaS, + fission=SigmaF, + nu_p=nu_p, + chi_p=chi_p, + nu_d=nu_d, + chi_d=chi_d, +) + +# Set surfaces +s1 = mcdc.Surface.PlaneX(x=-1e10, boundary_condition="reflective") +s2 = mcdc.Surface.PlaneX(x=1e10, boundary_condition="reflective") + +# Set cells +c = mcdc.Cell(region=+s1 & -s2, fill=m) + +# ============================================================================= +# Set initial source +# ============================================================================= + +mcdc.Source( + position=(0.0, 0.0, 0.0), isotropic=True, energy_group=np.array([[360], [1.0]]) +) + +# ============================================================================= +# Set tallies, settings, techniques, and run MC/DC +# ============================================================================= + +# Tallies +mcdc.Tally(scores=["flux"], energy="all_groups") + +# Settings +mcdc.settings.N_particle = 70 +mcdc.settings.source_bank_buffer_ratio = 2.0 +mcdc.settings.census_bank_buffer_ratio = 3.0 +mcdc.settings.set_eigenmode(N_inactive=1, N_active=2) + +# Techniques +mcdc.simulation.population_control() + +# Run +mcdc.run() diff --git a/mcdc/test/regression/inf_shem361_td/SHEM-361.npz b/mcdc/test/regression/inf_shem361_td/SHEM-361.npz new file mode 100644 
index 000000000..6ba2c1f4b Binary files /dev/null and b/mcdc/test/regression/inf_shem361_td/SHEM-361.npz differ diff --git a/mcdc/test/regression/inf_shem361_td/answer.h5 b/mcdc/test/regression/inf_shem361_td/answer.h5 new file mode 100644 index 000000000..708f76e5c Binary files /dev/null and b/mcdc/test/regression/inf_shem361_td/answer.h5 differ diff --git a/mcdc/test/regression/inf_shem361_td/input.py b/mcdc/test/regression/inf_shem361_td/input.py new file mode 100644 index 000000000..58b54f997 --- /dev/null +++ b/mcdc/test/regression/inf_shem361_td/input.py @@ -0,0 +1,67 @@ +import numpy as np +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== +# The infinite homogenous medium is modeled with reflecting slab + +# Load material data +with np.load("SHEM-361.npz") as data: + SigmaC = data["SigmaC"] * 2.5 # /cm + SigmaS = data["SigmaS"] + SigmaF = data["SigmaF"] + nu_p = data["nu_p"] + nu_d = data["nu_d"] + chi_p = data["chi_p"] + chi_d = data["chi_d"] + G = data["G"] + speed = data["v"] + lamd = data["lamd"] + +# Set material +m = mcdc.MaterialMG( + capture=SigmaC, + scatter=SigmaS, + fission=SigmaF, + nu_p=nu_p, + chi_p=chi_p, + nu_d=nu_d, + chi_d=chi_d, + decay_rate=lamd, + speed=speed, +) + +# Set surfaces +s1 = mcdc.Surface.PlaneX(x=-1e10, boundary_condition="reflective") +s2 = mcdc.Surface.PlaneX(x=1e10, boundary_condition="reflective") + +# Set cells +c = mcdc.Cell(region=+s1 & -s2, fill=m) + +# ====================================================================================== +# Set source +# ====================================================================================== + +mcdc.Source( + position=(0.0, 0.0, 0.0), isotropic=True, energy_group=np.array([[360], [1.0]]) +) + +# ====================================================================================== +# Set tallies, settings, and run 
MC/DC +# ====================================================================================== + +# Tallies +mcdc.Tally( + scores=["flux"], + time=np.insert(np.logspace(-8, 1, 100), 0, 0.0), + energy="all_groups", +) + +# Swttings +mcdc.settings.N_particle = 50 +mcdc.settings.N_batch = 2 +mcdc.settings.active_bank_buffer = 1000 + +# Run +mcdc.run() diff --git a/mcdc/test/regression/inf_shem361_td_census/SHEM-361.npz b/mcdc/test/regression/inf_shem361_td_census/SHEM-361.npz new file mode 100644 index 000000000..6ba2c1f4b Binary files /dev/null and b/mcdc/test/regression/inf_shem361_td_census/SHEM-361.npz differ diff --git a/mcdc/test/regression/inf_shem361_td_census/answer.h5 b/mcdc/test/regression/inf_shem361_td_census/answer.h5 new file mode 100644 index 000000000..50119645c Binary files /dev/null and b/mcdc/test/regression/inf_shem361_td_census/answer.h5 differ diff --git a/mcdc/test/regression/inf_shem361_td_census/input.py b/mcdc/test/regression/inf_shem361_td_census/input.py new file mode 100644 index 000000000..525615b0e --- /dev/null +++ b/mcdc/test/regression/inf_shem361_td_census/input.py @@ -0,0 +1,73 @@ +import numpy as np +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== +# The infinite homogenous medium is modeled with reflecting slab + +# Load material data +with np.load("SHEM-361.npz") as data: + SigmaC = data["SigmaC"] * 3.0 # /cm + SigmaS = data["SigmaS"] + SigmaF = data["SigmaF"] + nu_p = data["nu_p"] + nu_d = data["nu_d"] + chi_p = data["chi_p"] + chi_d = data["chi_d"] + G = data["G"] + speed = data["v"] + lamd = data["lamd"] + +# Set material +m = mcdc.MaterialMG( + capture=SigmaC, + scatter=SigmaS, + fission=SigmaF, + nu_p=nu_p, + chi_p=chi_p, + nu_d=nu_d, + chi_d=chi_d, + decay_rate=lamd, + speed=speed, +) + +# Set surfaces +s1 = mcdc.Surface.PlaneX(x=-1e10, 
boundary_condition="reflective") +s2 = mcdc.Surface.PlaneX(x=1e10, boundary_condition="reflective") + +# Set cells +c = mcdc.Cell(region=+s1 & -s2, fill=m) + +# ====================================================================================== +# Set source +# ====================================================================================== + +mcdc.Source( + position=(0.0, 0.0, 0.0), isotropic=True, energy_group=np.array([[360], [1.0]]) +) + +# ====================================================================================== +# Set tallies, settings, techniques, and run MC/DC +# ====================================================================================== + +# Tallies +mcdc.Tally( + scores=["flux"], + time=np.insert(np.logspace(-8, 1, 100), 0, 0.0), + energy="all_groups", +) + +# Settings +mcdc.settings.N_particle = 40 +mcdc.settings.N_batch = 2 +mcdc.settings.set_time_census(np.logspace(-5, 1, 6)) +mcdc.settings.active_bank_buffer = 1000 +mcdc.settings.census_bank_buffer_ratio = 5.0 +mcdc.settings.source_bank_buffer_ratio = 5.0 + +# Techniques +mcdc.simulation.population_control() + +# Run +mcdc.run() diff --git a/mcdc/test/regression/kobayashi3-TD/answer.h5 b/mcdc/test/regression/kobayashi3-TD/answer.h5 new file mode 100644 index 000000000..fdb6fbd40 Binary files /dev/null and b/mcdc/test/regression/kobayashi3-TD/answer.h5 differ diff --git a/mcdc/test/regression/kobayashi3-TD/input.py b/mcdc/test/regression/kobayashi3-TD/input.py new file mode 100644 index 000000000..09776f81c --- /dev/null +++ b/mcdc/test/regression/kobayashi3-TD/input.py @@ -0,0 +1,79 @@ +import numpy as np +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== +# Based on Kobayashi dog-leg benchmark problem +# (PNE 2001, https://doi.org/10.1016/S0149-1970(01)00007-5) + +# Set materials +m = 
mcdc.MaterialMG(capture=np.array([0.05]), scatter=np.array([[0.05]])) +m_void = mcdc.MaterialMG(capture=np.array([5e-5]), scatter=np.array([[5e-5]])) + +# Set surfaces +sx1 = mcdc.Surface.PlaneX(x=0.0, boundary_condition="reflective") +sx2 = mcdc.Surface.PlaneX(x=10.0) +sx3 = mcdc.Surface.PlaneX(x=30.0) +sx4 = mcdc.Surface.PlaneX(x=40.0) +sx5 = mcdc.Surface.PlaneX(x=60.0, boundary_condition="vacuum") +sy1 = mcdc.Surface.PlaneY(y=0.0, boundary_condition="reflective") +sy2 = mcdc.Surface.PlaneY(y=10.0) +sy3 = mcdc.Surface.PlaneY(y=50.0) +sy4 = mcdc.Surface.PlaneY(y=60.0) +sy5 = mcdc.Surface.PlaneY(y=100.0, boundary_condition="vacuum") +sz1 = mcdc.Surface.PlaneZ(z=0.0, boundary_condition="reflective") +sz2 = mcdc.Surface.PlaneZ(z=10.0) +sz3 = mcdc.Surface.PlaneZ(z=30.0) +sz4 = mcdc.Surface.PlaneZ(z=40.0) +sz5 = mcdc.Surface.PlaneZ(z=60.0, boundary_condition="vacuum") + +# Set cells +# Source +source_cell = mcdc.Cell(region=+sx1 & -sx2 & +sy1 & -sy2 & +sz1 & -sz2, fill=m) +# Voids +channel_1 = +sx1 & -sx2 & +sy2 & -sy3 & +sz1 & -sz2 +channel_2 = +sx1 & -sx3 & +sy3 & -sy4 & +sz1 & -sz2 +channel_3 = +sx3 & -sx4 & +sy3 & -sy4 & +sz1 & -sz3 +channel_4 = +sx3 & -sx4 & +sy3 & -sy5 & +sz3 & -sz4 +void_channel = channel_1 | channel_2 | channel_3 | channel_4 +void_cell = mcdc.Cell(region=void_channel, fill=m_void) +# Shield +box = +sx1 & -sx5 & +sy1 & -sy5 & +sz1 & -sz5 +shield_cell = mcdc.Cell(region=box & ~void_channel, fill=m) + +# ====================================================================================== +# Set source +# ====================================================================================== +# The source pulses in t=[0,5] + +mcdc.Source( + x=[0.0, 10.0], + y=[0.0, 10.0], + z=[0.0, 10.0], + isotropic=True, + energy_group=0, + time=[0.0, 50.0], +) + +# ====================================================================================== +# Set tallies, settings, techniques, and run MC/DC +# 
====================================================================================== + +# Tallies +time_grid = np.linspace(0.0, 200.0, 21) +mcdc.Tally(cell=source_cell, scores=["flux"], time=time_grid) +mcdc.Tally(cell=void_cell, scores=["flux"], time=time_grid) +mcdc.Tally(cell=shield_cell, scores=["flux"], time=time_grid) +mesh = mcdc.MeshUniform(x=(0.0, 1.0, 60), y=(0.0, 1.0, 100)) +mcdc.Tally(mesh=mesh, scores=["flux"], time=time_grid) +mcdc.Tally(scores=["density"], time=time_grid) + +# Settings +mcdc.settings.N_particle = 25 + +# Techniques +mcdc.simulation.implicit_capture() + +# Run +mcdc.run() diff --git a/mcdc/test/regression/kornreich/answer.h5 b/mcdc/test/regression/kornreich/answer.h5 new file mode 100644 index 000000000..fe1ef13a0 Binary files /dev/null and b/mcdc/test/regression/kornreich/answer.h5 differ diff --git a/mcdc/test/regression/kornreich/input.py b/mcdc/test/regression/kornreich/input.py new file mode 100644 index 000000000..309bcc431 --- /dev/null +++ b/mcdc/test/regression/kornreich/input.py @@ -0,0 +1,80 @@ +import numpy as np +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== +# Based on Kornreich, ANE 2004, 31, 1477-1494, +# DOI: 10.1016/j.anucene.2004.03.012 + +# Set materials +m1 = mcdc.MaterialMG( + capture=np.array([0.0]), + scatter=np.array([[0.9]]), + fission=np.array([0.1]), + nu_p=np.array([6.0]), +) +m2 = mcdc.MaterialMG( + capture=np.array([0.68]), + scatter=np.array([[0.2]]), + fission=np.array([0.12]), + nu_p=np.array([2.5]), +) + +# Set surfaces +s1 = mcdc.Surface.PlaneX(x=0.0, boundary_condition="vacuum") +s2 = mcdc.Surface.PlaneX(x=1.5) +s3 = mcdc.Surface.PlaneX(x=2.5, boundary_condition="vacuum") + +# Set cells +mcdc.Cell(region=+s1 & -s2, fill=m1) +mcdc.Cell(region=+s2 & -s3, fill=m2) + +# 
====================================================================================== +# Set source +# ====================================================================================== + +mcdc.Source(x=[0.0, 2.5], isotropic=True, energy_group=0) + +# ====================================================================================== +# Set tallies, settings, and run MC/DC +# ====================================================================================== + +# Tallies +mesh = mcdc.MeshStructured( + x=np.array( + [ + 0.0, + 0.15, + 0.3, + 0.45, + 0.6, + 0.75, + 0.9, + 1.05, + 1.2, + 1.35, + 1.5, + 1.6, + 1.7, + 1.8, + 1.9, + 2, + 2.1, + 2.2, + 2.3, + 2.4, + 2.5, + ] + ) +) +mcdc.Tally(mesh=mesh, scores=["flux"]) + +# Settings +mcdc.settings.N_particle = 100 +mcdc.settings.census_bank_buffer_ratio = 3.0 +mcdc.settings.source_bank_buffer_ratio = 3.0 +mcdc.settings.set_eigenmode(N_inactive=1, N_active=2, gyration_radius="only-x") + +# Run +mcdc.run() diff --git a/mcdc/test/regression/lockwood/answer.h5 b/mcdc/test/regression/lockwood/answer.h5 new file mode 100644 index 000000000..d9f5e18a6 Binary files /dev/null and b/mcdc/test/regression/lockwood/answer.h5 differ diff --git a/mcdc/test/regression/lockwood/input.py b/mcdc/test/regression/lockwood/input.py new file mode 100644 index 000000000..5464a9f98 --- /dev/null +++ b/mcdc/test/regression/lockwood/input.py @@ -0,0 +1,96 @@ +import numpy as np +import os +import math +import mcdc +from datetime import datetime + +# Set the XS library directory +os.environ["MCDC_LIB"] = "../mcdc-regression_test_data/" + +# ============================================================================= +# Set problem parameters +# ============================================================================= +# Energy and Angle Parameters +MATERIAL_SYMBOL = "Al" +ENERGY = 1e4 # eV +CSDA_RANGE = 0.569 # g/cm2 +ANGLE = 0.0 + +# MCDC Simulation Parameters +N_PARTICLES = 10 +z0 = 0.0 # Starting source position + +# Material 
Properties +RHO_G_CM3 = 2.70 # g/cm3 +ATOMIC_WEIGHT_G_MOL = 26.7497084 # g/mol +AREAL_DENSITY_G_CM2 = 5.05e-3 # g/cm2 + +# Standard Calculations +dz = AREAL_DENSITY_G_CM2 / RHO_G_CM3 +AVAGADRO_NUMBER = 6.02214076e23 # atoms/mol +MAT_DENSITY_ATOMS_PER_BARN_CM = ( + AVAGADRO_NUMBER / ATOMIC_WEIGHT_G_MOL * RHO_G_CM3 / 1e24 +) * 1e-2 # atoms/barn-cm +TINY = 1e-30 +L = CSDA_RANGE / RHO_G_CM3 # cm +N_LAYERS = 1 +THETA = math.radians(ANGLE) + +# Output variables for naming +np_name = len(str(N_PARTICLES)) - 1 +e_name = f"{ENERGY:.2g}" + +# ============================================================================= +# Set materials +# ============================================================================= +mat = mcdc.Material( + element_composition={MATERIAL_SYMBOL: MAT_DENSITY_ATOMS_PER_BARN_CM} +) + +# ============================================================================= +# Set geometry (surfaces and cells) +# ============================================================================= +# Z-direction surfaces for layers + +s1 = mcdc.Surface.PlaneZ(z=0.0, boundary_condition="vacuum") + +s2 = mcdc.Surface.PlaneZ(z=L, boundary_condition="vacuum") + +mcdc.Cell(region=+s1 & -s2, fill=mat) + +# ============================================================================= +# Set source +# ============================================================================= +# Parallel beam of 1 MeV electrons entering at z=0 + +mcdc.Source( + z=[z0 + TINY, z0 + TINY], + particle_type="electron", + energy=np.array([[ENERGY - 1, ENERGY + 1], [0.5, 0.5]]), + direction=[math.sin(THETA), 0.0 + TINY, math.cos(THETA)], +) + +# ============================================================================= +# Set tally +# ============================================================================= +# Energy deposition tally along z-axis +z_bins = np.linspace(0.0, L, N_LAYERS + 1) +mesh = mcdc.MeshStructured(z=z_bins) + +mcdc.Tally(name="edep", mesh=mesh, 
scores=["energy_deposition"]) + +mcdc.Tally(name="flux", scores=["flux"], mesh=mesh) + +mcdc.Tally(name="s1_current", surface=s1, scores=["net-current"]) + +mcdc.Tally(name="s2_current", surface=s2, scores=["net-current"]) + +# ============================================================================= +# Settings and run +# ============================================================================= + +mcdc.settings.set_transported_particles(["electron"]) +mcdc.settings.N_particle = N_PARTICLES +mcdc.settings.active_bank_buffer = N_PARTICLES * 10 + +mcdc.run() diff --git a/mcdc/test/regression/moving_pellet/answer.h5 b/mcdc/test/regression/moving_pellet/answer.h5 new file mode 100644 index 000000000..108b611ac Binary files /dev/null and b/mcdc/test/regression/moving_pellet/answer.h5 differ diff --git a/mcdc/test/regression/moving_pellet/input.py b/mcdc/test/regression/moving_pellet/input.py new file mode 100644 index 000000000..77148b19f --- /dev/null +++ b/mcdc/test/regression/moving_pellet/input.py @@ -0,0 +1,95 @@ +import numpy as np + +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== + +# Set materials +fuel = mcdc.MaterialMG( + capture=np.array([0.5]), + fission=np.array([0.25]), + nu_p=np.array([1.5]), + speed=np.array([200000.0]), +) +air = mcdc.MaterialMG( + capture=np.array([0.002]), + scatter=np.array([[0.008]]), + speed=np.array([200000.0]), +) + +# Set surfaces +cylinder_z = mcdc.Surface.CylinderZ(center=[0.0, 0.0], radius=1.0) +top_z = mcdc.Surface.PlaneZ(z=9.0) +bot_z = mcdc.Surface.PlaneZ(z=6.0) + +# Move surfaces +cylinder_z.move([[-0.5, 0.0, 0.0], [1.0, 0.0, 0.0], [-2.0, 0.0, 0.0]], [2.0, 5.0, 1.0]) +top_z.move([[0.0, 0.0, -2.0], [0.0, 0.0, 4.0], [0.0, 0.0, -10.0]], [5.0, 2.0, 1.0]) +bot_z.move([[0.0, 0.0, -2.0], [0.0, 0.0, 4.0], [0.0, 0.0, -10.0]], [5.0, 2.0, 1.0]) + +# Set container cell 
surfaces +min_x = mcdc.Surface.PlaneX(x=-5.0, boundary_condition="vacuum") +max_x = mcdc.Surface.PlaneX(x=5.0, boundary_condition="vacuum") +min_y = mcdc.Surface.PlaneY(y=-5.0, boundary_condition="vacuum") +max_y = mcdc.Surface.PlaneY(y=5.0, boundary_condition="vacuum") +min_z = mcdc.Surface.PlaneZ(z=-10.0, boundary_condition="vacuum") +max_z = mcdc.Surface.PlaneZ(z=10.0, boundary_condition="vacuum") + +# Make cells +fuel_pellet_region = +bot_z & -top_z & -cylinder_z +mcdc.Cell(region=fuel_pellet_region, fill=fuel) +mcdc.Cell( + region=~fuel_pellet_region & +min_x & -max_x & +min_y & -max_y & +min_z & -max_z, + fill=air, +) + +# ====================================================================================== +# Set source +# ====================================================================================== + +mcdc.Source( + x=[2.0, 3.0], + y=[-0.5, 0.5], + z=[-0.5, 0.5], + isotropic=True, + energy_group=0, + time=[0.0, 9.0], +) + +# ====================================================================================== +# Set tallies, settings, and run MC/DC +# ====================================================================================== + +# Tallies +mesh = mcdc.MeshStructured( + x=np.linspace(-5, 5, 101), + z=np.linspace(-10, 10, 101), +) +mcdc.Tally(mesh=mesh, scores=["fission"], time=np.linspace(0, 9, 46)) + +# Settings +mcdc.settings.N_particle = 50 +mcdc.settings.N_batch = 2 +mcdc.settings.active_bank_buffer = 1000 + +# Run (or visualize) +visualize = False +if not visualize: + mcdc.run() +else: + colors = { + fuel: "red", + air: "blue", + } + mcdc.visualize( + "xz", + y=0.0, + x=[-5.0, 5.0], + z=[-10, 10], + pixels=(100, 100), + colors=colors, + time=np.linspace(0.0, 9.0, 19), + save_as="figure", + ) diff --git a/mcdc/test/regression/moving_source/answer.h5 b/mcdc/test/regression/moving_source/answer.h5 new file mode 100644 index 000000000..20b26dd1f Binary files /dev/null and b/mcdc/test/regression/moving_source/answer.h5 differ diff --git 
a/mcdc/test/regression/moving_source/input.py b/mcdc/test/regression/moving_source/input.py new file mode 100644 index 000000000..9fa587501 --- /dev/null +++ b/mcdc/test/regression/moving_source/input.py @@ -0,0 +1,59 @@ +import numpy as np + +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== + +# Set materials +air = mcdc.MaterialMG( + capture=np.array([0.002]), + scatter=np.array([[0.008]]), + speed=np.array([200000.0]), +) + +# Set container cell surfaces +min_x = mcdc.Surface.PlaneX(x=-5.0, boundary_condition="vacuum") +max_x = mcdc.Surface.PlaneX(x=5.0, boundary_condition="vacuum") +min_y = mcdc.Surface.PlaneY(y=-5.0, boundary_condition="vacuum") +max_y = mcdc.Surface.PlaneY(y=5.0, boundary_condition="vacuum") +min_z = mcdc.Surface.PlaneZ(z=-10.0, boundary_condition="vacuum") +max_z = mcdc.Surface.PlaneZ(z=10.0, boundary_condition="vacuum") + +# Make cells +mcdc.Cell(region=+min_x & -max_x & +min_y & -max_y & +min_z & -max_z, fill=air) + +# ====================================================================================== +# Set source +# ====================================================================================== + +src = mcdc.Source( + x=[-4.0, -3.0], + y=[-0.5, 0.5], + z=[-0.5, 0.5], + direction=[1.0, 1.0, 0.0], + polar_cosine=[-1.0, -0.9], + energy_group=0, + time=[0.0, 10.0], +) +src.move([[1.0, 0.0, 0.0], [-0.5, 2.0, 0.0], [0.0, -3.0, 0.0]], [7.0, 2.0, 1.0]) + +# ====================================================================================== +# Set tallies, settings, and run MC/DC +# ====================================================================================== + +# Tallies +mesh = mcdc.MeshStructured( + x=np.linspace(-5.0, 5.0, 21), + y=np.linspace(-5.0, 5.0, 21), +) +mcdc.Tally(mesh=mesh, scores=["flux"], time=np.linspace(0, 10, 11)) + + +# Settings 
+mcdc.settings.N_particle = 1000 +mcdc.settings.N_batch = 2 + +# Run +mcdc.run() diff --git a/mcdc/test/regression/pincell-energy_deposition/answer.h5 b/mcdc/test/regression/pincell-energy_deposition/answer.h5 new file mode 100644 index 000000000..24bef25b1 Binary files /dev/null and b/mcdc/test/regression/pincell-energy_deposition/answer.h5 differ diff --git a/mcdc/test/regression/pincell-energy_deposition/input.py b/mcdc/test/regression/pincell-energy_deposition/input.py new file mode 100644 index 000000000..9504e18ff --- /dev/null +++ b/mcdc/test/regression/pincell-energy_deposition/input.py @@ -0,0 +1,51 @@ +import mcdc +import numpy as np +import os + +os.environ["MCDC_LIB"] = "../mcdc-regression_test_data/" + +# Material +fuel = mcdc.Material( + nuclide_composition={ + "U235": 0.0001654509603995036, + "U238": 0.022801089905717036, + "O16": 0.04593308173223308, + } +) +moderator = mcdc.Material( + nuclide_composition={ + "H1": 0.05129627050184732, + "O16": 0.024622209840886707, + "B10": 4.103701640147785e-05, + } +) + +# Geometry +cylinder = mcdc.Surface.CylinderZ(radius=0.45720) +pitch = 1.25984 +x0 = mcdc.Surface.PlaneX(x=-pitch / 2, boundary_condition="reflective") +x1 = mcdc.Surface.PlaneX(x=pitch / 2, boundary_condition="reflective") +y0 = mcdc.Surface.PlaneY(y=-pitch / 2, boundary_condition="reflective") +y1 = mcdc.Surface.PlaneY(y=pitch / 2, boundary_condition="reflective") + +mcdc.Cell(-cylinder, fill=fuel) +mcdc.Cell(+x0 & -x1 & +y0 & -y1 & +cylinder, fill=moderator) + +# Source +mcdc.Source(position=[0.0, 0.0, 0.0], isotropic=True, time=0.0, energy=14.1e6) + +# Settings +mcdc.settings.N_particle = 20 +mcdc.settings.N_batch = 2 +mcdc.settings.time_boundary = 1.0 +mcdc.settings.active_bank_buffer = 1000 + +# Edep tally +mesh = mcdc.MeshUniform( + x=(-pitch / 2, pitch / 8, 8), + y=(-pitch / 2, pitch / 8, 8), +) + +mcdc.Tally(name="edep_mesh", mesh=mesh, scores=["energy_deposition"]) + +mcdc.run() diff --git 
a/mcdc/test/regression/pincell-k_eigenvalue/answer.h5 b/mcdc/test/regression/pincell-k_eigenvalue/answer.h5 new file mode 100644 index 000000000..206d85abe Binary files /dev/null and b/mcdc/test/regression/pincell-k_eigenvalue/answer.h5 differ diff --git a/mcdc/test/regression/pincell-k_eigenvalue/input.py b/mcdc/test/regression/pincell-k_eigenvalue/input.py new file mode 100644 index 000000000..b497d7662 --- /dev/null +++ b/mcdc/test/regression/pincell-k_eigenvalue/input.py @@ -0,0 +1,51 @@ +import mcdc +import numpy as np +import os + +os.environ["MCDC_LIB"] = "../mcdc-regression_test_data/" + +# Material +fuel = mcdc.Material( + nuclide_composition={ + "U235": 0.0001654509603995036, + "U238": 0.022801089905717036, + "O16": 0.04593308173223308, + } +) +moderator = mcdc.Material( + nuclide_composition={ + "H1": 0.05129627050184732, + "O16": 0.024622209840886707, + "B10": 4.103701640147785e-05, + } +) + +# Geometry +cylinder = mcdc.Surface.CylinderZ(radius=0.45720) +pitch = 1.25984 +x0 = mcdc.Surface.PlaneX(x=-pitch / 2, boundary_condition="reflective") +x1 = mcdc.Surface.PlaneX(x=pitch / 2, boundary_condition="reflective") +y0 = mcdc.Surface.PlaneY(y=-pitch / 2, boundary_condition="reflective") +y1 = mcdc.Surface.PlaneY(y=pitch / 2, boundary_condition="reflective") +# +mcdc.Cell(-cylinder, fill=fuel) +mcdc.Cell(+x0 & -x1 & +y0 & -y1 & +cylinder, fill=moderator) + +# Source +mcdc.Source(position=[0.0, 0.0, 0.0], isotropic=True, energy=14.1e6) + +# Setting +mcdc.settings.N_particle = 30 +mcdc.settings.active_bank_buffer = 1000 +mcdc.settings.census_bank_buffer_ratio = 3.0 +mcdc.settings.source_bank_buffer_ratio = 3.0 +mcdc.settings.set_eigenmode(N_inactive=1, N_active=2) + +# Tally +e_min, e_max = 1e-5, 20.0e6 +groups = 500 +energies = np.logspace(np.log10(e_min), np.log10(e_max), groups + 1) + +mcdc.Tally(scores=["flux"], energy=energies) + +mcdc.run() diff --git a/mcdc/test/regression/pincell/.ipynb_checkpoints/input-checkpoint.py 
b/mcdc/test/regression/pincell/.ipynb_checkpoints/input-checkpoint.py new file mode 100644 index 000000000..5b762177d --- /dev/null +++ b/mcdc/test/regression/pincell/.ipynb_checkpoints/input-checkpoint.py @@ -0,0 +1,51 @@ +import mcdc +import numpy as np +import os + +os.environ["MCDC_LIB"] = "../mcdc-regression_test_data/" + +# Material +fuel = mcdc.Material( + nuclide_composition={ + "U235": 0.0001654509603995036, + "U238": 0.022801089905717036, + "O16": 0.04593308173223308, + } +) +moderator = mcdc.Material( + nuclide_composition={ + "H1": 0.05129627050184732, + "O16": 0.024622209840886707, + "B10": 4.103701640147785e-05, + } +) + +# Geometry +cylinder = mcdc.Surface.CylinderZ(radius=0.45720) +pitch = 1.25984 +x0 = mcdc.Surface.PlaneX(x=-pitch / 2, boundary_condition="reflective") +x1 = mcdc.Surface.PlaneX(x=pitch / 2, boundary_condition="reflective") +y0 = mcdc.Surface.PlaneY(y=-pitch / 2, boundary_condition="reflective") +y1 = mcdc.Surface.PlaneY(y=pitch / 2, boundary_condition="reflective") +# +mcdc.Cell(-cylinder, fill=fuel) +mcdc.Cell(+x0 & -x1 & +y0 & -y1 & +cylinder, fill=moderator) + +# Source +mcdc.Source(position=[0.0, 0.0, 0.0], isotropic=True, time=0.0, energy=14.1e6) + +# Setting +mcdc.settings.N_particle = 20 +mcdc.settings.N_batch = 2 +mcdc.settings.time_boundary = 1.0 +mcdc.settings.active_bank_buffer = 1000 + +# Tally +t_grid = np.insert(np.logspace(-9, -4, 200), 0, 0.0) +e_min, e_max = 1e-5, 20.0e6 +groups = 500 +energies = np.logspace(np.log10(e_min), np.log10(e_max), groups + 1) + +mcdc.Tally(scores=["flux"], time=t_grid, energy=energies) + +mcdc.run() diff --git a/mcdc/test/regression/pincell/answer.h5 b/mcdc/test/regression/pincell/answer.h5 new file mode 100644 index 000000000..2596fc17c Binary files /dev/null and b/mcdc/test/regression/pincell/answer.h5 differ diff --git a/mcdc/test/regression/pincell/input.py b/mcdc/test/regression/pincell/input.py new file mode 100644 index 000000000..5b762177d --- /dev/null +++ 
b/mcdc/test/regression/pincell/input.py @@ -0,0 +1,51 @@ +import mcdc +import numpy as np +import os + +os.environ["MCDC_LIB"] = "../mcdc-regression_test_data/" + +# Material +fuel = mcdc.Material( + nuclide_composition={ + "U235": 0.0001654509603995036, + "U238": 0.022801089905717036, + "O16": 0.04593308173223308, + } +) +moderator = mcdc.Material( + nuclide_composition={ + "H1": 0.05129627050184732, + "O16": 0.024622209840886707, + "B10": 4.103701640147785e-05, + } +) + +# Geometry +cylinder = mcdc.Surface.CylinderZ(radius=0.45720) +pitch = 1.25984 +x0 = mcdc.Surface.PlaneX(x=-pitch / 2, boundary_condition="reflective") +x1 = mcdc.Surface.PlaneX(x=pitch / 2, boundary_condition="reflective") +y0 = mcdc.Surface.PlaneY(y=-pitch / 2, boundary_condition="reflective") +y1 = mcdc.Surface.PlaneY(y=pitch / 2, boundary_condition="reflective") +# +mcdc.Cell(-cylinder, fill=fuel) +mcdc.Cell(+x0 & -x1 & +y0 & -y1 & +cylinder, fill=moderator) + +# Source +mcdc.Source(position=[0.0, 0.0, 0.0], isotropic=True, time=0.0, energy=14.1e6) + +# Setting +mcdc.settings.N_particle = 20 +mcdc.settings.N_batch = 2 +mcdc.settings.time_boundary = 1.0 +mcdc.settings.active_bank_buffer = 1000 + +# Tally +t_grid = np.insert(np.logspace(-9, -4, 200), 0, 0.0) +e_min, e_max = 1e-5, 20.0e6 +groups = 500 +energies = np.logspace(np.log10(e_min), np.log10(e_max), groups + 1) + +mcdc.Tally(scores=["flux"], time=t_grid, energy=energies) + +mcdc.run() diff --git a/mcdc/test/regression/run.py b/mcdc/test/regression/run.py new file mode 100644 index 000000000..ab953482b --- /dev/null +++ b/mcdc/test/regression/run.py @@ -0,0 +1,263 @@ +import h5py, os, sys, argparse, fnmatch +import numpy as np +from colorama import Fore, Style + +# Option parser +parser = argparse.ArgumentParser(description="MC/DC regression test") +parser.add_argument("--mode", type=str, choices=["python", "numba"], default="python") +parser.add_argument("--target", type=str, choices=["cpu", "gpu"], default="cpu") 
+parser.add_argument("--mpiexec", type=int, default=0) +parser.add_argument("--srun", type=int, default=0) +parser.add_argument("--name", type=str, default="ALL") +parser.add_argument("--skip", type=str, default="NONE") +args, unargs = parser.parse_known_args() + +# Parse +mode = args.mode +target = args.target +mpiexec = args.mpiexec +srun = args.srun +name = args.name +skip = args.skip + +regtest_data_name = "mcdc-regression_test_data" +non_test_files = ["__pycache__", regtest_data_name, "tmp"] + +# Clone and update regression test data if needed +if not os.path.isdir(regtest_data_name): + os.system(f"git clone https://github.com/mcdc-project/{regtest_data_name}.git") +else: + os.chdir(regtest_data_name) + os.system("git pull") + os.chdir("..") + +# Get test names +if name == "ALL": + names = [] + for item in os.listdir(): + if os.path.isdir(item) and item not in non_test_files: + names.append(item) +else: + names = [item for item in os.listdir() if fnmatch.fnmatch(item, name)] +names.sort() + +# Remove skipped if specified +if skip != "NONE": + skips = [item for item in os.listdir() if fnmatch.fnmatch(item, skip)] + for name in skips: + print(Fore.YELLOW + "Note: Skipping %s" % name + Style.RESET_ALL) + names.remove(name) + +# Skip domain decomp tests unless there are 4 MPI processes +temp = names.copy() +parallel_run = mpiexec > 0 or srun > 0 +for name in names: + if name == "slab_reed_dd" and ( + not parallel_run or not (mpiexec % 4 == 0 and srun % 4 == 0) + ): + temp.remove(name) + print( + Fore.YELLOW + + "Note: Skipping %s (require multiple of 4 MPI ranks)" % name + + Style.RESET_ALL + ) + elif name == "slab_reed_dd_3d" and ( + not parallel_run or not (mpiexec % 16 == 0 and srun % 16 == 0) + ): + temp.remove(name) + print( + Fore.YELLOW + + "Note: Skipping %s (require multiple of 16 MPI ranks)" % name + + Style.RESET_ALL + ) + +names = temp + +# Skip iqmc if GPU run +if target == "gpu": + temp = names.copy() + for name in names: + if ("iqmc" in name) or 
("eigenvalue" in name): + temp.remove(name) + print( + Fore.YELLOW + "Note: Skipping %s (GPU target)" % name + Style.RESET_ALL + ) +names = temp + +# Data for each test +printouts = [] +runtimes = [] +flags = [] +error_msgs = [] +crashes = [] +all_pass = True + +# Run all tests +for i, name in enumerate(names): + print("\n[%i/%i] " % (i + 1, len(names)) + name) + error_msgs.append([]) + crashes.append(False) + runtimes.append([0]) + + # Change directory + os.chdir(name) + + # Check test setup + if not os.path.exists("input.py"): + print(Fore.RED + " input.py is missing\n" + Style.RESET_ALL) + sys.exit() + if not os.path.exists("answer.h5"): + print(Fore.RED + " answer.h5 is missing\n" + Style.RESET_ALL) + sys.exit() + + # Delete output if exists + if os.path.exists("output.h5"): + os.remove("output.h5") + + # Run the test problem (redirect the stdout) + if mpiexec > 1: + gpus_per_task = "" + if target == "gpu": + gpus_per_task = f"--gpus-per-task=1 " + os.system( + "mpiexec -n %i %s python input.py --mode=%s --target=%s --output=output --no-progress-bar> tmp 2>&1" + % (mpiexec, gpus_per_task, mode, target) + ) + elif srun > 1: + gpus_per_task = "" + if target == "gpu": + gpus_per_task = f"--gpus-per-task=1 " + os.system( + "srun -n %i %s python input.py --mode=%s --target=%s --output=output --no-progress-bar> tmp 2>&1" + % (srun, gpus_per_task, mode, target) + ) + else: + os.system( + "python input.py --mode=%s --target=%s --output=output --no-progress-bar > tmp 2>&1" + % (mode, target) + ) + with open("tmp") as f: + printouts.append(f.read()) + os.remove("tmp") + + # Check if crashed + if not os.path.exists("output.h5"): + print(Fore.RED + " Failed: Run crashed" + Style.RESET_ALL) + all_pass = False + crashes[-1] = True + os.chdir("..") + continue + + # Get the output and the answer key + output = h5py.File("output.h5", "r") + answer = h5py.File("answer.h5", "r") + + runtimes[-1] = output["runtime/total"][()] + print(" (%.2f seconds)" % runtimes[-1][0]) + + # 
Compare mean, sdev, and uq_var (if available) + if "iqmc" not in output.keys(): + name_root = "tallies" + for tally in [key for key in answer[name_root].keys()]: + name_tally = name_root + "/" + tally + for score in [key for key in answer[name_tally].keys()]: + if score in ["grid"]: + continue + name_score = name_tally + "/" + score + for result in [key for key in answer[name_score].keys()]: + name = name_score + "/" + result + a = output[name][()] + b = answer[name][()] + + # if (("sdev" in result) or ("uq_var" in result)) and ( + if ("uq_var" in result) and (args.target == "gpu"): + continue + # Passed? + if np.isclose(a, b).all(): + print( + Fore.GREEN + " {}: Passed".format(name) + Style.RESET_ALL + ) + else: + all_pass = False + error_msgs[-1].append( + "Differences in %s" + % ( + name + + "/" + + result + + "\n" + + "{}\n".format(a - b) + + "Max difference: {}".format(np.max(np.abs(a - b))) + ) + ) + print(Fore.RED + " {}: Failed".format(name) + Style.RESET_ALL) + + # Other quantities + for result_name in ["k_mean", "k_sdev", "k_cycle", "k_eff"]: + if result_name not in output.keys(): + continue + + a = output[result_name][()] + b = answer[result_name][()] + + # Passed? + if np.isclose(a, b).all(): + print(Fore.GREEN + " {}: Passed".format(result_name) + Style.RESET_ALL) + else: + all_pass = False + error_msgs[-1].append( + "Differences in {}\n{}".format(result_name, a - b) + ) + print(Fore.RED + " {}: Failed".format(result_name) + Style.RESET_ALL) + + # iQMC flux + if "iqmc" in output.keys(): + for score in [key for key in output["iqmc/tally/"].keys()]: + name = f"iqmc/tally/{score}/mean" + a = np.squeeze(output[name][()]) + b = np.squeeze(answer[name][()]) + if np.isnan(b).all(): + continue + # Passed? 
+ if np.isclose(a, b).all(): + print(Fore.GREEN + " {}: Passed".format(score) + Style.RESET_ALL) + else: + all_pass = False + error_msgs[-1].append("Differences in {}\n{}".format(score, a - b)) + print(Fore.RED + " {}: Failed".format(score) + Style.RESET_ALL) + + # Close files + output.close() + answer.close() + + # Move back up + os.chdir("..") + +# Report test results +N_fails = 0 +for i in range(len(names)): + if crashes[i] or len(error_msgs[i]) > 0: + N_fails += 1 + +print( + "\nTests passed: " + + Fore.GREEN + + "%i/%i" % (len(names) - N_fails, len(names)) + + Style.RESET_ALL +) +print("Tests failed: " + Fore.RED + "%i/%i" % (N_fails, len(names)) + Style.RESET_ALL) +print(" (%.2f seconds)\n" % np.sum(np.array(runtimes))) +for i in range(len(names)): + if crashes[i]: + print("\n" + "=" * 80) + print("\n## {} crashed:".format(names[i])) + print(printouts[i]) + if len(error_msgs[i]) > 0: + print("\n" + "=" * 80) + print("\n## {} failed:".format(names[i])) + print(printouts[i]) + print("\n===\n") + for msg in error_msgs[i]: + print("\n# " + msg + "\n") + +assert all_pass diff --git a/mcdc/test/regression/slab_absorbium/.ipynb_checkpoints/input-checkpoint.py b/mcdc/test/regression/slab_absorbium/.ipynb_checkpoints/input-checkpoint.py new file mode 100644 index 000000000..e6bb936cf --- /dev/null +++ b/mcdc/test/regression/slab_absorbium/.ipynb_checkpoints/input-checkpoint.py @@ -0,0 +1,46 @@ +import numpy as np +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== +# Three slab layers with different purely-absorbing materials + +# Set materials +m1 = mcdc.MaterialMG(capture=np.array([1.0])) +m2 = mcdc.MaterialMG(capture=np.array([1.5])) +m3 = mcdc.MaterialMG(capture=np.array([2.0])) + +# Set surfaces +s1 = mcdc.Surface.PlaneZ(z=0.0, boundary_condition="vacuum") +s2 = mcdc.Surface.PlaneZ(z=2.0) +s3 = 
mcdc.Surface.PlaneZ(z=4.0) +s4 = mcdc.Surface.PlaneZ(z=6.0, boundary_condition="vacuum") + +# Set cells +mcdc.Cell(region=+s1 & -s2, fill=m2) +mcdc.Cell(region=+s2 & -s3, fill=m3) +mcdc.Cell(region=+s3 & -s4, fill=m1) + +# ====================================================================================== +# Set source +# ====================================================================================== +# Uniform isotropic source throughout the domain + +mcdc.Source(z=[0.0, 6.0], isotropic=True, energy_group=0) + +# ====================================================================================== +# Set tallies, settings, and run mcdc +# ====================================================================================== + +# Tallies +mcdc.Tally(surface=s4, scores=["net-current"]) +mesh = mcdc.MeshStructured(z=np.linspace(0.0, 6.0, 61)) +mcdc.Tally(mesh=mesh, mu=np.linspace(-1.0, 1.0, 32 + 1), scores=["flux", "collision"]) + +# Settings +mcdc.settings.N_particle = 100 +mcdc.settings.N_batch = 2 + +# Run +mcdc.run() diff --git a/mcdc/test/regression/slab_absorbium/answer.h5 b/mcdc/test/regression/slab_absorbium/answer.h5 new file mode 100644 index 000000000..5eacfc5b8 Binary files /dev/null and b/mcdc/test/regression/slab_absorbium/answer.h5 differ diff --git a/mcdc/test/regression/slab_absorbium/input.py b/mcdc/test/regression/slab_absorbium/input.py new file mode 100644 index 000000000..e6bb936cf --- /dev/null +++ b/mcdc/test/regression/slab_absorbium/input.py @@ -0,0 +1,46 @@ +import numpy as np +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== +# Three slab layers with different purely-absorbing materials + +# Set materials +m1 = mcdc.MaterialMG(capture=np.array([1.0])) +m2 = mcdc.MaterialMG(capture=np.array([1.5])) +m3 = mcdc.MaterialMG(capture=np.array([2.0])) + +# Set surfaces +s1 = 
mcdc.Surface.PlaneZ(z=0.0, boundary_condition="vacuum") +s2 = mcdc.Surface.PlaneZ(z=2.0) +s3 = mcdc.Surface.PlaneZ(z=4.0) +s4 = mcdc.Surface.PlaneZ(z=6.0, boundary_condition="vacuum") + +# Set cells +mcdc.Cell(region=+s1 & -s2, fill=m2) +mcdc.Cell(region=+s2 & -s3, fill=m3) +mcdc.Cell(region=+s3 & -s4, fill=m1) + +# ====================================================================================== +# Set source +# ====================================================================================== +# Uniform isotropic source throughout the domain + +mcdc.Source(z=[0.0, 6.0], isotropic=True, energy_group=0) + +# ====================================================================================== +# Set tallies, settings, and run mcdc +# ====================================================================================== + +# Tallies +mcdc.Tally(surface=s4, scores=["net-current"]) +mesh = mcdc.MeshStructured(z=np.linspace(0.0, 6.0, 61)) +mcdc.Tally(mesh=mesh, mu=np.linspace(-1.0, 1.0, 32 + 1), scores=["flux", "collision"]) + +# Settings +mcdc.settings.N_particle = 100 +mcdc.settings.N_batch = 2 + +# Run +mcdc.run() diff --git a/mcdc/test/regression/slab_isobeam_td/answer.h5 b/mcdc/test/regression/slab_isobeam_td/answer.h5 new file mode 100644 index 000000000..67b44241f Binary files /dev/null and b/mcdc/test/regression/slab_isobeam_td/answer.h5 differ diff --git a/mcdc/test/regression/slab_isobeam_td/input.py b/mcdc/test/regression/slab_isobeam_td/input.py new file mode 100644 index 000000000..63f1927b9 --- /dev/null +++ b/mcdc/test/regression/slab_isobeam_td/input.py @@ -0,0 +1,49 @@ +import numpy as np + +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== +# Finite homogeneous pure-absorbing slab + +# Set materials +m = mcdc.MaterialMG(capture=np.array([1.0])) + +# Set surfaces +s1 = 
mcdc.Surface.PlaneX(x=0.0, boundary_condition="vacuum") +s2 = mcdc.Surface.PlaneX(x=5.0, boundary_condition="vacuum") + +# Set cells +mcdc.Cell(region=+s1 & -s2, fill=m) + +# ====================================================================================== +# Set source +# ====================================================================================== +# Isotropic beam from left-end + +mcdc.Source( + position=(0.0, 0.0, 0.0), + white_direction=(1.0, 0.0, 0.0), + energy_group=0, + time=[0.0, 5.0], +) + +# ====================================================================================== +# Set tallies, settings, and run MC/DC +# ====================================================================================== + +# Tallies +mesh = mcdc.MeshUniform(x=(0.0, 0.1, 50)) +mcdc.Tally( + mesh=mesh, + scores=["flux"], + time=np.linspace(0.0, 5.0, 51), +) + +# Settings +mcdc.settings.N_particle = 100 +mcdc.settings.N_batch = 2 + +# Run +mcdc.run() diff --git a/mcdc/test/regression/slab_isobeam_td_census/answer.h5 b/mcdc/test/regression/slab_isobeam_td_census/answer.h5 new file mode 100644 index 000000000..b6a2d5cb3 Binary files /dev/null and b/mcdc/test/regression/slab_isobeam_td_census/answer.h5 differ diff --git a/mcdc/test/regression/slab_isobeam_td_census/input.py b/mcdc/test/regression/slab_isobeam_td_census/input.py new file mode 100644 index 000000000..f665e5c78 --- /dev/null +++ b/mcdc/test/regression/slab_isobeam_td_census/input.py @@ -0,0 +1,54 @@ +import numpy as np + +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== +# Finite homogeneous pure-absorbing slab + +# Set materials +m = mcdc.MaterialMG(capture=np.array([1.0])) + +# Set surfaces +s1 = mcdc.Surface.PlaneX(x=0.0, boundary_condition="vacuum") +s2 = mcdc.Surface.PlaneX(x=5.0, boundary_condition="vacuum") + +# Set cells 
+mcdc.Cell(region=+s1 & -s2, fill=m) + +# ====================================================================================== +# Set source +# ====================================================================================== +# Isotropic beam from left-end + +mcdc.Source( + position=(1e-10, 0.0, 0.0), + white_direction=(1.0, 0.0, 0.0), + energy_group=0, + time=[0.0, 5.0], +) + +# ====================================================================================== +# Set tallies, settings, techniques, and run MC/DC +# ====================================================================================== + +# Tallies +mesh = mcdc.MeshUniform(x=(0.0, 0.1, 50)) +mcdc.Tally( + mesh=mesh, + scores=["flux"], + time=np.linspace(0.0, 5.0, 51), +) + +# Settings +mcdc.settings.N_particle = 100 +mcdc.settings.N_batch = 2 +mcdc.settings.source_bank_buffer_ratio = 5.0 +mcdc.settings.set_time_census(np.linspace(0.0, 5.0, 6)[1:]) + +# Techniques +mcdc.simulation.population_control() + +# Run +mcdc.run() diff --git a/mcdc/test/regression/slab_reed/answer.h5 b/mcdc/test/regression/slab_reed/answer.h5 new file mode 100644 index 000000000..a398c2a77 Binary files /dev/null and b/mcdc/test/regression/slab_reed/answer.h5 differ diff --git a/mcdc/test/regression/slab_reed/input.py b/mcdc/test/regression/slab_reed/input.py new file mode 100644 index 000000000..e2dcd71b6 --- /dev/null +++ b/mcdc/test/regression/slab_reed/input.py @@ -0,0 +1,53 @@ +import numpy as np +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== +# Three slab layers with different materials +# Based on William H. 
Reed, NSE (1971), 46:2, 309-314, DOI: 10.13182/NSE46-309 + +# Set materials +m1 = mcdc.MaterialMG(capture=np.array([50.0])) +m2 = mcdc.MaterialMG(capture=np.array([5.0])) +m3 = mcdc.MaterialMG(capture=np.array([0.0])) # Vacuum +m4 = mcdc.MaterialMG(capture=np.array([0.1]), scatter=np.array([[0.9]])) + +# Set surfaces +s1 = mcdc.Surface.PlaneZ(z=0.0, boundary_condition="reflective") +s2 = mcdc.Surface.PlaneZ(z=2.0) +s3 = mcdc.Surface.PlaneZ(z=3.0) +s4 = mcdc.Surface.PlaneZ(z=5.0) +s5 = mcdc.Surface.PlaneZ(z=8.0, boundary_condition="vacuum") + +# Set cells +mcdc.Cell(region=+s1 & -s2, fill=m1) +mcdc.Cell(region=+s2 & -s3, fill=m2) +mcdc.Cell(region=+s3 & -s4, fill=m3) +mcdc.Cell(region=+s4 & -s5, fill=m4) + +# ====================================================================================== +# Set source +# ====================================================================================== + +# Isotropic source in the absorbing medium +mcdc.Source(z=[0.0, 2.0], isotropic=True, energy_group=0, probability=50.0) + +# Isotropic source in the first half of the outermost medium, +# with 1/100 strength +mcdc.Source(z=[5.0, 6.0], isotropic=True, energy_group=0, probability=0.5) + +# ====================================================================================== +# Set tallies, settings, and run MC/DC +# ====================================================================================== + +# Tallies +mesh = mcdc.MeshStructured(z=np.linspace(0.0, 8.0, 81)) +mcdc.Tally(mesh=mesh, scores=["flux"]) + +# Settings +mcdc.settings.N_particle = 4000 +mcdc.settings.N_batch = 2 + +# Run +mcdc.run() diff --git a/mcdc/test/regression/sphere_in_cube/answer.h5 b/mcdc/test/regression/sphere_in_cube/answer.h5 new file mode 100644 index 000000000..fc0decf82 Binary files /dev/null and b/mcdc/test/regression/sphere_in_cube/answer.h5 differ diff --git a/mcdc/test/regression/sphere_in_cube/input.py b/mcdc/test/regression/sphere_in_cube/input.py new file mode 100644 index 
000000000..8bedb7bbc --- /dev/null +++ b/mcdc/test/regression/sphere_in_cube/input.py @@ -0,0 +1,64 @@ +import numpy as np +import mcdc + +# ====================================================================================== +# Set model +# ====================================================================================== +# Homogeneous pure-fission sphere inside a pure-scattering cube + +# Set materials +pure_f = mcdc.MaterialMG(fission=np.array([1.0]), nu_p=np.array([1.2])) +pure_s = mcdc.MaterialMG(scatter=np.array([[1.0]])) + +# Set surfaces +sx1 = mcdc.Surface.PlaneX(x=0.0, boundary_condition="vacuum") +sx2 = mcdc.Surface.PlaneX(x=4.0, boundary_condition="vacuum") +sy1 = mcdc.Surface.PlaneY(y=0.0, boundary_condition="vacuum") +sy2 = mcdc.Surface.PlaneY(y=4.0, boundary_condition="vacuum") +sz1 = mcdc.Surface.PlaneZ(z=0.0, boundary_condition="vacuum") +sz2 = mcdc.Surface.PlaneZ(z=4.0, boundary_condition="vacuum") +sphere = mcdc.Surface.Sphere(center=[2.0, 2.0, 2.0], radius=1.5) +inside_sphere = -sphere +inside_box = +sx1 & -sx2 & +sy1 & -sy2 & +sz1 & -sz2 + +# Set cells +# Source +mcdc.Cell(region=inside_box & ~inside_sphere, fill=pure_s) + +# Sphere +sphere_cell = mcdc.Cell(region=inside_sphere, fill=pure_f) + +# ============================================================================= +# Set source +# ============================================================================= + +mcdc.Source( + x=[0.0, 4.0], + y=[0.0, 4.0], + z=[0.0, 4.0], + isotropic=True, + energy_group=0, + time=[0.0, 50.0], +) + +# ============================================================================= +# Set tallies, settings, techniques, and run MC/DC +# ============================================================================= + +# Tallies +mesh = mcdc.MeshUniform(x=(0.0, 4.0, 1), y=(0.0, 4.0, 1), z=(0.0, 4.0, 1)) +mcdc.Tally( + mesh=mesh, + scores=["fission"], +) +mcdc.Tally(cell=sphere_cell, scores=["fission"]) + +# Settings +mcdc.settings.N_particle = 100 
+mcdc.settings.N_batch = 2 + +# Techniques +mcdc.simulation.implicit_capture() + +# Run +mcdc.run() diff --git a/mcdc/test/unit/object_/tally/energy_deposition.py b/mcdc/test/unit/object_/tally/energy_deposition.py new file mode 100644 index 000000000..673d91cfc --- /dev/null +++ b/mcdc/test/unit/object_/tally/energy_deposition.py @@ -0,0 +1,16 @@ +import pytest +import mcdc + + +def test_edep_cannot_be_grouped_with_other_scores(capsys): + mesh = mcdc.MeshUniform( + x=(0.0, 1.0, 1), + y=(0.0, 1.0, 1), + z=(0.0, 1.0, 1), + ) + + with pytest.raises(SystemExit): + mcdc.Tally(mesh=mesh, scores=["flux", "energy_deposition"]) + + captured = capsys.readouterr() + assert "cannot be grouped with other scores yet" in captured.out diff --git a/mcdc/test/unit/run.py b/mcdc/test/unit/run.py new file mode 100644 index 000000000..a445eabc8 --- /dev/null +++ b/mcdc/test/unit/run.py @@ -0,0 +1,38 @@ +import time, os, sys +from pathlib import Path + +# Get all the test file paths +root = Path(".") +paths = root.rglob("*.py") + +EXCLUDE = {"run.py", "__pycache__", ".git", "conftest.py", "__init__.py"} +EXCLUDE_PREFIX = {"make_test_"} + +start = time.perf_counter() +for path in paths: + # Skip exact matches + if any(part in EXCLUDE for part in path.parts): + continue + + # Skip if any part starts with any prefix in EXCLUDE_PREFIX + PREFIX_TUPLE = tuple(EXCLUDE_PREFIX) + if any(part.startswith(PREFIX_TUPLE) for part in path.parts): + continue + + print(f"\nRunning {str(path)}") + sys.stdout.flush() + os.system(f"pytest -q {str(path)}") +end = time.perf_counter() +total_time = end - start + +if total_time >= 24 * 60 * 60: + total_time = total_time / 24 / 60 / 60 + print(f"\nTotal unit test runtime: {total_time:.3f} days") +elif total_time >= 60 * 60: + total_time = total_time / 60 / 60 + print(f"\nTotal unit test runtime: {total_time:.3f} hours") +elif total_time >= 60: + total_time = total_time / 60 + print(f"\nTotal unit test runtime: {total_time:.3f} minutes") +else: + 
print(f"\nTotal unit test runtime: {total_time:.3f} seconds") diff --git a/mcdc/test/unit/transport/distributions/__init__.py b/mcdc/test/unit/transport/distributions/__init__.py new file mode 100644 index 000000000..e894a02d9 --- /dev/null +++ b/mcdc/test/unit/transport/distributions/__init__.py @@ -0,0 +1 @@ +# Package marker for distribution unit test modules. diff --git a/mcdc/test/unit/transport/distributions/conftest.py b/mcdc/test/unit/transport/distributions/conftest.py new file mode 100644 index 000000000..96ee1b6c9 --- /dev/null +++ b/mcdc/test/unit/transport/distributions/conftest.py @@ -0,0 +1,54 @@ +import os +import pytest + +# Force pure-Python execution for numba in tests. +os.environ.setdefault("NUMBA_DISABLE_JIT", "1") + +import mcdc.transport.distribution as dist + + +class MockRNG: + def __init__(self, values): + self._values = list(values) + self._i = 0 + + def lcg(self, _state_container): + # MCDC uses dist.rng.lcg(state) as its RNG hook. We override it with this + # deterministic sequence so tests can be analytic and reproducible. + if self._i >= len(self._values): + raise IndexError("MockRNG depleted") + value = self._values[self._i] + self._i += 1 + return value + + +@pytest.fixture +def rng_state(): + """ + Return the minimal RNG state container expected by MCDC. + + MCDC passes a list of per-thread state dicts; tests only need one. + """ + return [dict(rng_seed=0)] + + +@pytest.fixture +def rng_sequence(): + """ + Temporarily replace dist.rng.lcg with a deterministic sequence. + + Usage: + rng_sequence([0.1, 0.2, 0.3]) + ... code under test that calls dist.rng.lcg(...) + """ + original_lcg = dist.rng.lcg + + def _apply(values): + # Swap in a mock LCG implementation that returns the provided values. + rng = MockRNG(values) + dist.rng.lcg = rng.lcg + return rng + + # Yield a callable to the test, then restore the real RNG after the test ends. 
import numpy as np


def make_test_kalbach_mann_data():
    """Build a two-table Kalbach-Mann test dataset and its layout dictionary."""
    # Incident-energy grid and per-table offsets (two tables of 3 points each).
    incident_energy = [1.0, 3.0, 5.0]
    table_offset = [0.0, 3.0, 6.0]

    # Outgoing energies, PDF, and CDF for both tables, flattened back-to-back.
    energy_out = [1.0, 2.0, 3.0, 2.0, 4.0, 6.0]
    pdf = [0.5, 0.5, 0.5, 0.2, 0.2, 0.2]
    cdf = [0.0, 0.5, 1.0, 0.0, 0.2, 1.0]

    # Keep R = 0 and A = 1 for deterministic angular sampling.
    precompound = [0.0] * 6
    slope = [1.0] * 6

    # Flat data layout: segments are stored contiguously in this order, and
    # the dictionary records each segment's offset and length.
    segments = [
        ("energy", incident_energy),
        ("offset", table_offset),
        ("energy_out", energy_out),
        ("pdf", pdf),
        ("cdf", cdf),
        ("precompound_factor", precompound),
        ("angular_slope", slope),
    ]

    data = np.array([v for _, seg in segments for v in seg], dtype=np.float64)

    kalbach = {}
    cursor = 0
    for name, seg in segments:
        kalbach[f"{name}_offset"] = cursor
        kalbach[f"{name}_length"] = len(seg)
        cursor += len(seg)
    return kalbach, data
+ Ei = [1.0, 3.0] + L_i = [0.0, 3.0] + + E_i_k = [10.0, 20.0, 30.0, 100.0, 200.0, 300.0] + p_i_k = [0.1, 0.1, 0.1, 0.01, 0.01, 0.01] + c_i_k = [0.0, 0.5, 1.0, 0.0, 0.6, 1.0] + + data = np.array(Ei + L_i + E_i_k + p_i_k + c_i_k, dtype=np.float64) + + idx = 0 + grid_offset = idx + idx += len(Ei) + offset_offset = idx + idx += len(L_i) + value_offset = idx + idx += len(E_i_k) + pdf_offset = idx + idx += len(p_i_k) + cdf_offset = idx + + multi_table = { + "grid_offset": grid_offset, + "grid_length": len(Ei), + "offset_offset": offset_offset, + "offset_length": len(L_i), + "value_offset": value_offset, + "value_length": len(E_i_k), + "pdf_offset": pdf_offset, + "pdf_length": len(p_i_k), + "cdf_offset": cdf_offset, + "cdf_length": len(c_i_k), + } + return multi_table, data diff --git a/mcdc/test/unit/transport/distributions/test_data/make_test_table_data_constant.py b/mcdc/test/unit/transport/distributions/test_data/make_test_table_data_constant.py new file mode 100644 index 000000000..db8353aa8 --- /dev/null +++ b/mcdc/test/unit/transport/distributions/test_data/make_test_table_data_constant.py @@ -0,0 +1,17 @@ +import numpy as np + +from mcdc.constant import INTERPOLATION_LINEAR + + +def make_test_table_data_constant(value=1.0): + x = [0.0, 10.0] + y = [value, value] + data = np.array(x + y, dtype=np.float64) + table = { + "x_offset": 0, + "x_length": len(x), + "y_offset": len(x), + "y_length": len(y), + "interpolation": INTERPOLATION_LINEAR, + } + return table, data diff --git a/mcdc/test/unit/transport/distributions/test_data/make_test_tabulated_data.py b/mcdc/test/unit/transport/distributions/test_data/make_test_tabulated_data.py new file mode 100644 index 000000000..53a2eef9e --- /dev/null +++ b/mcdc/test/unit/transport/distributions/test_data/make_test_tabulated_data.py @@ -0,0 +1,14 @@ +import numpy as np + + +def make_test_tabulated_data(E_i_k, c_i_k): + E_i_k = list(E_i_k) + c_i_k = list(c_i_k) + data = np.array(E_i_k + c_i_k, dtype=np.float64) + table = { + 
"value_offset": 0, + "value_length": len(E_i_k), + "cdf_offset": len(E_i_k), + "cdf_length": len(c_i_k), + } + return table, data diff --git a/mcdc/test/unit/transport/distributions/test_data/make_test_tabulated_energy_angle_data.py b/mcdc/test/unit/transport/distributions/test_data/make_test_tabulated_energy_angle_data.py new file mode 100644 index 000000000..ff40cb52b --- /dev/null +++ b/mcdc/test/unit/transport/distributions/test_data/make_test_tabulated_energy_angle_data.py @@ -0,0 +1,61 @@ +import numpy as np + + +def make_test_tabulated_energy_angle_data(): + Ei = [1.0, 3.0, 5.0] + L_i = [0.0, 3.0, 6.0] + + E_i_k = [1.0, 2.0, 3.0, 2.0, 4.0, 6.0] + p_i_k = [0.5, 0.5, 0.5, 0.2, 0.2, 0.2] + c_i_k = [0.0, 0.5, 1.0, 0.0, 0.2, 1.0] + + L_i_k = [0.0, 3.0, 6.0] + mu_i_j = [-1.0, 0.0, 1.0, -0.5, 0.5, 1.0] + p_mu_i_j = [0.5, 0.5, 0.5, 0.2, 0.2, 0.2] + c_mu_i_j = [0.0, 0.5, 1.0, 0.0, 0.3, 1.0] + + data = np.array( + Ei + L_i + E_i_k + p_i_k + c_i_k + L_i_k + mu_i_j + p_mu_i_j + c_mu_i_j, + dtype=np.float64, + ) + + idx = 0 + energy_offset = idx + idx += len(Ei) + offset_offset = idx + idx += len(L_i) + energy_out_offset = idx + idx += len(E_i_k) + pdf_offset = idx + idx += len(p_i_k) + cdf_offset = idx + idx += len(c_i_k) + cosine_offset__offset = idx + idx += len(L_i_k) + cosine_offset = idx + idx += len(mu_i_j) + cosine_pdf_offset = idx + idx += len(p_mu_i_j) + cosine_cdf_offset = idx + + table = { + "energy_offset": energy_offset, + "energy_length": len(Ei), + "offset_offset": offset_offset, + "offset_length": len(L_i), + "energy_out_offset": energy_out_offset, + "energy_out_length": len(E_i_k), + "pdf_offset": pdf_offset, + "pdf_length": len(p_i_k), + "cdf_offset": cdf_offset, + "cdf_length": len(c_i_k), + "cosine_offset__offset": cosine_offset__offset, + "cosine_offset__length": len(L_i_k), + "cosine_offset": cosine_offset, + "cosine_length": len(mu_i_j), + "cosine_pdf_offset": cosine_pdf_offset, + "cosine_pdf_length": len(p_mu_i_j), + "cosine_cdf_offset": 
cosine_cdf_offset, + "cosine_cdf_length": len(c_mu_i_j), + } + return table, data diff --git a/mcdc/test/unit/transport/distributions/test_evaporation.py b/mcdc/test/unit/transport/distributions/test_evaporation.py new file mode 100644 index 000000000..354d131b6 --- /dev/null +++ b/mcdc/test/unit/transport/distributions/test_evaporation.py @@ -0,0 +1,29 @@ +import math + +import mcdc.transport.distribution as dist + +from .test_data import make_test_table_data_constant + + +def test_evaporation_sample(rng_sequence, rng_state): + # MCNP Theory & User Manual §2.4.3.5.4.7 (Law 9: Evaporation Spectrum) + table, data = make_test_table_data_constant(1.0) + mcdc = {"table_data": [table]} + evaporation = {"nuclear_temperature_ID": 0, "restriction_energy": 0.0} + + xi1, xi2 = 0.1, 0.2 + rng_sequence([xi1, xi2]) + + sampled_E = dist.sample_evaporation(2.0, rng_state, evaporation, mcdc, data) + # The constant temperature table gives T(E_in) = 1.0 for this test. + # With U = 0, the MCNP notation E_in - U becomes 2.0, so + # w = (E_in - U) / T(E_in) = 2.0. + w = 2.0 + # Eq. (2.75) introduces g = 1 - exp(-w) in the normalized truncated spectrum. + g = 1.0 - math.exp(-w) + # Eq. (2.76) then gives the sampled evaporation energy: + # E_out = -T(E_in) * ln[(1 - g * xi_1) (1 - g * xi_2)]. + # Since T(E_in) = 1.0 here, the prefactor is omitted in the simplified form below. 
+ expected_E = -math.log((1.0 - g * xi1) * (1.0 - g * xi2)) + + assert math.isclose(sampled_E, expected_E, rel_tol=0.0, abs_tol=1e-12) diff --git a/mcdc/test/unit/transport/distributions/test_kalbach_mann.py b/mcdc/test/unit/transport/distributions/test_kalbach_mann.py new file mode 100644 index 000000000..2d29048c3 --- /dev/null +++ b/mcdc/test/unit/transport/distributions/test_kalbach_mann.py @@ -0,0 +1,35 @@ +import math + +import mcdc.transport.distribution as dist + +from .test_data import make_test_kalbach_mann_data + + +def test_kalbach_mann_sample(rng_sequence, rng_state): + # MCNP Theory & User Manual §2.4.3.5.4.11 (Law 44: Kalbach-87 Correlated Energy-angle Scattering) + kalbach, data = make_test_kalbach_mann_data() + + # For E_in = 2.0 on the grid [1, 3], the interpolation fraction is r = 0.5. + # xi_1 = 0.3 < r, so the second energy table is selected. + xi1, xi2, xi3, xi4 = 0.3, 0.1, 0.7, 0.5 + rng_sequence([xi1, xi2, xi3, xi4]) + + sampled_E, sampled_mu = dist.sample_kalbach_mann(2.0, rng_state, kalbach, data) + + # Law 44 uses the Law 4 energy construction, so with xi_2 = 0.1 in the first bin + # of the selected table: + E_min, E_max = 1.5, 4.5 + E_hat = 2.0 + (xi2 - 0.0) / 0.2 + # As in the Law 4 tests, the constant bin PDF means Eq. (2.66) collapses to the + # simpler Eq. (2.65) interpolation for the sampled E'. + expected_E = E_min + (E_hat - 2.0) / (6.0 - 2.0) * (E_max - E_min) + + # Eq. (2.90) and Eq. (2.91): with constant test data, the interpolation is trivial, + # so A = 1 and R = 0 at every point. + # Since xi_3 = 0.7 > R, Eq. (2.93) and Eq. 
(2.94) apply: + # T = (2 * xi_4 - 1) * sinh(A) = 0 + # mu = ln(T + sqrt(T^2 + 1)) / A = 0 + expected_mu = 0.0 + + assert math.isclose(sampled_E, expected_E, rel_tol=0.0, abs_tol=1e-12) + assert math.isclose(sampled_mu, expected_mu, rel_tol=0.0, abs_tol=1e-12) diff --git a/mcdc/test/unit/transport/distributions/test_level_scattering.py b/mcdc/test/unit/transport/distributions/test_level_scattering.py new file mode 100644 index 000000000..2ecba06e6 --- /dev/null +++ b/mcdc/test/unit/transport/distributions/test_level_scattering.py @@ -0,0 +1,15 @@ +import math + +import mcdc.transport.distribution as dist + + +def test_level_scattering_sample(): + # MCNP Theory & User Manual §2.4.3.5.4.3 (Law 3: Inelastic Scattering from Nuclear Levels) + # The implementation stores the Law 3 relation in the linear form + # E_out = C2 * (E_in - C1). + # For this test, E_in = 5, C1 = 1, and C2 = 0.5, so + # E_out = 0.5 * (5 - 1) = 2. + level = {"C1": 1.0, "C2": 0.5} + sampled_E = dist.sample_level_scattering(5.0, level) + expected_E = 2.0 + assert math.isclose(sampled_E, expected_E, rel_tol=0.0, abs_tol=1e-12) diff --git a/mcdc/test/unit/transport/distributions/test_maxwellian.py b/mcdc/test/unit/transport/distributions/test_maxwellian.py new file mode 100644 index 000000000..74fa5bec2 --- /dev/null +++ b/mcdc/test/unit/transport/distributions/test_maxwellian.py @@ -0,0 +1,26 @@ +import math + +import mcdc.transport.distribution as dist +from mcdc.constant import PI + +from .test_data import make_test_table_data_constant + + +def test_maxwellian_sample(rng_sequence, rng_state): + # MCNP Theory & User Manual §2.4.3.5.4.6 (Law 7: Simple Maxwell Fission Spectrum) + table, data = make_test_table_data_constant(1.0) + mcdc = {"table_data": [table]} + maxwellian = {"nuclear_temperature_ID": 0, "restriction_energy": 0.0} + + xi1, xi2, xi3 = 0.9, 0.9, 0.0 + rng_sequence([xi1, xi2, xi3]) + + sampled_E = dist.sample_maxwellian(2.0, rng_state, maxwellian, mcdc, data) + # MCNP Eq. 
(2.73): + # E_out = -T(E_in) * [xi_1^2 / (xi_1^2 + xi_2^2) * ln(xi_3) + ln(xi_4)] + # MCDC uses the equivalent polar-form reduction + # xi_1^2 / (xi_1^2 + xi_2^2) = cos(theta)^2 + # with theta = (pi / 2) * xi3 and T(E_in) = 1 for this test table. + expected_E = -(math.log(xi1) + math.log(xi2) * math.cos(0.5 * PI * xi3) ** 2) + + assert math.isclose(sampled_E, expected_E, rel_tol=0.0, abs_tol=1e-12) diff --git a/mcdc/test/unit/transport/distributions/test_multi_table.py b/mcdc/test/unit/transport/distributions/test_multi_table.py new file mode 100644 index 000000000..102a9e5a9 --- /dev/null +++ b/mcdc/test/unit/transport/distributions/test_multi_table.py @@ -0,0 +1,30 @@ +import math + +import mcdc.transport.distribution as dist + +from .test_data import make_test_multi_table_data + + +def test_multi_table_distribution_sample(rng_sequence, rng_state): + # MCNP Theory & User Manual §2.4.3.5.4.4 (Law 4: Tabular Distribution) + multi_table, data = make_test_multi_table_data() + # For E_in = 2.0 on the grid [1, 3], Eq. (2.62) gives r = 0.5. + # xi_2 = 0.3 < r, so Eq. (2.64) selects l = i + 1, i.e. the second table. + rng_sequence([0.3, 0.2]) + + sampled_E = dist.sample_multi_table(2.0, rng_state, multi_table, data, scale=True) + + # In the selected table, xi_1 = 0.2 falls in the first continuous bin. + # Eq. (2.65) gives E' = E_l,k + (xi_1 - c_l,k) / p_l,k = 100 + 0.2 / 0.01 = 120. + # The test data use constant p within the bin, so the linear-linear form in + # Eq. (2.66) reduces to the same result. + E_prime = 100.0 + (0.2 - 0.0) / 0.01 + # Eq. (2.67) and Eq. (2.68) give the scaled bounds: + # E_1 = 10 + 0.5 * (100 - 10) = 55 + # E_K = 30 + 0.5 * (300 - 30) = 165 + # Here E_l,1 = 100 and E_l,K = 300 because the selected table is the second one. + # Eq. 
(2.69) then gives + # E_out = E_1 + (E' - E_l,1) * (E_K - E_1) / (E_l,K - E_l,1) + expected_E = 55.0 + (E_prime - 100.0) * (165.0 - 55.0) / (300.0 - 100.0) + + assert math.isclose(sampled_E, expected_E, rel_tol=0.0, abs_tol=1e-12) diff --git a/mcdc/test/unit/transport/distributions/test_nbody_correlated.py b/mcdc/test/unit/transport/distributions/test_nbody_correlated.py new file mode 100644 index 000000000..a35f63361 --- /dev/null +++ b/mcdc/test/unit/transport/distributions/test_nbody_correlated.py @@ -0,0 +1,37 @@ +import math + +import mcdc.transport.distribution as dist +from mcdc.constant import DISTRIBUTION_N_BODY + +from .test_data import make_test_tabulated_data + + +def test_nbody_sample_correlated(rng_sequence, rng_state): + # MCNP Theory & User Manual §2.4.3.5.4.13 (Law 66: N-body Phase Space Distribution) + table, data = make_test_tabulated_data([2.0, 4.0, 6.0], [0.0, 0.4, 1.0]) + mcdc = {"nbody_distributions": [table]} + distribution = {"child_type": DISTRIBUTION_N_BODY, "child_ID": 0} + + # First value samples energy, second value samples isotropic cosine. + rng_sequence([0.2, 0.75]) + + sampled_E, sampled_mu = dist.sample_correlated_distribution( + 2.0, + distribution, + rng_state, + mcdc, + data, + ) + + # The current implementation samples energy from the tabulated distribution and + # samples the cosine isotropically. This test is therefore checking the current + # reduced implementation, not reconstructing the full Law 66 rejection sampler + # from Eq. (2.103) through Eq. (2.106). + # For the tabulated-energy part, xi = 0.2 gives linear interpolation in the first + # bin. For the angular part, MCNP Eq. (2.107) gives mu = 2 * xi_10 - 1 for + # isotropic center-of-mass sampling. 
+ expected_E = 2.0 + (0.2 - 0.0) * (4.0 - 2.0) / (0.4 - 0.0) + expected_mu = 2.0 * 0.75 - 1.0 + + assert math.isclose(sampled_E, expected_E, rel_tol=0.0, abs_tol=1e-12) + assert math.isclose(sampled_mu, expected_mu, rel_tol=0.0, abs_tol=1e-12) diff --git a/mcdc/test/unit/transport/distributions/test_tabulated_distribution.py b/mcdc/test/unit/transport/distributions/test_tabulated_distribution.py new file mode 100644 index 000000000..648e1da4f --- /dev/null +++ b/mcdc/test/unit/transport/distributions/test_tabulated_distribution.py @@ -0,0 +1,19 @@ +import math + +import mcdc.transport.distribution as dist + +from .test_data import make_test_tabulated_data + + +def test_tabulated_distribution_sample(rng_sequence, rng_state): + # MCNP Theory & User Manual §2.4.3.5.4.4 (Law 4: Tabular Distribution) + table, data = make_test_tabulated_data([1.0, 3.0, 7.0], [0.0, 0.4, 1.0]) + rng_sequence([0.2]) + + sampled_E = dist.sample_tabulated(table, rng_state, data) + # This is the single-table inverse-CDF interpolation used by the tabulated sampler: + # xi = 0.2 lies in the first bin, so linear interpolation between + # (c_0, E_0) = (0.0, 1.0) and (c_1, E_1) = (0.4, 3.0) gives the expected value. 
+ expected_E = 1.0 + (0.2 - 0.0) * (3.0 - 1.0) / (0.4 - 0.0) + + assert math.isclose(sampled_E, expected_E, rel_tol=0.0, abs_tol=1e-12) diff --git a/mcdc/test/unit/transport/distributions/test_tabulated_energy_angle.py b/mcdc/test/unit/transport/distributions/test_tabulated_energy_angle.py new file mode 100644 index 000000000..8c7d770c7 --- /dev/null +++ b/mcdc/test/unit/transport/distributions/test_tabulated_energy_angle.py @@ -0,0 +1,38 @@ +import math + +import mcdc.transport.distribution as dist + +from .test_data import make_test_tabulated_energy_angle_data + + +def test_tabulated_energy_angle_sample(rng_sequence, rng_state): + # MCNP Theory & User Manual §2.4.3.5.4.12 (Law 61: Correlated Energy-angle Scattering) + table, data = make_test_tabulated_energy_angle_data() + + xi1, xi2, xi3 = 0.3, 0.1, 0.25 + rng_sequence([xi1, xi2, xi3]) + + sampled_E, sampled_mu = dist.sample_tabulated_energy_angle( + 2.0, rng_state, table, data + ) + + # Law 61 uses the Law 4 energy construction first. + # For E_in = 2.0 on the grid [1, 3], Eq. (2.62) gives r = 0.5. + # xi_1 = 0.3 < r, so the second outgoing-energy table is selected. + # The test data again use a constant bin PDF, so the sampled E' is the + # Eq. (2.65) form rather than the more general Eq. (2.66) expression. + E_min, E_max = 1.5, 4.5 + E_hat = 2.0 + (xi2 - 0.0) / 0.2 + # E_min and E_max are the scaled lower and upper bounds from the two incident + # energy tables, i.e. the Law 4 quantities from Eq. (2.67) through Eq. (2.69). + expected_E = E_min + (E_hat - 2.0) / (6.0 - 2.0) * (E_max - E_min) + + # For the angular table, Law 61 says the linear-interpolation case chooses the + # tabular angular distribution whose CDF point is closest to the sampled xi_2. + # Here xi_2 = 0.1 is tied between the first two CDF points in the selected energy + # bin, and the implementation keeps the lower index, so the first cosine table is + # used. Sampling that table gives mu = -1 + xi_3 / 0.5. 
+ expected_mu = -1.0 + (xi3 - 0.0) / 0.5 + + assert math.isclose(sampled_E, expected_E, rel_tol=0.0, abs_tol=1e-12) + assert math.isclose(sampled_mu, expected_mu, rel_tol=0.0, abs_tol=1e-12) diff --git a/mcdc/test/unit/transport/geometry/surface/plane_x.py b/mcdc/test/unit/transport/geometry/surface/plane_x.py new file mode 100644 index 000000000..20fc2b365 --- /dev/null +++ b/mcdc/test/unit/transport/geometry/surface/plane_x.py @@ -0,0 +1,527 @@ +import mcdc +import numpy as np + +#### + +from mcdc.constant import ( + COINCIDENCE_TOLERANCE, + INF, +) +from mcdc.main import preparation + +# ====================================================================================== +# Setup +# ====================================================================================== + +# Reference surface description +X = 10.0 +durations = np.array([5.0, 5.0, 5.0]) +velocities = np.zeros((3, 3)) +velocities[:, 0] = np.array([-1.0, 2.0, -3.0]) + +# Test object: static surface +static_surface = mcdc.Surface.PlaneX(x=X) + +# Test object: moving surface +moving_surface = mcdc.Surface.PlaneX(x=X) +moving_surface.move(velocities, durations) + +# Create the dummy simulation structure and data +structure_container, data = preparation() +structure = structure_container[0] + +# Get the "compiled" test objects +static_surface = structure["surfaces"][0] +moving_surface = structure["surfaces"][1] + +# Particle object for testing +import mcdc.numba_types as type_ + +particle_container = np.zeros(1, type_.particle_data) +particle = particle_container[0] + +# Miscellanies +TINY = COINCIDENCE_TOLERANCE * 0.8 # Tiny value within coincidence tolerance + +# Load modules to be tested +from mcdc.transport.geometry.surface import ( + interface, + plane_x, +) + +# ===================================================================================== +# Plane-X core functions +# ===================================================================================== + + +def test_evaluate(): + def 
run(x, answer): + particle["x"] = x + result = plane_x.evaluate(particle_container, static_surface) + assert np.isclose(result, answer) + + # Positive side + run(x=13.0, answer=3.0) + # Negative side + run(x=6.0, answer=-4.0) + + +def test_reflect(): + def run(ux, answer): + particle["ux"] = ux + plane_x.reflect(particle_container, static_surface) + assert np.isclose(particle["ux"], answer) + + # From positive direction + run(ux=0.2, answer=-0.2) + # From negative direction + run(ux=-0.1, answer=0.1) + + +def test_get_normal_component(): + def run(ux, answer): + particle["ux"] = ux + result = plane_x.get_normal_component(particle_container, static_surface) + assert np.isclose(result, answer) + + # Positive direction + run(ux=0.4, answer=0.4) + # Negative direction + run(ux=-0.2, answer=-0.2) + # Parallel + run(ux=0.0, answer=0.0) + + +def test_get_distance(): + def run(x, ux, answer): + particle["x"] = x + particle["ux"] = ux + result = plane_x.get_distance(particle_container, static_surface) + assert np.isclose(result, answer) + + # Positive side + x = 12.0 + ## Moving closer + run(x, ux=-0.4, answer=5.0) + ## Moving away + run(x, ux=0.3, answer=INF) + ## Parallel + run(x, ux=0.0, answer=INF) + + # Negative side + x = 6.0 + ## Moving closer + run(x, ux=0.4, answer=10.0) + ## Moving away + run(x, ux=-0.3, answer=INF) + ## Parallel + run(x, ux=0.0, answer=INF) + + # At surface, within tolerance, on the positive side + x = 10.0 + TINY + ## Moving away + run(x, ux=0.4, answer=INF) + ## Moving closer + run(x, ux=-0.4, answer=INF) + ## Parallel + run(x, ux=0.0, answer=INF) + + # At surface, within tolerance, on the negative side + x = 10.0 - TINY + ## Moving away + run(x, ux=-0.4, answer=INF) + ## Moving closer + run(x, ux=0.4, answer=INF) + ## Parallel + run(x, ux=0.0, answer=INF) + + +# ===================================================================================== +# Plane-x integrated transport interface +# 
===================================================================================== + + +def test_interface_reflect(): + def run(ux, answer): + particle["ux"] = ux + interface.reflect(particle_container, static_surface) + assert np.isclose(particle["ux"], answer) + + # From positive direction + run(ux=0.2, answer=-0.2) + # From negative direction + run(ux=-0.1, answer=0.1) + + +def test_interface_evaluate(): + def run_static(x, answer): + particle["x"] = x + result = interface.evaluate(particle_container, static_surface, data) + assert np.isclose(result, answer) + + def run_moving(x, ux, t, answer): + particle["x"] = x + particle["ux"] = ux + particle["t"] = t + result = interface.evaluate(particle_container, moving_surface, data) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Positive side + run_static(x=13.0, answer=3.0) + # Negative side + run_static(x=6.0, answer=-4.0) + + # ================================================================================= + # Moving + # ================================================================================= + + # First bin + t = 3.0 # Surface x-position = 7.0 + ux = 0.3 # Arbitrary + ## Positive side + run_moving(x=10.0, ux=ux, t=t, answer=3.0) + ## Negative side + run_moving(x=1.0, ux=ux, t=t, answer=-6.0) + + # First bin, at grid + t = 5.0 # Surface x-position = 5.0 + ux = -0.3 # Arbitrary + ## Positive side + run_moving(x=10.0, ux=ux, t=t, answer=5.0) + ## Negative side + run_moving(x=1.0, ux=ux, t=t, answer=-4.0) + + # Interior bin + t = 12.0 # Surface x-position = 9.0 + ux = 0.3 # Arbitrary + ## Positive side + run_moving(x=10.0, ux=ux, t=t, answer=1.0) + ## Negative side + run_moving(x=1.0, ux=ux, t=t, answer=-8.0) + + # Interior bin, at grid + t = 15.0 # Surface x-position = 0.0 + ux = -0.3 # Arbitrary + ## Positive side + 
run_moving(x=10.0, ux=ux, t=t, answer=10.0) + ## Negative side + run_moving(x=-5.0, ux=ux, t=t, answer=-5.0) + + # Final bin + t = 100.0 # Surface x-position = 0.0 + ux = 0.3 # Arbitrary + ## Positive side + run_moving(x=10.0, ux=ux, t=t, answer=10.0) + ## Negative side + run_moving(x=-5.0, ux=ux, t=t, answer=-5.0) + + +def test_interface_get_normal_component(): + def run_static(ux, answer): + particle["ux"] = ux + speed = 2.0 # Arbitrary + result = interface.get_normal_component( + particle_container, speed, static_surface, data + ) + assert np.isclose(result, answer) + + def run_moving(ux, t, speed, answer): + particle["ux"] = ux + particle["t"] = t + result = interface.get_normal_component( + particle_container, speed, moving_surface, data + ) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Positive direction + run_static(ux=0.4, answer=0.4) + # Negative direction + run_static(ux=-0.2, answer=-0.2) + # Parallel + run_static(ux=0.0, answer=0.0) + + # ================================================================================= + # Moving + # ================================================================================= + + # Surface moving in the positive direction + t = 8.5 # Surface x-velocity = 2.0 + # + ## Positive direction + ux = 0.4 + ### Faster + run_moving(ux, t, speed=6.0, answer=0.4 / 6.0) + ### Slower (change sign) + run_moving(ux, t, speed=2.0, answer=-1.2 / 2.0) + ### Same speed (cancel out) + run_moving(ux, t, speed=5.0, answer=0.0) + # + ## Negative direction + run_moving(ux=-0.4, t=t, speed=6.0, answer=-4.4 / 6.0) + ## Parallel + run_moving(ux=0.0, t=t, speed=6.0, answer=-2.0 / 6.0) + + # Surface moving in the negative direction + t = 10.0 # Surface x-velocity = -3.0 + # + ## Negative direction + ux = -0.4 + ### Faster + run_moving(ux, t, speed=8.0, answer=-0.2 / 
def test_interface_check_sense():
    # Which side of the (possibly moving) surface the particle is on; for
    # coincident particles, the relative velocity decides.
    def run_static(x, ux, answer):
        particle["x"] = x
        particle["ux"] = ux
        result = interface.check_sense(
            particle_container, 2.0, static_surface, data  # speed is arbitrary
        )
        assert result == answer

    def run_moving(x, ux, t, speed, answer):
        particle["x"] = x
        particle["ux"] = ux
        particle["t"] = t
        result = interface.check_sense(particle_container, speed, moving_surface, data)
        assert result == answer

    # --- Static ---
    # Off-surface: the sense is simply the side of the plane.
    run_static(12.0, 0.3, True)
    run_static(4.0, 0.3, False)
    # Coincident (either side): direction of motion decides.
    for x, ux, answer in [
        (10.0 + TINY, 0.4, True),
        (10.0 + TINY, -0.4, False),
        (10.0 - TINY, 0.2, True),
        (10.0 - TINY, -0.2, False),
    ]:
        run_static(x, ux, answer)

    # --- Moving: surface at x = 12.0 with x-velocity +2.0 (t = 8.5) ---
    t = 8.5
    run_moving(13.0, 0.3, t, 3.0, True)   # clearly positive side
    run_moving(5.0, 0.3, t, 3.0, False)   # clearly negative side
    # Coincident (either side): outcome depends on the relative velocity.
    for x in (12.0 + TINY, 12.0 - TINY):
        run_moving(x, 0.4, t, 6.0, True)    # same direction, faster
        run_moving(x, 0.4, t, 4.0, False)   # slower: passed by the surface
        run_moving(x, 0.4, t, 5.0, False)   # same speed (undefined -> False)
        run_moving(x, -0.4, t, 6.0, False)  # opposite direction

    # --- Moving: surface at x = 6.0 with x-velocity -3.0 (t = 13.0) ---
    t = 13.0
    run_moving(13.0, 0.3, t, 3.0, True)
    run_moving(5.0, 0.3, t, 3.0, False)
    for x in (6.0 + TINY, 6.0 - TINY):
        run_moving(x, 0.6, t, 6.0, True)    # opposite direction
        run_moving(x, -0.6, t, 6.0, False)  # same direction, faster
        run_moving(x, -0.6, t, 4.0, True)   # slower: passed by the surface
        run_moving(x, -0.6, t, 5.0, False)  # same speed (undefined -> False)
def test_interface_get_distance():
    # Distance-to-hit through the transport interface, for static and moving
    # surfaces.
    def run_static(x, ux, answer):
        particle["x"] = x
        particle["ux"] = ux
        result = interface.get_distance(
            particle_container, 2.0, static_surface, data  # speed is arbitrary
        )
        assert np.isclose(result, answer)

    def run_moving(x, ux, t, speed, answer):
        particle["x"] = x
        particle["ux"] = ux
        particle["t"] = t
        result = interface.get_distance(particle_container, speed, moving_surface, data)
        assert np.isclose(result, answer)

    # --- Static ---
    # Positive side (x = 12): hit only when moving toward the plane.
    for ux, answer in [(0.3, INF), (-0.4, 5.0), (0.0, INF)]:
        run_static(12.0, ux, answer)
    # Negative side (x = 6): hit only when moving toward the plane.
    for ux, answer in [(0.4, 10.0), (-0.3, INF), (0.0, INF)]:
        run_static(6.0, ux, answer)
    # Coincident (either side): never a hit.
    for x in (10.0 + TINY, 10.0 - TINY):
        for ux in (0.4, -0.4, 0.0):
            run_static(x, ux, INF)

    # --- Moving ---
    # Surface trajectory (module-level durations/velocities):
    # Bin 0: t = [ 0.0,  5.0]; surface_x = [5.0, 10.0]; surface_speed = -1.0
    # Bin 1: t = [ 5.0, 10.0]; surface_x = [5.0, 15.0]; surface_speed =  2.0
    # Bin 2: t = [10.0, 15.0]; surface_x = [0.0, 15.0]; surface_speed = -3.0
    # Bin 3: t = [15.0,  INF]; surface_x = [0.0,  0.0]; surface_speed =  0.0
    def expected_hit(x, ux, speed, bin_idx):
        # Analytic straight-line intersection with the surface segment in bin
        # `bin_idx`, expressed as a particle path length.
        t0 = 0.0 + np.sum(durations[:bin_idx])
        surface_x = X + np.sum(durations[:bin_idx] * velocities[:bin_idx, 0])
        surface_speed = velocities[bin_idx, 0]
        return ((surface_speed * -t0) - (x - surface_x)) / (
            ux - surface_speed / speed
        )

    # All moving cases start from the beginning of time.
    t = 0.0

    # Positive side (x = 11), moving away (ux > 0):
    run_moving(11.0, 0.4, t, 1.0, INF)  # out-runs the surface: no hit
    run_moving(11.0, 0.4, t, 0.9, expected_hit(11.0, 0.4, 0.9, 1))  # rear-ended, bin 1
    # Positive side, moving closer (ux < 0):
    run_moving(11.0, -0.4, t, 3.0, expected_hit(11.0, -0.4, 3.0, 0))  # rear-ends, bin 0
    run_moving(11.0, -0.4, t, 0.1, expected_hit(11.0, -0.4, 0.1, 1))  # head-on, bin 1

    # Negative side (x = 7), moving away (ux < 0):
    run_moving(7.0, -0.4, t, 2.0, INF)  # no hit
    run_moving(7.0, -0.4, t, 0.4, expected_hit(7.0, -0.4, 0.4, 0))  # rear-ended, bin 0
    # Negative side, moving closer (ux > 0):
    run_moving(7.0, 0.4, t, 0.1, expected_hit(7.0, 0.4, 0.1, 0))  # head-on, bin 0
    run_moving(
        -10.0, 0.4, t, 20.0 / 3.0, expected_hit(-10.0, 0.4, 20.0 / 3.0, 1)
    )  # rear-ends the surface, bin 1
a/mcdc/test/unit/transport/geometry/surface/plane_y.py b/mcdc/test/unit/transport/geometry/surface/plane_y.py new file mode 100644 index 000000000..f9e1467d3 --- /dev/null +++ b/mcdc/test/unit/transport/geometry/surface/plane_y.py @@ -0,0 +1,527 @@ +import mcdc +import numpy as np + +#### + +from mcdc.constant import ( + COINCIDENCE_TOLERANCE, + INF, +) +from mcdc.main import preparation + +# ====================================================================================== +# Setup +# ====================================================================================== + +# Reference surface description +Y = 10.0 +durations = np.array([5.0, 5.0, 5.0]) +velocities = np.zeros((3, 3)) +velocities[:, 1] = np.array([-1.0, 2.0, -3.0]) + +# Test object: static surface +static_surface = mcdc.Surface.PlaneY(y=Y) + +# Test object: moving surface +moving_surface = mcdc.Surface.PlaneY(y=Y) +moving_surface.move(velocities, durations) + +# Create the dummy simulation structure and data +structure_container, data = preparation() +structure = structure_container[0] + +# Get the "compiled" test objects +static_surface = structure["surfaces"][0] +moving_surface = structure["surfaces"][1] + +# Particle object for testing +import mcdc.numba_types as type_ + +particle_container = np.zeros(1, type_.particle_data) +particle = particle_container[0] + +# Miscellanies +TINY = COINCIDENCE_TOLERANCE * 0.8 # Tiny value within coincidence tolerance + +# Load modules to be tested +from mcdc.transport.geometry.surface import ( + interface, + plane_y, +) + +# ===================================================================================== +# Plane-Y core functions +# ===================================================================================== + + +def test_evaluate(): + def run(y, answer): + particle["y"] = y + result = plane_y.evaluate(particle_container, static_surface) + assert np.isclose(result, answer) + + # Positive side + run(y=13.0, answer=3.0) + # Negative side + 
run(y=6.0, answer=-4.0) + + +def test_reflect(): + def run(uy, answer): + particle["uy"] = uy + plane_y.reflect(particle_container, static_surface) + assert np.isclose(particle["uy"], answer) + + # From positive direction + run(uy=0.2, answer=-0.2) + # From negative direction + run(uy=-0.1, answer=0.1) + + +def test_get_normal_component(): + def run(uy, answer): + particle["uy"] = uy + result = plane_y.get_normal_component(particle_container, static_surface) + assert np.isclose(result, answer) + + # Positive direction + run(uy=0.4, answer=0.4) + # Negative direction + run(uy=-0.2, answer=-0.2) + # Parallel + run(uy=0.0, answer=0.0) + + +def test_get_distance(): + def run(y, uy, answer): + particle["y"] = y + particle["uy"] = uy + result = plane_y.get_distance(particle_container, static_surface) + assert np.isclose(result, answer) + + # Positive side + y = 12.0 + ## Moving closer + run(y, uy=-0.4, answer=5.0) + ## Moving away + run(y, uy=0.3, answer=INF) + ## Parallel + run(y, uy=0.0, answer=INF) + + # Negative side + y = 6.0 + ## Moving closer + run(y, uy=0.4, answer=10.0) + ## Moving away + run(y, uy=-0.3, answer=INF) + ## Parallel + run(y, uy=0.0, answer=INF) + + # At surface, within tolerance, on the positive side + y = 10.0 + TINY + ## Moving away + run(y, uy=0.4, answer=INF) + ## Moving closer + run(y, uy=-0.4, answer=INF) + ## Parallel + run(y, uy=0.0, answer=INF) + + # At surface, within tolerance, on the negative side + y = 10.0 - TINY + ## Moving away + run(y, uy=-0.4, answer=INF) + ## Moving closer + run(y, uy=0.4, answer=INF) + ## Parallel + run(y, uy=0.0, answer=INF) + + +# ===================================================================================== +# Plane-y integrated transport interface +# ===================================================================================== + + +def test_interface_reflect(): + def run(uy, answer): + particle["uy"] = uy + interface.reflect(particle_container, static_surface) + assert 
np.isclose(particle["uy"], answer) + + # From positive direction + run(uy=0.2, answer=-0.2) + # From negative direction + run(uy=-0.1, answer=0.1) + + +def test_interface_evaluate(): + def run_static(y, answer): + particle["y"] = y + result = interface.evaluate(particle_container, static_surface, data) + assert np.isclose(result, answer) + + def run_moving(y, uy, t, answer): + particle["y"] = y + particle["uy"] = uy + particle["t"] = t + result = interface.evaluate(particle_container, moving_surface, data) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Positive side + run_static(y=13.0, answer=3.0) + # Negative side + run_static(y=6.0, answer=-4.0) + + # ================================================================================= + # Moving + # ================================================================================= + + # First bin + t = 3.0 # Surface y-position = 7.0 + uy = 0.3 # Arbitrary + ## Positive side + run_moving(y=10.0, uy=uy, t=t, answer=3.0) + ## Negative side + run_moving(y=1.0, uy=uy, t=t, answer=-6.0) + + # First bin, at grid + t = 5.0 # Surface y-position = 5.0 + uy = -0.3 # Arbitrary + ## Positive side + run_moving(y=10.0, uy=uy, t=t, answer=5.0) + ## Negative side + run_moving(y=1.0, uy=uy, t=t, answer=-4.0) + + # Interior bin + t = 12.0 # Surface y-position = 9.0 + uy = 0.3 # Arbitrary + ## Positive side + run_moving(y=10.0, uy=uy, t=t, answer=1.0) + ## Negative side + run_moving(y=1.0, uy=uy, t=t, answer=-8.0) + + # Interior bin, at grid + t = 15.0 # Surface y-position = 0.0 + uy = -0.3 # Arbitrary + ## Positive side + run_moving(y=10.0, uy=uy, t=t, answer=10.0) + ## Negative side + run_moving(y=-5.0, uy=uy, t=t, answer=-5.0) + + # Final bin + t = 100.0 # Surface y-position = 0.0 + uy = 0.3 # Arbitrary + ## Positive side + run_moving(y=10.0, uy=uy, t=t, 
answer=10.0) + ## Negative side + run_moving(y=-5.0, uy=uy, t=t, answer=-5.0) + + +def test_interface_get_normal_component(): + def run_static(uy, answer): + particle["uy"] = uy + speed = 2.0 # Arbitrary + result = interface.get_normal_component( + particle_container, speed, static_surface, data + ) + assert np.isclose(result, answer) + + def run_moving(uy, t, speed, answer): + particle["uy"] = uy + particle["t"] = t + result = interface.get_normal_component( + particle_container, speed, moving_surface, data + ) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Positive direction + run_static(uy=0.4, answer=0.4) + # Negative direction + run_static(uy=-0.2, answer=-0.2) + # Parallel + run_static(uy=0.0, answer=0.0) + + # ================================================================================= + # Moving + # ================================================================================= + + # Surface moving in the positive direction + t = 8.5 # Surface y-velocity = 2.0 + # + ## Positive direction + uy = 0.4 + ### Faster + run_moving(uy, t, speed=6.0, answer=0.4 / 6.0) + ### Slower (change sign) + run_moving(uy, t, speed=2.0, answer=-1.2 / 2.0) + ### Same speed (cancel out) + run_moving(uy, t, speed=5.0, answer=0.0) + # + ## Negative direction + run_moving(uy=-0.4, t=t, speed=6.0, answer=-4.4 / 6.0) + ## Parallel + run_moving(uy=0.0, t=t, speed=6.0, answer=-2.0 / 6.0) + + # Surface moving in the negative direction + t = 10.0 # Surface y-velocity = -3.0 + # + ## Negative direction + uy = -0.4 + ### Faster + run_moving(uy, t, speed=8.0, answer=-0.2 / 8.0) + ### Slower (change sign) + run_moving(uy, t, speed=2.0, answer=2.2 / 2.0) + ### Same speed (cancel out) + run_moving(uy, t, speed=7.5, answer=0.0) + # + ## Positive direction + run_moving(uy=0.4, t=t, speed=8.0, answer=6.2 / 8.0) + 
## Parallel + run_moving(uy=0.0, t=t, speed=8.0, answer=3.0 / 8.0) + + +def test_interface_check_sense(): + def run_static(y, uy, answer): + particle["y"] = y + particle["uy"] = uy + speed = 2.0 # Arbitrary + result = interface.check_sense(particle_container, speed, static_surface, data) + assert np.isclose(result, answer) + + def run_moving(y, uy, t, speed, answer): + particle["y"] = y + particle["uy"] = uy + particle["t"] = t + result = interface.check_sense(particle_container, speed, moving_surface, data) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Not at surface + uy = 0.3 # Arbitrary + ## Positive side + run_static(y=12.0, uy=uy, answer=True) + ## Negative side + run_static(y=4.0, uy=uy, answer=False) + + # At surface, positive side + y = 10.0 + TINY + ## Positive direction + run_static(y, uy=0.4, answer=True) + ## Negative direction + run_static(y, uy=-0.4, answer=False) + + # At surface, negative side + y = 10.0 - TINY + ## Positive direction + run_static(y, uy=0.2, answer=True) + ## Negative direction + run_static(y, uy=-0.2, answer=False) + + # ================================================================================= + # Moving: Surface moving in the positive direction + # ================================================================================= + t = 8.5 # Surface y-position = 12.0; surface y-velocity = 2.0 + + # Not at surface + uy = 0.3 # Arbitrary + speed = 3.0 # Arbitrary + ## Positive side + run_moving(y=13.0, uy=uy, t=t, speed=speed, answer=True) + ## Negative side + run_moving(y=5.0, uy=uy, t=t, speed=speed, answer=False) + + # At surface, positive side + y = 12.0 + TINY + # + ## Positive direction (same direction) + uy = 0.4 + ### Faster + run_moving(y, uy, t, speed=6.0, answer=True) + ### Slower (passed by the surface) + run_moving(y, uy, t, 
speed=4.0, answer=False) + ### Same speed (undefined, but False is returned) + run_moving(y, uy, t, speed=5.0, answer=False) + # + ## Negative direction (opposite direction) + run_moving(y, uy=-0.4, t=t, speed=6.0, answer=False) + + # At surface, negative side + y = 12.0 - TINY + ## Positive direction (same direction) + uy = 0.4 + ### Faster + run_moving(y, uy, t, speed=6.0, answer=True) + ### Slower (passed by the surface) + run_moving(y, uy, t, speed=4.0, answer=False) + ### Same speed (undefined, but False is returned) + run_moving(y, uy, t, speed=5.0, answer=False) + # + ## Negative direction (opposite direction) + run_moving(y, uy=-0.4, t=t, speed=6.0, answer=False) + + # ================================================================================= + # Moving: Surface moving in the negative direction + # ================================================================================= + t = 13.0 # Surface y-position = 6.0; surface y-velocity = -3.0 + + # Not at surface + uy = 0.3 # Arbitrary + speed = 3.0 # Arbitrary + ## Positive side + run_moving(y=13.0, uy=uy, t=t, speed=speed, answer=True) + ## Negative side + run_moving(y=5.0, uy=uy, t=t, speed=speed, answer=False) + + # At surface, positive side + y = 6.0 + TINY + # + ## Positive direction (opposite direction) + run_moving(y, uy=0.6, t=t, speed=6.0, answer=True) + # + ## Negative direction (same direction) + uy = -0.6 + ### Faster + run_moving(y, uy, t, speed=6.0, answer=False) + ### Slower (passed by surface) + run_moving(y, uy, t, speed=4.0, answer=True) + ### Same speed (undefined, but False is returned) + run_moving(y, uy, t, speed=5.0, answer=False) + + # At surface, negative side + y = 6.0 - TINY + # + ## Positive direction (opposite direction) + run_moving(y, uy=0.6, t=t, speed=6.0, answer=True) + # + ## Negative direction (same direction) + uy = -0.6 + ### Faster + run_moving(y, uy, t, speed=6.0, answer=False) + ### Slower (passed by surface) + run_moving(y, uy, t, speed=4.0, answer=True) + 
### Same speed (undefined, but False is returned) + run_moving(y, uy, t, speed=5.0, answer=False) + + +def test_interface_get_distance(): + def run_static(y, uy, answer): + particle["y"] = y + particle["uy"] = uy + speed = 2.0 # Arbitrary + result = interface.get_distance(particle_container, speed, static_surface, data) + assert np.isclose(result, answer) + + def run_moving(y, uy, t, speed, answer): + particle["y"] = y + particle["uy"] = uy + particle["t"] = t + result = interface.get_distance(particle_container, speed, moving_surface, data) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Positive side + y = 12.0 + ## Positive direction (moving away) + run_static(y, uy=0.3, answer=INF) + ## Negative direction (moving closer) + run_static(y, uy=-0.4, answer=5.0) + ## Parallel + run_static(y, uy=0.0, answer=INF) + + # Negative side + y = 6.0 + ## Positive direction (moving closer) + run_static(y, uy=0.4, answer=10.0) + ## Negative direction (moving away) + run_static(y, uy=-0.3, answer=INF) + ## Parallel + run_static(y, uy=0.0, answer=INF) + + # At surface, on the positive side + y = 10.0 + TINY + ## Positive direction + run_static(y, uy=0.4, answer=INF) + ## Negative direction + run_static(y, uy=-0.4, answer=INF) + ## Parallel + run_static(y, uy=0.0, answer=INF) + + # At surface, on the negative side + y = 10.0 - TINY + ## Positive direction + run_static(y, uy=0.4, answer=INF) + ## Negative direction + run_static(y, uy=-0.4, answer=INF) + ## Parallel + run_static(y, uy=0.0, answer=INF) + + # ================================================================================= + # Moving + # ================================================================================= + # Bin 0: t = [ 0.0, 5.0]; surface_y = [5.0, 10.0]; surface_speed = -1.0 + # Bin 1: t = [ 5.0, 10.0]; surface_y = [5.0, 
15.0]; surface_speed = 2.0 + # Bin 2: t = [10.0, 15.0]; surface_y = [0.0, 15.0]; surface_speed = -3.0 + # Bin 3: t = [15.0, INF]; surface_y = [0.0, 0.0]; surface_speed = 0.0 + + def distance(y, uy, speed, bin_idy): + t0 = 0.0 + np.sum(durations[:bin_idy]) + surface_y = Y + np.sum(durations[:bin_idy] * velocities[:bin_idy, 1]) + surface_speed = velocities[bin_idy, 1] + return ((surface_speed * -t0) - (y - surface_y)) / (uy - surface_speed / speed) + + # Start from the beginning + t = 0.0 + + # Positive side + y = 11.0 + # + ## Positive direction (moving away) + uy = 0.4 + ### No hit + run_moving(y, uy, t, speed=1.0, answer=INF) + ### Hit (rear-ended by the surface) + answer = distance(y, uy, speed=0.9, bin_idy=1) + run_moving(y, uy, t, speed=0.9, answer=answer) + # + ## Negative direction (moving closer) + uy = -0.4 + ### Hit (rear-end the surface) + answer = distance(y, uy, speed=3.0, bin_idy=0) + run_moving(y, uy, t, speed=3.0, answer=answer) + ### Hit (head-on) + answer = distance(y, uy, speed=0.1, bin_idy=1) + run_moving(y, uy, t, speed=0.1, answer=answer) + + # Negative side + y = 7.0 + # + ## Negative direction (moving away) + uy = -0.4 + ### No hit + run_moving(y, uy, t, speed=2.0, answer=INF) + ### Hit (rear-ended by the surface) + answer = distance(y, uy, speed=0.4, bin_idy=0) + run_moving(y, uy, t, speed=0.4, answer=answer) + # + ## Positive direction (moving closer) + uy = 0.4 + ### Hit (head-on) + answer = distance(y, uy, speed=0.1, bin_idy=0) + run_moving(y, uy, t, speed=0.1, answer=answer) + ### Hit (rear-end the surface) + y = -10.0 + answer = distance(y, uy, speed=20.0 / 3.0, bin_idy=1) + run_moving(y, uy, t, speed=20.0 / 3.0, answer=answer) diff --git a/mcdc/test/unit/transport/geometry/surface/plane_z.py b/mcdc/test/unit/transport/geometry/surface/plane_z.py new file mode 100644 index 000000000..c4f591ee6 --- /dev/null +++ b/mcdc/test/unit/transport/geometry/surface/plane_z.py @@ -0,0 +1,527 @@ +import mcdc +import numpy as np + +#### + +from 
mcdc.constant import ( + COINCIDENCE_TOLERANCE, + INF, +) +from mcdc.main import preparation + +# ====================================================================================== +# Setup +# ====================================================================================== + +# Reference surface description +Z = 10.0 +durations = np.array([5.0, 5.0, 5.0]) +velocities = np.zeros((3, 3)) +velocities[:, 2] = np.array([-1.0, 2.0, -3.0]) + +# Test object: static surface +static_surface = mcdc.Surface.PlaneZ(z=Z) + +# Test object: moving surface +moving_surface = mcdc.Surface.PlaneZ(z=Z) +moving_surface.move(velocities, durations) + +# Create the dummy simulation structure and data +structure_container, data = preparation() +structure = structure_container[0] + +# Get the "compiled" test objects +static_surface = structure["surfaces"][0] +moving_surface = structure["surfaces"][1] + +# Particle object for testing +import mcdc.numba_types as type_ + +particle_container = np.zeros(1, type_.particle_data) +particle = particle_container[0] + +# Miscellanies +TINY = COINCIDENCE_TOLERANCE * 0.8 # Tiny value within coincidence tolerance + +# Load modules to be tested +from mcdc.transport.geometry.surface import ( + interface, + plane_z, +) + +# ===================================================================================== +# Plane-Z core functions +# ===================================================================================== + + +def test_evaluate(): + def run(z, answer): + particle["z"] = z + result = plane_z.evaluate(particle_container, static_surface) + assert np.isclose(result, answer) + + # Positive side + run(z=13.0, answer=3.0) + # Negative side + run(z=6.0, answer=-4.0) + + +def test_reflect(): + def run(uz, answer): + particle["uz"] = uz + plane_z.reflect(particle_container, static_surface) + assert np.isclose(particle["uz"], answer) + + # From positive direction + run(uz=0.2, answer=-0.2) + # From negative direction + run(uz=-0.1, answer=0.1) 
+ + +def test_get_normal_component(): + def run(uz, answer): + particle["uz"] = uz + result = plane_z.get_normal_component(particle_container, static_surface) + assert np.isclose(result, answer) + + # Positive direction + run(uz=0.4, answer=0.4) + # Negative direction + run(uz=-0.2, answer=-0.2) + # Parallel + run(uz=0.0, answer=0.0) + + +def test_get_distance(): + def run(z, uz, answer): + particle["z"] = z + particle["uz"] = uz + result = plane_z.get_distance(particle_container, static_surface) + assert np.isclose(result, answer) + + # Positive side + z = 12.0 + ## Moving closer + run(z, uz=-0.4, answer=5.0) + ## Moving away + run(z, uz=0.3, answer=INF) + ## Parallel + run(z, uz=0.0, answer=INF) + + # Negative side + z = 6.0 + ## Moving closer + run(z, uz=0.4, answer=10.0) + ## Moving away + run(z, uz=-0.3, answer=INF) + ## Parallel + run(z, uz=0.0, answer=INF) + + # At surface, within tolerance, on the positive side + z = 10.0 + TINY + ## Moving away + run(z, uz=0.4, answer=INF) + ## Moving closer + run(z, uz=-0.4, answer=INF) + ## Parallel + run(z, uz=0.0, answer=INF) + + # At surface, within tolerance, on the negative side + z = 10.0 - TINY + ## Moving away + run(z, uz=-0.4, answer=INF) + ## Moving closer + run(z, uz=0.4, answer=INF) + ## Parallel + run(z, uz=0.0, answer=INF) + + +# ===================================================================================== +# Plane-z integrated transport interface +# ===================================================================================== + + +def test_interface_reflect(): + def run(uz, answer): + particle["uz"] = uz + interface.reflect(particle_container, static_surface) + assert np.isclose(particle["uz"], answer) + + # From positive direction + run(uz=0.2, answer=-0.2) + # From negative direction + run(uz=-0.1, answer=0.1) + + +def test_interface_evaluate(): + def run_static(z, answer): + particle["z"] = z + result = interface.evaluate(particle_container, static_surface, data) + assert 
np.isclose(result, answer) + + def run_moving(z, uz, t, answer): + particle["z"] = z + particle["uz"] = uz + particle["t"] = t + result = interface.evaluate(particle_container, moving_surface, data) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Positive side + run_static(z=13.0, answer=3.0) + # Negative side + run_static(z=6.0, answer=-4.0) + + # ================================================================================= + # Moving + # ================================================================================= + + # First bin + t = 3.0 # Surface z-position = 7.0 + uz = 0.3 # Arbitrary + ## Positive side + run_moving(z=10.0, uz=uz, t=t, answer=3.0) + ## Negative side + run_moving(z=1.0, uz=uz, t=t, answer=-6.0) + + # First bin, at grid + t = 5.0 # Surface z-position = 5.0 + uz = -0.3 # Arbitrary + ## Positive side + run_moving(z=10.0, uz=uz, t=t, answer=5.0) + ## Negative side + run_moving(z=1.0, uz=uz, t=t, answer=-4.0) + + # Interior bin + t = 12.0 # Surface z-position = 9.0 + uz = 0.3 # Arbitrary + ## Positive side + run_moving(z=10.0, uz=uz, t=t, answer=1.0) + ## Negative side + run_moving(z=1.0, uz=uz, t=t, answer=-8.0) + + # Interior bin, at grid + t = 15.0 # Surface z-position = 0.0 + uz = -0.3 # Arbitrary + ## Positive side + run_moving(z=10.0, uz=uz, t=t, answer=10.0) + ## Negative side + run_moving(z=-5.0, uz=uz, t=t, answer=-5.0) + + # Final bin + t = 100.0 # Surface z-position = 0.0 + uz = 0.3 # Arbitrary + ## Positive side + run_moving(z=10.0, uz=uz, t=t, answer=10.0) + ## Negative side + run_moving(z=-5.0, uz=uz, t=t, answer=-5.0) + + +def test_interface_get_normal_component(): + def run_static(uz, answer): + particle["uz"] = uz + speed = 2.0 # Arbitrary + result = interface.get_normal_component( + particle_container, speed, static_surface, data + ) + assert 
np.isclose(result, answer) + + def run_moving(uz, t, speed, answer): + particle["uz"] = uz + particle["t"] = t + result = interface.get_normal_component( + particle_container, speed, moving_surface, data + ) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Positive direction + run_static(uz=0.4, answer=0.4) + # Negative direction + run_static(uz=-0.2, answer=-0.2) + # Parallel + run_static(uz=0.0, answer=0.0) + + # ================================================================================= + # Moving + # ================================================================================= + + # Surface moving in the positive direction + t = 8.5 # Surface z-velocity = 2.0 + # + ## Positive direction + uz = 0.4 + ### Faster + run_moving(uz, t, speed=6.0, answer=0.4 / 6.0) + ### Slower (change sign) + run_moving(uz, t, speed=2.0, answer=-1.2 / 2.0) + ### Same speed (cancel out) + run_moving(uz, t, speed=5.0, answer=0.0) + # + ## Negative direction + run_moving(uz=-0.4, t=t, speed=6.0, answer=-4.4 / 6.0) + ## Parallel + run_moving(uz=0.0, t=t, speed=6.0, answer=-2.0 / 6.0) + + # Surface moving in the negative direction + t = 10.0 # Surface z-velocity = -3.0 + # + ## Negative direction + uz = -0.4 + ### Faster + run_moving(uz, t, speed=8.0, answer=-0.2 / 8.0) + ### Slower (change sign) + run_moving(uz, t, speed=2.0, answer=2.2 / 2.0) + ### Same speed (cancel out) + run_moving(uz, t, speed=7.5, answer=0.0) + # + ## Positive direction + run_moving(uz=0.4, t=t, speed=8.0, answer=6.2 / 8.0) + ## Parallel + run_moving(uz=0.0, t=t, speed=8.0, answer=3.0 / 8.0) + + +def test_interface_check_sense(): + def run_static(z, uz, answer): + particle["z"] = z + particle["uz"] = uz + speed = 2.0 # Arbitrary + result = interface.check_sense(particle_container, speed, static_surface, data) + assert 
np.isclose(result, answer) + + def run_moving(z, uz, t, speed, answer): + particle["z"] = z + particle["uz"] = uz + particle["t"] = t + result = interface.check_sense(particle_container, speed, moving_surface, data) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Not at surface + uz = 0.3 # Arbitrary + ## Positive side + run_static(z=12.0, uz=uz, answer=True) + ## Negative side + run_static(z=4.0, uz=uz, answer=False) + + # At surface, positive side + z = 10.0 + TINY + ## Positive direction + run_static(z, uz=0.4, answer=True) + ## Negative direction + run_static(z, uz=-0.4, answer=False) + + # At surface, negative side + z = 10.0 - TINY + ## Positive direction + run_static(z, uz=0.2, answer=True) + ## Negative direction + run_static(z, uz=-0.2, answer=False) + + # ================================================================================= + # Moving: Surface moving in the positive direction + # ================================================================================= + t = 8.5 # Surface z-position = 12.0; surface z-velocity = 2.0 + + # Not at surface + uz = 0.3 # Arbitrary + speed = 3.0 # Arbitrary + ## Positive side + run_moving(z=13.0, uz=uz, t=t, speed=speed, answer=True) + ## Negative side + run_moving(z=5.0, uz=uz, t=t, speed=speed, answer=False) + + # At surface, positive side + z = 12.0 + TINY + # + ## Positive direction (same direction) + uz = 0.4 + ### Faster + run_moving(z, uz, t, speed=6.0, answer=True) + ### Slower (passed by the surface) + run_moving(z, uz, t, speed=4.0, answer=False) + ### Same speed (undefined, but False is returned) + run_moving(z, uz, t, speed=5.0, answer=False) + # + ## Negative direction (opposite direction) + run_moving(z, uz=-0.4, t=t, speed=6.0, answer=False) + + # At surface, negative side + z = 12.0 - TINY + ## Positive direction 
(same direction) + uz = 0.4 + ### Faster + run_moving(z, uz, t, speed=6.0, answer=True) + ### Slower (passed by the surface) + run_moving(z, uz, t, speed=4.0, answer=False) + ### Same speed (undefined, but False is returned) + run_moving(z, uz, t, speed=5.0, answer=False) + # + ## Negative direction (opposite direction) + run_moving(z, uz=-0.4, t=t, speed=6.0, answer=False) + + # ================================================================================= + # Moving: Surface moving in the negative direction + # ================================================================================= + t = 13.0 # Surface z-position = 6.0; surface z-velocity = -3.0 + + # Not at surface + uz = 0.3 # Arbitrary + speed = 3.0 # Arbitrary + ## Positive side + run_moving(z=13.0, uz=uz, t=t, speed=speed, answer=True) + ## Negative side + run_moving(z=5.0, uz=uz, t=t, speed=speed, answer=False) + + # At surface, positive side + z = 6.0 + TINY + # + ## Positive direction (opposite direction) + run_moving(z, uz=0.6, t=t, speed=6.0, answer=True) + # + ## Negative direction (same direction) + uz = -0.6 + ### Faster + run_moving(z, uz, t, speed=6.0, answer=False) + ### Slower (passed by surface) + run_moving(z, uz, t, speed=4.0, answer=True) + ### Same speed (undefined, but False is returned) + run_moving(z, uz, t, speed=5.0, answer=False) + + # At surface, negative side + z = 6.0 - TINY + # + ## Positive direction (opposite direction) + run_moving(z, uz=0.6, t=t, speed=6.0, answer=True) + # + ## Negative direction (same direction) + uz = -0.6 + ### Faster + run_moving(z, uz, t, speed=6.0, answer=False) + ### Slower (passed by surface) + run_moving(z, uz, t, speed=4.0, answer=True) + ### Same speed (undefined, but False is returned) + run_moving(z, uz, t, speed=5.0, answer=False) + + +def test_interface_get_distance(): + def run_static(z, uz, answer): + particle["z"] = z + particle["uz"] = uz + speed = 2.0 # Arbitrary + result = interface.get_distance(particle_container, speed, 
static_surface, data) + assert np.isclose(result, answer) + + def run_moving(z, uz, t, speed, answer): + particle["z"] = z + particle["uz"] = uz + particle["t"] = t + result = interface.get_distance(particle_container, speed, moving_surface, data) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Positive side + z = 12.0 + ## Positive direction (moving away) + run_static(z, uz=0.3, answer=INF) + ## Negative direction (moving closer) + run_static(z, uz=-0.4, answer=5.0) + ## Parallel + run_static(z, uz=0.0, answer=INF) + + # Negative side + z = 6.0 + ## Positive direction (moving closer) + run_static(z, uz=0.4, answer=10.0) + ## Negative direction (moving away) + run_static(z, uz=-0.3, answer=INF) + ## Parallel + run_static(z, uz=0.0, answer=INF) + + # At surface, on the positive side + z = 10.0 + TINY + ## Positive direction + run_static(z, uz=0.4, answer=INF) + ## Negative direction + run_static(z, uz=-0.4, answer=INF) + ## Parallel + run_static(z, uz=0.0, answer=INF) + + # At surface, on the negative side + z = 10.0 - TINY + ## Positive direction + run_static(z, uz=0.4, answer=INF) + ## Negative direction + run_static(z, uz=-0.4, answer=INF) + ## Parallel + run_static(z, uz=0.0, answer=INF) + + # ================================================================================= + # Moving + # ================================================================================= + # Bin 0: t = [ 0.0, 5.0]; surface_z = [5.0, 10.0]; surface_speed = -1.0 + # Bin 1: t = [ 5.0, 10.0]; surface_z = [5.0, 15.0]; surface_speed = 2.0 + # Bin 2: t = [10.0, 15.0]; surface_z = [0.0, 15.0]; surface_speed = -3.0 + # Bin 3: t = [15.0, INF]; surface_z = [0.0, 0.0]; surface_speed = 0.0 + + def distance(z, uz, speed, bin_idz): + t0 = 0.0 + np.sum(durations[:bin_idz]) + surface_z = Z + np.sum(durations[:bin_idz] * 
velocities[:bin_idz, 2]) + surface_speed = velocities[bin_idz, 2] + return ((surface_speed * -t0) - (z - surface_z)) / (uz - surface_speed / speed) + + # Start from the beginning + t = 0.0 + + # Positive side + z = 11.0 + # + ## Positive direction (moving away) + uz = 0.4 + ### No hit + run_moving(z, uz, t, speed=1.0, answer=INF) + ### Hit (rear-ended by the surface) + answer = distance(z, uz, speed=0.9, bin_idz=1) + run_moving(z, uz, t, speed=0.9, answer=answer) + # + ## Negative direction (moving closer) + uz = -0.4 + ### Hit (rear-end the surface) + answer = distance(z, uz, speed=3.0, bin_idz=0) + run_moving(z, uz, t, speed=3.0, answer=answer) + ### Hit (head-on) + answer = distance(z, uz, speed=0.1, bin_idz=1) + run_moving(z, uz, t, speed=0.1, answer=answer) + + # Negative side + z = 7.0 + # + ## Negative direction (moving away) + uz = -0.4 + ### No hit + run_moving(z, uz, t, speed=2.0, answer=INF) + ### Hit (rear-ended by the surface) + answer = distance(z, uz, speed=0.4, bin_idz=0) + run_moving(z, uz, t, speed=0.4, answer=answer) + # + ## Positive direction (moving closer) + uz = 0.4 + ### Hit (head-on) + answer = distance(z, uz, speed=0.1, bin_idz=0) + run_moving(z, uz, t, speed=0.1, answer=answer) + ### Hit (rear-end the surface) + z = -10.0 + answer = distance(z, uz, speed=20.0 / 3.0, bin_idz=1) + run_moving(z, uz, t, speed=20.0 / 3.0, answer=answer) diff --git a/mcdc/test/unit/transport/geometry/surface/torus_z.py b/mcdc/test/unit/transport/geometry/surface/torus_z.py new file mode 100644 index 000000000..c3f83468d --- /dev/null +++ b/mcdc/test/unit/transport/geometry/surface/torus_z.py @@ -0,0 +1,839 @@ +import mcdc +import math +import numpy as np + +#### + +from mcdc.constant import ( + COINCIDENCE_TOLERANCE, + INF, +) +from mcdc.main import preparation + +# ====================================================================================== +# Setup +# ====================================================================================== + +# 
Reference surface description +A = 0.0 +B = 0.0 +C = 0.0 +R = 1.0 +r = 0.5 +durations = np.array([5.0, 5.0, 5.0]) +velocities = np.zeros((3, 3)) +velocities[:, 0] = np.array([-1.0, 2.0, -3.0]) + +# Test object: static surface +static_surface = mcdc.Surface.TorusZ(A=A, B=B, C=C, R=R, r=r) + +# Test object: moving surface +moving_surface = mcdc.Surface.TorusZ(A=A, B=B, C=C, R=R, r=r) +moving_surface.move(velocities, durations) + +# Create the dummy simulation structure and data +structure_container, data = preparation() +structure = structure_container[0] + +# Get the "compiled" test objects +static_surface = structure["surfaces"][0] +moving_surface = structure["surfaces"][1] + +# Particle object for testing +import mcdc.numba_types as type_ + +particle_container = np.zeros(1, type_.particle_data) +particle = particle_container[0] + +# Miscellanies +TINY = COINCIDENCE_TOLERANCE * 0.1 # Tiny value within coincidence tolerance + +# Load modules to be tested +from mcdc.transport.geometry.surface import ( + interface, + torus_z, +) + +# ===================================================================================== +# Torus-Z core functions +# ===================================================================================== + + +def test_evaluate(): + def run(x, y, z, answer): + particle["x"] = x + particle["y"] = y + particle["z"] = z + result = torus_z.evaluate(particle_container, static_surface) + assert np.isclose(result, answer) + + # Inside + run(x=1.0, y=0.0, z=0.0, answer=-0.9375) + # Outside + run(x=0.0, y=-1.0, z=5.0, answer=711.5625) + + +# Answers parameter is a numpy array of the correct [ux, uy, uz] values of the reflected particle +def test_reflect(): + def run(x, y, z, ux, uy, uz, answers): + particle["x"] = x + particle["y"] = y + particle["z"] = z + particle["ux"] = ux + particle["uy"] = uy + particle["uz"] = uz + torus_z.reflect(particle_container, static_surface) + directions = np.array([particle["ux"], particle["uy"], particle["uz"]]) + 
assert np.allclose(directions, answers) + + # Particle traveling in through the Top of the torus + run(x=R, y=0.0, z=(r + TINY), ux=0.0, uy=0.0, uz=-1.0, answers=np.array([0, 0, 1])) + # Particle traveling out through the Top of the torus + run(x=R, y=0.0, z=(r - TINY), ux=0.0, uy=0.0, uz=1.0, answers=np.array([0, 0, -1])) + + # Particle traveling in through the Bottom of the torus + run(x=0.0, y=R, z=-(r + TINY), ux=0.0, uy=0.0, uz=1.0, answers=np.array([0, 0, -1])) + # Particle traveling out through the Bottom of the torus + run(x=0.0, y=R, z=-(r - TINY), ux=0.0, uy=0.0, uz=-1.0, answers=np.array([0, 0, 1])) + + root = math.sqrt(2) / 2 # 45 degree X-Y lengths on the unit circle + d = ( + R + r + ) * root # X-Y values for a particle at 45 degrees on a torus of given dimensions + + # Particle traveling in head on through the Side of the torus at the 45 degrees from the x-axis + run( + x=(d + TINY), + y=(d + TINY), + z=0.0, + ux=-root, + uy=-root, + uz=0.0, + answers=np.array([root, root, 0]), + ) + # Particle traveling out through the Side of the torus as above + run( + x=(d - TINY), + y=(d - TINY), + z=0.0, + ux=root, + uy=root, + uz=0.0, + answers=np.array([-root, -root, 0]), + ) + + +def test_get_normal_component(): + def run(x, y, z, ux, uy, uz, answer): + particle["x"] = x + particle["y"] = y + particle["z"] = z + particle["ux"] = ux + particle["uy"] = uy + particle["uz"] = uz + result = torus_z.get_normal_component(particle_container, static_surface) + assert np.isclose(result, answer) + + # Particle traveling in through the Top of the torus + run(x=R, y=0.0, z=(r + TINY), ux=0.0, uy=0.0, uz=-1.0, answer=-1) + + # Particle traveling out through the Top of the torus + run(x=0.0, y=R, z=(r - TINY), ux=0.0, uy=0.0, uz=1.0, answer=1) + + # Particle moving parallel to the torus + run(x=0.0, y=R, z=-(0.5 + TINY), ux=0.0, uy=1, uz=0.0, answer=0) + + +def test_get_distance(): + def run(x, y, z, ux, uy, uz, answer): + particle["x"] = x + particle["y"] = y + 
particle["z"] = z + particle["ux"] = ux + particle["uy"] = uy + particle["uz"] = uz + result = torus_z.get_distance(particle_container, static_surface) + assert np.isclose(result, answer) + + # Outside Tours + x = R + y = 0.0 + z = r + 1 + ## Moving closer + run(x, y, z, ux=0.0, uy=0.0, uz=-1.0, answer=1.0) + ## Moving away + run(x, y, z, ux=0.0, uy=0.0, uz=1.0, answer=INF) + ## Parallel + run(x, y, z, ux=1.0, uy=0.0, uz=0.0, answer=INF) + + # Inside Torus + x = 0.0 + y = R + z = r / 2 + ## Moving Up + run(x, y, z, ux=0.0, uy=0.0, uz=1, answer=(r / 2)) + ## Moving Down + run(x, y, z, ux=0.0, uy=0.0, uz=-1.0, answer=(3 * (r / 2))) + + # At surface, within tolerance, on the outside + x = R + y = 0.0 + z = r + TINY + ## Moving away + run(x, y, z, ux=0.0, uy=0.0, uz=1.0, answer=INF) + ## Moving closer + run(x, y, z, ux=0.0, uy=0.0, uz=-1.0, answer=(2 * r)) + ## Parallel + run(x, y, z, ux=1.0, uy=0.0, uz=0.0, answer=INF) + + # At surface, within tolerance, on the inside + x = 0.0 + y = R + z = r - TINY + ## Moving up + run(x, y, z, ux=0.0, uy=0.0, uz=1.0, answer=INF) + ## Moving down + run(x, y, z, ux=0.0, uy=0.0, uz=-1.0, answer=(2 * r)) + + +# ===================================================================================== +# Torus-Z integrated transport interface +# ===================================================================================== + + +def test_interface_reflect(): + def run(x, y, z, ux, uy, uz, answers): + particle["x"] = x + particle["y"] = y + particle["z"] = z + particle["ux"] = ux + particle["uy"] = uy + particle["uz"] = uz + interface.reflect(particle_container, static_surface) + directions = np.array([particle["ux"], particle["uy"], particle["uz"]]) + assert np.allclose(directions, answers) + + run(x=R, y=0.0, z=(r + TINY), ux=0.0, uy=0.0, uz=-1.0, answers=np.array([0, 0, 1])) + run(x=R, y=0.0, z=(r - TINY), ux=0.0, uy=0.0, uz=1.0, answers=np.array([0, 0, -1])) + run(x=0.0, y=R, z=-(r + TINY), ux=0.0, uy=0.0, uz=1.0, 
answers=np.array([0, 0, -1])) + run(x=0.0, y=R, z=-(r - TINY), ux=0.0, uy=0.0, uz=-1.0, answers=np.array([0, 0, 1])) + + root = math.sqrt(2) / 2 # 45 degree X-Y lengths on the unit circle + d = ( + R + r + ) * root # X-Y values for a particle at 45 degrees on a torus of given dimensions + + # Particle traveling in head on through the Side of the torus at the 45 degrees from the x-axis + run( + x=(d + TINY), + y=(d + TINY), + z=0.0, + ux=-root, + uy=-root, + uz=0.0, + answers=np.array([root, root, 0]), + ) + # Particle traveling out through the Side of the torus as above + run( + x=(d - TINY), + y=(d - TINY), + z=0.0, + ux=root, + uy=root, + uz=0.0, + answers=np.array([-root, -root, 0]), + ) + + +def test_interface_evaluate(): + + def run_static(x, y, z, answer): + particle["x"] = x + particle["y"] = y + particle["z"] = z + result = interface.evaluate(particle_container, static_surface, data) + assert np.isclose(result, answer) + + def run_moving(x, y, z, ux, uy, uz, t, answer): + particle["x"] = x + particle["y"] = y + particle["z"] = z + particle["ux"] = ux + particle["uy"] = uy + particle["uz"] = uz + particle["t"] = t + result = interface.evaluate(particle_container, moving_surface, data) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Inside + run_static(x=1.0, y=0.0, z=0.0, answer=-0.9375) + # Outside + run_static(x=0.0, y=-1.0, z=5.0, answer=711.5625) + + # ================================================================================= + # Moving + # ================================================================================= + + # First bin + t = 3.0 # Torus center x-position = -3.0 as the torus has a velocity of -1 and started in the center + ux = 0.3 # Arbitrary + uy = 0.3 # Arbitrary + uz = 0.3 # Arbitrary + ## Inside side + run_moving(x=-3.0, y=1.0, z=0.0, ux=ux, uy=uy, 
uz=uz, t=t, answer=-0.9375) + ## Outside side + run_moving(x=-3.0, y=-1.0, z=5.0, ux=ux, uy=uy, uz=uz, t=t, answer=711.5625) + + # First bin, at grid + t = 5.0 # Torus center x-position = -5.0 + ux = -0.3 # Arbitrary + uy = -0.3 # Arbitrary + uz = -0.3 # Arbitrary + ## Inside side + run_moving(x=-5.0, y=1.0, z=0.0, ux=ux, uy=uy, uz=uz, t=t, answer=-0.9375) + ## Outside side + run_moving(x=-5.0, y=-1.0, z=5.0, ux=ux, uy=uy, uz=uz, t=t, answer=711.5625) + + # Interior bin + t = 12.0 # Torus center x-position = -1.0 due to velocity and duration values + ux = 0.3 # Arbitrary + uy = 0.3 # Arbitrary + uz = 0.3 # Arbitrary + ## Inside side + run_moving(x=-1.0, y=1.0, z=0.0, ux=ux, uy=uy, uz=uz, t=t, answer=-0.9375) + ## Outside side + run_moving(x=-1.0, y=-1.0, z=5.0, ux=ux, uy=uy, uz=uz, t=t, answer=711.5625) + + # Interior bin, at grid + t = 15.0 # Surface x-position = -10.0 + ux = -0.3 # Arbitrary + uy = -0.3 # Arbitrary + uz = -0.3 # Arbitrary + ## Inside side + run_moving(x=-10.0, y=1.0, z=0.0, ux=ux, uy=uy, uz=uz, t=t, answer=-0.9375) + ## Outside side + run_moving(x=-10.0, y=-1.0, z=5.0, ux=ux, uy=uy, uz=uz, t=t, answer=711.5625) + + # Final bin + t = 100.0 # Surface x-position = -10.0 + ux = 0.3 # Arbitrary + uy = 0.3 # Arbitrary + uz = 0.3 # Arbitrary + ## Inside side + run_moving(x=-10.0, y=1.0, z=0.0, ux=ux, uy=uy, uz=uz, t=t, answer=-0.9375) + ## Outside side + run_moving(x=-10.0, y=-1.0, z=5.0, ux=ux, uy=uy, uz=uz, t=t, answer=711.5625) + + +def test_interface_get_normal_component(): + def run_static(x, y, z, ux, uy, uz, answer): + particle["x"] = x + particle["y"] = y + particle["z"] = z + particle["ux"] = ux + particle["uy"] = uy + particle["uz"] = uz + speed = 2.0 # Arbitrary + result = interface.get_normal_component( + particle_container, speed, static_surface, data + ) + assert np.isclose(result, answer) + + def run_moving(x, y, z, ux, uy, uz, t, speed, answer): + particle["x"] = x + particle["y"] = y + particle["z"] = z + particle["ux"] = ux + 
particle["uy"] = uy + particle["uz"] = uz + particle["t"] = t + result = interface.get_normal_component( + particle_container, speed, moving_surface, data + ) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Particle traveling in through the Top of the torus + run_static(x=R, y=0.0, z=(r + TINY), ux=0.0, uy=0.0, uz=-1.0, answer=-1) + # Particle traveling out through the Top of the torus + run_static(x=0.0, y=R, z=(r - TINY), ux=0.0, uy=0.0, uz=1.0, answer=1) + # Particle moving parallel to the torus + run_static(x=0.0, y=R, z=-(0.5 + TINY), ux=0.0, uy=1, uz=0.0, answer=0) + + # ================================================================================= + # Moving + # ================================================================================= + + # Surface moving in the positive direction + t = 8.0 # Surface x-velocity = 2.0, center of torus x position at 1 + # + ## Positive direction + ux = 0.4 + uy = 0.0 + uz = 0.0 + ### Faster (normal component values on the very centerline of the torus should match an x-plane) + run_moving( + x=(1 + R + r), + y=0.0, + z=0.0, + ux=ux, + uy=uy, + uz=uz, + t=t, + speed=6.0, + answer=0.4 / 6.0, + ) + ### Slower (change sign) + run_moving( + x=(1 + R + r), + y=0.0, + z=0.0, + ux=ux, + uy=uy, + uz=uz, + t=t, + speed=2.0, + answer=-1.2 / 2.0, + ) + ### Same speed (cancel out) + run_moving( + x=(1 + R + r), y=0.0, z=0.0, ux=ux, uy=uy, uz=uz, t=t, speed=5.0, answer=0.0 + ) + # + ## Negative direction + run_moving( + x=(1 + R + r), + y=0.0, + z=0.0, + ux=-0.4, + uy=uy, + uz=uz, + t=t, + speed=6.0, + answer=-4.4 / 6.0, + ) + ## Parallel + run_moving( + x=(1 + R + r), + y=0.0, + z=0.0, + ux=-0.0, + uy=uy, + uz=uz, + t=t, + speed=6.0, + answer=-2.0 / 6.0, + ) + + # Surface moving in the negative direction + t = 10.0 # Surface x-velocity = -3.0, center of 
torus x position at 5 + # + ## Negative direction + ux = -0.4 + uy = 0.0 + uz = 0.0 + ### Faster + run_moving( + x=(5 + R + r), + y=0.0, + z=0.0, + ux=ux, + uy=uy, + uz=uz, + t=t, + speed=8.0, + answer=-0.2 / 8.0, + ) + ### Slower (change sign) + run_moving( + x=(5 + R + r), + y=0.0, + z=0.0, + ux=ux, + uy=uy, + uz=uz, + t=t, + speed=2.0, + answer=2.2 / 2.0, + ) + ### Same speed (cancel out) + run_moving( + x=(5 + R + r), y=0.0, z=0.0, ux=ux, uy=uy, uz=uz, t=t, speed=7.5, answer=0.0 + ) + # + ## Positive direction + run_moving( + x=(5 + R + r), + y=0.0, + z=0.0, + ux=0.4, + uy=uy, + uz=uz, + t=t, + speed=8.0, + answer=6.2 / 8.0, + ) + ## Parallel + run_moving( + x=(5 + R + r), + y=0.0, + z=0.0, + ux=0.0, + uy=uy, + uz=uz, + t=t, + speed=8.0, + answer=3.0 / 8.0, + ) + + +def test_interface_check_sense(): # Returns true if the particle is on the outside of the torus, and false if it's on the inside (particle direction and speed tiebreak) + def run_static(x, y, z, ux, uy, uz, answer): + particle["x"] = x + particle["y"] = y + particle["z"] = z + particle["ux"] = ux + particle["uy"] = uy + particle["uz"] = uz + speed = 2.0 # Arbitrary + result = interface.check_sense(particle_container, speed, static_surface, data) + assert np.isclose(result, answer) + + def run_moving(x, y, z, ux, uy, uz, t, speed, answer): + particle["x"] = x + particle["y"] = y + particle["z"] = z + particle["ux"] = ux + particle["uy"] = uy + particle["uz"] = uz + particle["t"] = t + result = interface.check_sense(particle_container, speed, moving_surface, data) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Not at surface + y = 0.0 + z = 0.0 + ux = 0.3 # Arbitrary + uy = 0.0 + uz = 0.0 + ## Positive side + run_static(x=3.0, y=y, z=z, ux=ux, uy=uy, uz=uz, answer=True) + ## Negative side + run_static(x=-4.0, y=y, z=z, 
ux=ux, uy=uy, uz=uz, answer=True) + + # At surface, outside + x = R + r + TINY + y = 0.0 + z = 0.0 + uy = 0.0 + uz = 0.0 + ## Outward direction + run_static(x=x, y=y, z=z, ux=0.4, uy=uy, uz=uz, answer=True) + ## Inward direction + run_static(x=x, y=y, z=z, ux=-0.4, uy=uy, uz=uz, answer=False) + + # At surface, inside + x = R + r - TINY + y = 0.0 + z = 0.0 + uy = 0.0 + uz = 0.0 + ## Outward direction + run_static(x=x, y=y, z=z, ux=0.2, uy=uy, uz=uz, answer=True) + ## Inward direction + run_static(x=x, y=y, z=z, ux=-0.2, uy=uy, uz=uz, answer=False) + + # ================================================================================= + # Moving: Surface moving in the positive X direction + # ================================================================================= + t = 8.5 # Surface x-center = 2.0; surface x-velocity = 2.0 + + # Not at surface + y = 0.0 + z = 0.0 + ux = 0.3 # Arbitrary + uy = 0.0 + uz = 0.0 + speed = 3.0 # Arbitrary + ## Outside + run_moving(x=13.0, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=speed, answer=True) + ## Inside + run_moving(x=3.0, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=speed, answer=False) + + # At surface, outside + x = 3.5 + TINY + y = 0.0 + z = 0.0 + ## Positive direction (same direction) + ux = 0.4 + uy = 0.0 + uz = 0.0 + ### Faster + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=6.0, answer=True) + ### Slower (passed by the surface) + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=4.0, answer=False) + ### Same speed (undefined, but False is returned) + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=5.0, answer=False) + # + ## Negative direction (opposite direction) + run_moving(x=x, y=y, z=z, ux=-0.4, uy=uy, uz=uz, t=t, speed=6.0, answer=False) + + # At surface, inside + x = 3.5 - TINY + y = 0.0 + z = 0.0 + ## Positive direction (same direction) + ux = 0.4 + uy = 0.0 + uz = 0.0 + ### Faster + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=6.0, answer=True) + ### Slower (passed 
by the surface) + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=4.0, answer=False) + ### Same speed (undefined, but False is returned) + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=5.0, answer=False) + # + ## Negative direction (opposite direction) + run_moving(x=x, y=y, z=z, ux=-0.4, uy=uy, uz=uz, t=t, speed=6.0, answer=False) + + # ================================================================================= + # Moving: Surface moving in the negative direction + # ================================================================================= + t = 13.0 # Surface x-center = -4.0; surface x-velocity = -3.0 + + # Not at surface + y = 0.0 + z = 0.0 + ux = 0.3 # Arbitrary + uy = 0.0 + uz = 0.0 + speed = 3.0 # Arbitrary + ## Outside + run_moving(x=0.0, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=speed, answer=True) + ## Inside + run_moving(x=-5.0, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=speed, answer=False) + + # At surface, outside + x = -2.5 + TINY + y = 0.0 + z = 0.0 + ## Positive direction (opposite direction) + uy = 0.0 + uz = 0.0 + run_moving(x=x, y=y, z=z, ux=0.6, uy=uy, uz=uz, t=t, speed=6.0, answer=True) + # + ## Negative direction (same direction) + ux = -0.6 + ### Faster + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=6.0, answer=False) + ### Slower (passed by surface) + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=4.0, answer=True) + ### Same speed (undefined, but False is returned) + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=5.0, answer=False) + + # At surface, inside + x = -2.5 - TINY + y = 0.0 + z = 0.0 + ## Positive direction (opposite direction) + uy = 0.0 + uz = 0.0 + run_moving(x=x, y=y, z=z, ux=0.6, uy=uy, uz=uz, t=t, speed=6.0, answer=True) + # + ## Negative direction (same direction) + ux = -0.6 + ### Faster + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=6.0, answer=False) + ### Slower (passed by surface) + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, 
speed=4.0, answer=True) + ### Same speed (undefined, but False is returned) + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=5.0, answer=False) + + +def test_interface_get_distance(): + def run_static(x, y, z, ux, uy, uz, answer): + particle["x"] = x + particle["y"] = y + particle["z"] = z + particle["ux"] = ux + particle["uy"] = uy + particle["uz"] = uz + speed = 2.0 # Arbitrary + result = interface.get_distance(particle_container, speed, static_surface, data) + assert np.isclose(result, answer) + + def run_moving(x, y, z, ux, uy, uz, t, speed, answer): + particle["x"] = x + particle["y"] = y + particle["z"] = z + particle["ux"] = ux + particle["uy"] = uy + particle["uz"] = uz + particle["t"] = t + result = interface.get_distance(particle_container, speed, moving_surface, data) + assert np.isclose(result, answer) + + # ================================================================================= + # Static + # ================================================================================= + + # Positive side + x = 6.5 # Arbitrary + y = 0.0 + z = 0.0 + uy = 0.0 + uz = 0.0 + ## Positive direction (moving away) + run_static(x=x, y=y, z=z, ux=0.3, uy=uy, uz=uz, answer=INF) + ## Negative direction (moving closer) + run_static(x=x, y=y, z=z, ux=-0.3, uy=uy, uz=uz, answer=5.0 / 0.3) + ## Parallel + run_static(x=x, y=y, z=z, ux=0.0, uy=1.0, uz=uz, answer=INF) + + # Negative side + x = -5.5 # Arbitrary + y = 0.0 + z = 0.0 + uy = 0.0 + uz = 0.0 + ## Positive direction (moving closer) + run_static(x=x, y=y, z=z, ux=0.3, uy=uy, uz=uz, answer=4.0 / 0.3) + ## Negative direction (moving away) + run_static(x=x, y=y, z=z, ux=-0.3, uy=uy, uz=uz, answer=INF) + ## Parallel + run_static(x=x, y=y, z=z, ux=0.0, uy=1.0, uz=uz, answer=INF) + + # At surface, on the outside + x = 1.5 + TINY + y = 0.0 + z = 0.0 + uy = 0.0 + uz = 0.0 + ## Positive direction + run_static(x=x, y=y, z=z, ux=0.4, uy=uy, uz=uz, answer=INF) + ## Negative direction + run_static(x=x, y=y, z=z, 
ux=-0.4, uy=uy, uz=uz, answer=1.0 / 0.4) + ## Parallel + run_static(x=x, y=y, z=z, ux=0.0, uy=1.0, uz=uz, answer=INF) + + # At surface, on the inside + x = 1.5 - TINY + y = 0.0 + z = 0.0 + uy = 0.0 + uz = 0.0 + ## Positive direction + run_static(x=x, y=y, z=z, ux=0.4, uy=uy, uz=uz, answer=INF) + ## Negative direction + run_static(x=x, y=y, z=z, ux=-0.4, uy=uy, uz=uz, answer=1.0 / 0.4) + ## Parallel + run_static(x=x, y=y, z=z, ux=0.0, uy=1.0, uz=uz, answer=INF) + + # TODO: Numerical Risk - Add off-midplane and near-tangent moving-distance cases. + # ============================================================================================ + # Moving (Only testing a hit on the midline of the torus in positive and negative x directions) + # ============================================================================================ + # Bin 0: t = [ 0.0, 5.0]; surface_x_center = [-5.0, 0.0]; surface_speed = -1.0 + # Bin 1: t = [ 5.0, 10.0]; surface_x_center = [-5.0, 5.0]; surface_speed = 2.0 + # Bin 2: t = [10.0, 15.0]; surface_x_center = [-10.0, 5.0]; surface_speed = -3.0 + # Bin 3: t = [15.0, INF]; surface_x_center = [-10.0, -10.0]; surface_speed = 0.0 + + # surface_x_center (0.0 ---> -5.0) + # surface_x_center (-5.0 ---> 5.0) + # surface_x_center (5.0, ---> -10.0) + # surface_x_center (-10.0 ---> -10.0) + + # This is the distance from the particle to the center of the torus, not counting the inner or outer radii + def center_distance(x, ux, speed, bin_idx): + # Time when the surface enters the final bin to be evaluated (0 when evaluating bin 0, and 5 when evaluating bin 1) + t0 = 0.0 + np.sum(durations[:bin_idx]) + + # Starting position of the surface at the beginning of the last bin to be evaluated (0 for bin 0, and -5 for bin 1) + surface_x = A + np.sum(durations[:bin_idx] * velocities[:bin_idx, 0]) + + # Speed of the surface in the final bin to be evaluated (-1 for bin 0) + surface_speed = velocities[bin_idx, 0] + + return ((surface_speed * -t0) - (x - 
surface_x)) / (ux - surface_speed / speed) + + def outer_surface_distance(x, ux, speed, bin_idx): + surface_speed = velocities[bin_idx, 0] + relative_ux = ux - surface_speed / speed + return center_distance(x, ux, speed, bin_idx) - (R + r) / abs(relative_ux) + + # Start from the beginning + t = 0.0 + + # Positive x side of the torus + x = 2.0 + y = 0.0 + z = 0.0 + # + ## Positive direction (moving away) + ux = 0.4 + uy = 0.0 + uz = 0.0 + ### Surface catches up after reversing direction in bin 1 + answer = outer_surface_distance(x, ux, speed=1.0, bin_idx=1) + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=1.0, answer=answer) + ### Hit (rear-ended by the surface) + answer = outer_surface_distance( + x, ux, speed=0.9, bin_idx=1 + ) # Collision in bin 1 where the surface catches up to the particle + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=0.9, answer=answer) + # + ## Negative direction (moving closer) + ux = -0.4 + uy = 0.0 + uz = 0.0 + ### Hit (rear-end the surface) + answer = outer_surface_distance( + x, ux, speed=3.0, bin_idx=0 + ) # Collision in bin 0 where the surface is running away from particle + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=3.0, answer=answer) + ### Hit (head-on opposite directions) + answer = outer_surface_distance(x, ux, speed=0.1, bin_idx=1) # Collision in bin 1 + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=0.1, answer=answer) + + # Negative x side of the torus + x = -2.0 + y = 0.0 + z = 0.0 + # + ## Negative direction (moving away) + ux = -0.4 + uy = 0.0 + uz = 0.0 + ### Surface catches up in bin 0 + answer = outer_surface_distance(x, ux, speed=2.0, bin_idx=0) + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=2.0, answer=answer) + ### Hit (rear-ended by the surface) + answer = outer_surface_distance(x, ux, speed=0.4, bin_idx=0) # Collision in bin 0 + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=0.4, answer=answer) + # + ## Positive direction (moving closer) + ux 
= 0.4 + uy = 0.0 + uz = 0.0 + ### Hit (head-on) + answer = outer_surface_distance(x, ux, speed=0.1, bin_idx=0) # Collision in bin 0 + run_moving(x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=0.1, answer=answer) + ### Hit (rear-end the surface) + x = -20.0 + answer = outer_surface_distance( + x, ux, speed=(20.0 / 3.0), bin_idx=1 + ) # Collision in bin 1 + run_moving( + x=x, y=y, z=z, ux=ux, uy=uy, uz=uz, t=t, speed=(20.0 / 3.0), answer=answer + ) diff --git a/mcdc/test/unit/transport/util/find_bin.py b/mcdc/test/unit/transport/util/find_bin.py new file mode 100644 index 000000000..f864a5e48 --- /dev/null +++ b/mcdc/test/unit/transport/util/find_bin.py @@ -0,0 +1,62 @@ +import numpy as np +import pytest + +#### + +from mcdc.transport.util import find_bin + + +@pytest.fixture +def grid(): + return np.array([0.0, 1.0, 2.0, 5.0, 10.0]) + + +@pytest.fixture +def eps(): + return 1e-5 + + +def test_inside_bins(grid): + assert find_bin(0.5, grid) == 0 + assert find_bin(1.5, grid) == 1 + assert find_bin(4.9, grid) == 2 + assert find_bin(9.9, grid) == 3 + + +def test_exact_interior_edges(grid): + assert find_bin(1.0, grid, 0.0, True) == 0 + assert find_bin(1.0, grid, 0.0, False) == 1 + assert find_bin(5.0, grid, 0.0, True) == 2 + assert find_bin(5.0, grid, 0.0, False) == 3 + + +def test_first_edge(grid): + assert find_bin(0.0, grid, 0.0, True) == -1 + assert find_bin(0.0, grid, 0.0, False) == 0 + + +def test_last_edge(grid): + assert find_bin(10.0, grid, 0.0, True) == 3 + assert find_bin(10.0, grid, 0.0, False) == -1 + + +def test_near_interior_edges_with_epsilon(grid, eps): + assert find_bin(1.0 - 1e-6, grid, eps, True) == 0 + assert find_bin(1.0 - 1e-6, grid, eps, False) == 1 + assert find_bin(1.0 + 1e-6, grid, eps, True) == 0 + assert find_bin(1.0 + 1e-6, grid, eps, False) == 1 + + +def test_near_first_edge_with_epsilon(grid, eps): + assert find_bin(0.0 + 1e-6, grid, eps, True) == -1 + assert find_bin(0.0 + 1e-6, grid, eps, False) == 0 + + +def 
test_near_last_edge_with_epsilon(grid, eps): + assert find_bin(10.0 - 1e-6, grid, eps, True) == 3 + assert find_bin(10.0 - 1e-6, grid, eps, False) == -1 + + +def test_out_of_range(grid): + assert find_bin(-1.0, grid) == -1 + assert find_bin(11.0, grid) == -1 diff --git a/mcdc/test/unit/transport/util/linear_interpolation.py b/mcdc/test/unit/transport/util/linear_interpolation.py new file mode 100644 index 000000000..f517c7068 --- /dev/null +++ b/mcdc/test/unit/transport/util/linear_interpolation.py @@ -0,0 +1,42 @@ +from mcdc.transport.util import linear_interpolation + + +def test_exact_endpoints(): + assert linear_interpolation(0, 0, 10, 0, 100) == 0 + assert linear_interpolation(10, 0, 10, 0, 100) == 100 + + +def test_midpoint(): + assert linear_interpolation(5, 0, 10, 0, 100) == 50 + assert linear_interpolation(2.5, 0, 10, 0, 100) == 25 + + +def test_negative_slopes(): + assert linear_interpolation(5, 0, 10, 100, 0) == 50 + assert linear_interpolation(2, 0, 4, 4, 0) == 2 + + +def test_non_uniform_interval(): + # Interval [2, 4] mapped to [10, 30] + assert linear_interpolation(3, 2, 4, 10, 30) == 20 + + +def test_floats(): + result = linear_interpolation(0.5, 0, 1, 0.0, 1.0) + assert abs(result - 0.5) < 1e-12 + + +def test_extrapolation(): + # x before x1 + assert linear_interpolation(-5, 0, 10, 0, 100) == -50 + # x beyond x2 + assert linear_interpolation(20, 0, 10, 0, 100) == 200 + + +def test_x1_equals_x2_raises_zero_division(): + try: + linear_interpolation(1, 2, 2, 0, 10) + except ZeroDivisionError: + assert True + else: + assert False, "Expected ZeroDivisionError when x1 == x2" diff --git a/mcdc/tools/data_library_generator/README.md b/mcdc/tools/data_library_generator/README.md new file mode 100644 index 000000000..43065cd76 --- /dev/null +++ b/mcdc/tools/data_library_generator/README.md @@ -0,0 +1,76 @@ +# MC/DC Data Library Generator + +Converts ACE-format nuclear data files into MC/DC's per-nuclide HDF5 format +for continuous-energy neutron transport. 
+ +## Prerequisites + +```bash +pip install ACEtk h5py numpy tqdm +``` + +You need a collection of ACE files (e.g., from NJOY or an ENDF/B distribution). + +## Environment Variables + +| Variable | Description | +|---------------|-------------------------------------------------------| +| `MCDC_ACELIB` | Path to the directory containing your ACE files. | +| `MCDC_LIB` | Path to the output directory for MC/DC HDF5 files. | + +## Usage + +```bash +export MCDC_ACELIB=/path/to/ace/files +export MCDC_LIB=/path/to/mcdc/library + +python generate.py # Convert only missing nuclides +python generate.py --rewrite # Regenerate all files +python generate.py --verbose # Print detailed per-nuclide info +``` + +## What it Does + +For each ACE file in `$MCDC_ACELIB`, the generator: + +1. Reads the ACE header to identify the nuclide (Z, A, isomeric state) and temperature. +2. Extracts pointwise cross sections (elastic, capture, inelastic, fission) and the energy grid. +3. Extracts angular distributions (tabulated cosine PDFs) and energy distributions + (level scattering, evaporation, Maxwellian, Kalbach-Mann, N-body, tabulated) per reaction channel. +4. For fissionable nuclides, extracts prompt/delayed ν(E), precursor fractions, decay constants, and energy spectra. +5. Writes a single HDF5 file per nuclide-temperature combination (e.g., `U235-293.6K.h5`). + +## Output HDF5 Schema + +``` +-K.h5 +├── nuclide_name (string) +├── temperature (float, K) +├── atomic_weight_ratio (float) +├── fissionable (bool) +└── neutron_reactions/ + ├── xs_energy_grid (1-D array, MeV) + ├── elastic_scattering/ + │ └── MT-002/ + │ ├── xs (1-D array, barns) + │ ├── cosine/ + │ └── energy/ + ├── capture/ + │ └── MT-102/ ... + ├── inelastic_scattering/ + │ └── MT-051/ ... + └── fission/ + └── MT-018/ + ├── xs + ├── cosine/ + ├── energy/ + ├── nu_total/ + ├── nu_prompt/ + ├── nu_delayed/ + └── delayed_neutron/ ... 
# Generate MC/DC continuous-energy nuclear data files (HDF5) from an ACE library.
#
# Environment variables:
#   MCDC_ACELIB : directory containing the source ACE files
#   MCDC_LIB    : output directory for the generated MC/DC HDF5 files
#
# Command-line options:
#   --rewrite : regenerate output files even if they already exist
#   --verbose : print per-file details instead of a progress bar

import ACEtk
import argparse
import h5py
import numpy as np
import os

from tqdm import tqdm

####

import util
from util import print_error, print_note

parser = argparse.ArgumentParser(description="MC/DC data generator")
parser.add_argument("--rewrite", dest="rewrite", action="store_true", default=False)
parser.add_argument("--verbose", dest="verbose", action="store_true", default=False)
args, unargs = parser.parse_known_args()
rewrite = args.rewrite
verbose = args.verbose

# Directories
output_dir = os.getenv("MCDC_LIB")
ace_dir = os.getenv("MCDC_ACELIB")

if output_dir is None:
    print_error("Environment variable $MCDC_LIB is not set")
if ace_dir is None:
    print_error("Environment variable $MCDC_ACELIB is not set")

# Create output directory if needed
os.makedirs(output_dir, exist_ok=True)
print(f"\nACE directory: {ace_dir}")
print(f"Output directory: {output_dir}\n")

# Select the files to process (skip already-converted ones unless --rewrite)
if rewrite:
    target_files = os.listdir(ace_dir)
else:
    target_files = []
    for file_name in os.listdir(ace_dir):
        # File header (first line of the ACE file)
        with open(f"{ace_dir}/{file_name}", "r") as f:
            header = ACEtk.Header.from_string(f.readline())

        # Decode ACE name to MC/DC name
        Z, A, S, T = util.decode_ace_name(header.zaid)
        symbol = util.Z_TO_SYMBOL[Z]
        nuclide_name = f"{symbol}{A}" if S == 0 else f"{symbol}{A}m{S}"
        mcdc_name = f"{nuclide_name}-{T}K.h5"

        if not os.path.exists(f"{output_dir}/{mcdc_name}"):
            target_files.append(file_name)

# Loop over all files
pbar = tqdm(
    target_files,
    disable=verbose,
    bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}{postfix}",
)
for ace_name in pbar:
    # File header
    with open(f"{ace_dir}/{ace_name}", "r") as f:
        header = ACEtk.Header.from_string(f.readline())

    # Decode ACE name to MC/DC name
    Z, A, S, T = util.decode_ace_name(header.zaid)
    symbol = util.Z_TO_SYMBOL[Z]
    nuclide_name = f"{symbol}{A}" if S == 0 else f"{symbol}{A}m{S}"
    mcdc_name = f"{nuclide_name}-{T}K.h5"

    if not rewrite and os.path.exists(f"{output_dir}/{mcdc_name}"):
        continue

    # Create MC/DC file
    if verbose:
        print("\n" + "=" * 80 + "\n")
        print(f"Create {mcdc_name} from {ace_name}\n")
    pbar.set_postfix_str(f"{mcdc_name[:-3]} from {ace_name}")
    file = h5py.File(f"{output_dir}/{mcdc_name}", "w")

    # ==================================================================================
    # Basic properties
    # ==================================================================================

    # Load ACE tables
    ace_table = ACEtk.ContinuousEnergyTable.from_file(f"{ace_dir}/{ace_name}")

    # ACE data source description
    header = ace_table.header
    file.attrs["source_title"] = header.title
    file.attrs["source_version"] = header.version
    file.attrs["source_date"] = header.date
    # Older-style headers may not carry a comments field
    if hasattr(header, "comments"):
        file.attrs["source_comments"] = header.comments

    # Name and excitation level
    file.create_dataset("nuclide_name", data=nuclide_name)
    file.create_dataset("excitation_level", data=S)

    # Temperature
    temperature = file.create_dataset("temperature", data=T)
    temperature.attrs["unit"] = "K"

    # Atomic number and weight ratio
    atomic_number = ace_table.atom_number
    atomic_weight_ratio = ace_table.atomic_weight_ratio
    file.create_dataset("atomic_number", data=atomic_number)
    file.create_dataset("atomic_weight_ratio", data=atomic_weight_ratio)

    # Fissionable?
    fissionable = ace_table.fission_multiplicity_block is not None
    file.create_dataset("fissionable", data=fissionable)

    # ==================================================================================
    # Reaction groups
    # ==================================================================================
    # Elastic scattering: MT=2
    # Capture: Reactions with zero multiplicity
    # Fission: MT=18 or MT=(19, 20, 21, and 38) if given
    # Inelastic: Non-fission reactions with non-zero multiplicity
    # Ignored: MT=(1, 3, 4, 10) and MT>117

    reactions = file.create_group("neutron_reactions")

    # ACE blocks
    nu_block = ace_table.frame_and_multiplicity_block
    rx_block = ace_table.reaction_number_block
    N_reaction = nu_block.number_reactions

    if nu_block.number_reactions != rx_block.number_reactions:
        print_error("Non-equal reaction number in reaction and multiplicity blocks")

    # The groups
    elastic_group = reactions.create_group("elastic_scattering")
    capture_group = reactions.create_group("capture")
    inelastic_group = reactions.create_group("inelastic_scattering")
    fission_group = reactions.create_group("fission")

    # MT groups
    elastic_MTs = [2]
    capture_MTs = []
    inelastic_MTs = []
    fission_MTs = []

    # Redundant MTs
    fission_chance_MTs = [19, 20, 21, 38]
    redundant_MTs = [1, 3, 4, 10]

    # Set fission MTs: total fission (MT=18) and its partial-chance
    # components (MT=19/20/21/38) are mutually exclusive in the library.
    total_fission_given = rx_block.has_MT(18)
    if total_fission_given:
        fission_MTs = [18]
        # The components should not be given
        for MT in fission_chance_MTs:
            if rx_block.has_MT(MT):
                print_error("Both total fission and its components are given")
    else:
        for MT in fission_chance_MTs:
            if rx_block.has_MT(MT):
                fission_MTs.append(MT)

    # Capture and inelastic MTs
    for i in range(N_reaction):
        idx = i + 1  # ACEtk block accessors are one-based
        MT = rx_block.MT(idx)

        if MT in redundant_MTs + elastic_MTs + fission_MTs or MT > 117:
            continue

        nu = nu_block.multiplicity(idx)

        # Energy-dependent (non-integer) multiplicities are not supported
        if not isinstance(nu, int):
            print_error("Non-integer multiplicity for inelastic scattering")

        if nu == 0:
            capture_MTs.append(MT)
        elif nu > 0:
            inelastic_MTs.append(MT)
        else:
            print_error(f"Negative multiplicity for MT-{MT:03}")

    # Create MTs
    for rx_group, rx_MTs in [
        (elastic_group, elastic_MTs),
        (capture_group, capture_MTs),
        (inelastic_group, inelastic_MTs),
        (fission_group, fission_MTs),
    ]:
        for MT in rx_MTs:
            MT_group = rx_group.create_group(f"MT-{MT:03}")
            MT_group.attrs["MT"] = MT

    # Report MT groups
    if verbose:
        print("  Reaction group MTs")
        print(f"    - Elastic scattering MTs: {elastic_MTs}")
        print(f"    - Capture MTs: {capture_MTs}")
        print(f"    - Inelastic scattering MTs: {inelastic_MTs}")
        if fissionable:
            print(f"    - Fission MT: {fission_MTs}")

    # Delete empty groups
    if not fissionable:
        del file["neutron_reactions/fission"]
    if len(inelastic_MTs) == 0:
        del file["neutron_reactions/inelastic_scattering"]

    # ==================================================================================
    # Cross-sections
    # ==================================================================================

    xs0_block = ace_table.principal_cross_section_block
    xs_block = ace_table.cross_section_block

    xs_energy = xs0_block.energies
    xs_elastic = xs0_block.elastic
    cross_sections = xs_block.cross_sections
    offsets = xs_block.energy_index

    # Energy grid
    xs_energy = np.array(xs_energy)
    dataset = reactions.create_dataset("xs_energy_grid", data=xs_energy)
    dataset.attrs["unit"] = "MeV"

    # Elastic scattering (defined on the full energy grid, hence offset 0)
    xs = elastic_group.create_dataset("MT-002/xs", data=xs_elastic)
    xs.attrs["offset"] = 0
    xs.attrs["unit"] = "barns"

    # Capture, inelastic scattering, and fission
    for MTs, group in [
        (capture_MTs, capture_group),
        (inelastic_MTs, inelastic_group),
        (fission_MTs, fission_group),
    ]:
        for MT in MTs:
            idx = rx_block.index(MT)
            xs = group.create_dataset(f"MT-{MT:03}/xs", data=cross_sections(idx))
            # ACE energy index is one-based; store a zero-based offset
            xs.attrs["offset"] = offsets(idx) - 1
            xs.attrs["unit"] = "barns"

    # ==================================================================================
    # Q-value
    # ==================================================================================

    q_value_block = ace_table.reaction_qvalue_block

    # Elastic scattering: zero Q-value
    for MT in elastic_MTs:
        dataset = elastic_group.create_dataset(f"MT-{MT:03}/Q-value", data=0.0)
        dataset.attrs["unit"] = "MeV"

    for MTs, group in [
        (capture_MTs, capture_group),
        (inelastic_MTs, inelastic_group),
        (fission_MTs, fission_group),
    ]:
        for MT in MTs:
            idx = rx_block.index(MT)
            dataset = group.create_dataset(
                f"MT-{MT:03}/Q-value", data=q_value_block.q_value(idx)
            )
            dataset.attrs["unit"] = "MeV"

    # ==================================================================================
    # Reference frames and inelastic scattering multiplicities
    # ==================================================================================
    # Elastic is always in COM frame (per ACE standard)

    # Elastic scattering reference frame
    for MT in elastic_MTs:
        elastic_group.create_dataset(f"MT-{MT:03}/reference_frame", data="COM")

    # Reference frames of the others
    for MTs, group in [
        (capture_MTs, capture_group),
        (inelastic_MTs, inelastic_group),
        (fission_MTs, fission_group),
    ]:
        for MT in MTs:
            idx = rx_block.index(MT)
            reference_frame = nu_block.reference_frame(idx)
            if reference_frame == ACEtk.ReferenceFrame.Laboratory:
                reference_frame = "LAB"
            elif reference_frame == ACEtk.ReferenceFrame.CentreOfMass:
                reference_frame = "COM"
            else:
                print_error(f"Unknown reaction reference frame type for MT-{MT:03}")
            group.create_dataset(f"MT-{MT:03}/reference_frame", data=reference_frame)

    # Inelastic multiplicity
    for MT in inelastic_MTs:
        idx = rx_block.index(MT)
        nu = nu_block.multiplicity(idx)
        inelastic_group.create_dataset(f"MT-{MT:03}/multiplicity", data=nu)

    # ==================================================================================
    # Angular distributions
    # ==================================================================================

    angle_block = ace_table.angular_distribution_block

    # Elastic scattering (index 0 addresses the elastic data in the ACE AND block)
    angle_group = elastic_group.create_group("MT-002/angular_cosine_distribution")
    data = angle_block.angular_distribution_data(0)
    for subdata in data.distributions:
        if not isinstance(subdata, ACEtk.continuous.TabulatedAngularDistribution):
            print_error("Unsupported elastic scattering angular distribution")
    util.load_cosine_distribution(data, angle_group)

    # Inelastic scattering and fission
    for MTs, group in [
        (inelastic_MTs, inelastic_group),
        (fission_MTs, fission_group),
    ]:
        for MT in MTs:
            idx = rx_block.index(MT)
            angle_group = group.create_group(f"MT-{MT:03}/angular_cosine_distribution")
            data = angle_block.angular_distribution_data(idx)
            util.load_cosine_distribution(data, angle_group)

    # ==================================================================================
    # Energy distributions
    # ==================================================================================

    energy_block = ace_table.energy_distribution_block

    for MTs, group in [
        (inelastic_MTs, inelastic_group),
        (fission_MTs, fission_group),
    ]:
        for MT in MTs:
            idx = rx_block.index(MT)
            data = energy_block.energy_distribution_data(idx)

            if not isinstance(data, ACEtk.continuous.MultiDistributionData):
                # Single spectrum: probability 1 over the whole energy range
                dataset = group.create_dataset(
                    f"MT-{MT:03}/spectrum_probability_grid", data=np.array([0.0, 30.0])
                )
                dataset.attrs["unit"] = "MeV"
                dataset = group.create_dataset(
                    f"MT-{MT:03}/spectrum_probability", data=np.array([[1.0]])
                )

                # The distribution
                energy_group = group.create_group(f"MT-{MT:03}/energy_spectrum-1")
                util.load_energy_distribution(data, energy_group)

            else:
                N_dist = data.number_distributions

                # ======================================================================
                # Probabilities
                # ======================================================================

                # Constant probability
                if all(
                    np.array(
                        [x.number_interpolation_regions for x in data.probabilities]
                    )
                    == 0
                ):
                    probability_grid = np.array([0.0, 30.0])
                    probability = np.zeros((1, N_dist))
                    for i in range(N_dist):
                        probability[0, i] = max(data.probability(i + 1).probabilities)

                # Histogram probability
                elif all(
                    np.array(
                        [x.number_interpolation_regions for x in data.probabilities]
                    )
                    == 1
                ) and all(np.array([x.interpolants for x in data.probabilities]) == 1):
                    probability_grid = np.array(data.probability(1).energies)
                    probability = np.zeros((len(probability_grid) - 1, N_dist))
                    for i in range(N_dist):
                        # All component distributions must share one energy grid
                        if not all(
                            probability_grid
                            == np.array(data.probability(i + 1).energies)
                        ):
                            print_error(
                                "Unsupported multi-distribution energy spectrum"
                            )
                        probability[:, i] = np.array(
                            data.probability(i + 1).probabilities[:-1]
                        )

                else:
                    print_error("Unsupported multi-distribution energy spectrum")

                dataset = group.create_dataset(
                    f"MT-{MT:03}/spectrum_probability_grid", data=probability_grid
                )
                dataset.attrs["unit"] = "MeV"
                dataset = group.create_dataset(
                    f"MT-{MT:03}/spectrum_probability", data=probability
                )

                # ======================================================================
                # The distributions
                # ======================================================================

                for i in range(N_dist):
                    energy_group = group.create_group(
                        f"MT-{MT:03}/energy_spectrum-{i+1}"
                    )
                    distribution = data.distribution(i + 1)
                    util.load_energy_distribution(distribution, energy_group)

    # Fissionable zone below
    if not fissionable:
        # FIX: close the HDF5 file before skipping; previously the handle leaked
        # for every non-fissionable nuclide.
        file.close()
        continue

    # ==================================================================================
    # Fission multiplicities and delayed neutron precursor fractions and decay rates
    # ==================================================================================

    prompt_block = ace_table.fission_multiplicity_block
    delayed_block = ace_table.delayed_fission_multiplicity_block
    dnp_block = ace_table.delayed_neutron_precursor_block

    # Prompt multiplicity
    data = prompt_block.multiplicity
    h5_group = fission_group.create_group("prompt_multiplicity")
    util.load_fission_multiplicity(data, h5_group)

    # Delayed multiplicity
    if delayed_block is not None:
        data = delayed_block.multiplicity
        h5_group = fission_group.create_group("delayed_multiplicity")
        util.load_fission_multiplicity(data, h5_group)

    # Delayed neutron precursor fractions and decay rates
    if dnp_block is not None:
        N_DNP = dnp_block.number_delayed_precursors
        fractions = np.zeros(N_DNP)
        decay_rates = np.zeros(N_DNP)

        for i in range(N_DNP):
            # FIX: was `idx = 1 + 1`, which read precursor group 2 for every i
            idx = i + 1
            data = dnp_block.precursor_group_data(idx)

            # Only constant (energy-independent) fractions are supported
            if (
                not data.number_interpolation_regions == 0
                or not len(data.probabilities[:]) == 2
                or not data.probabilities[0] == data.probabilities[1]
            ):
                print_error("Non-constant delayed neutron precursor fraction")

            fractions[i] = data.probabilities[0]
            decay_rates[i] = data.decay_constant

        precursors = fission_group.create_group("delayed_neutron_precursors")
        precursors.create_dataset("fractions", data=fractions)
        dataset = precursors.create_dataset("decay_rates", data=decay_rates)
        dataset.attrs["unit"] = "/s"

    # ==================================================================================
    # Delayed fission spectra
    # ==================================================================================

    delayed_spectrum_block = ace_table.delayed_neutron_energy_distribution_block
    if dnp_block is not None:
        N_DNP = dnp_block.number_delayed_precursors

        for i in range(N_DNP):
            # FIX: was `idx = 1 + 1`, which stored group 2's spectrum for every i
            idx = i + 1
            data = delayed_spectrum_block.energy_distribution_data(idx)

            if not isinstance(data, ACEtk.continuous.OutgoingEnergyDistributionData):
                print_error(f"Unsupported delayed fission neutron spectrum: {data}")

            energy_group = fission_group.create_group(
                f"delayed_neutron_precursors/energy_spectrum-{i+1}"
            )
            util.load_energy_distribution(data, energy_group)

    # ==================================================================================
    # Finalize
    # ==================================================================================

    file.close()

print("")
# Helper routines for the MC/DC data library generator: ACE name decoding and
# translation of ACEtk distribution objects into the MC/DC HDF5 layout.

import ACEtk
import h5py
import numpy as np


def print_error(message):
    """Print an error message and terminate the program."""
    print(f"\n  [ERROR]: {message}\n")
    exit()


def print_note(message):
    """Print an informational note."""
    print(f"\n  [NOTE]: {message}\n")


def decode_interpolation(code):
    """Return the MC/DC name of an ENDF interpolation law code, or abort."""
    if code not in INTERPOLATION_MAP:
        print_error(f"Unsupported interpolation law: {code}")
    return INTERPOLATION_MAP[code]


def decode_ace_name(name: str):
    """
    Decode an ACE file name into atomic number Z, mass number A, excitation state S,
    following the rule:
        ZAID = 1000*Z + A,               (ground state),
        ZAID = 1000*Z + A + 300 + 100*S, (excited, S >= 1),
    and temperature T.
    Returns (Z, A, S, T)
    """
    zaid, extension = name.split(".")

    zaid = int(zaid)
    Z = zaid // 1000
    remainder = zaid % 1000

    if remainder < 300:
        # ground state
        A = remainder
        S = 0
    else:
        # excited state
        offset = remainder - 300
        S = offset // 100
        A = offset % 100

    T = ACE_TEMPERATURE_LIB81[extension]

    return Z, A, S, T


def get_zaid(nuclide_name):
    """Return (Z, A) for a nuclide name such as 'U235' or 'He4'."""
    nuclide_name = nuclide_name.strip().capitalize()

    # Find where the letters end and digits begin
    symbol = ""
    mass = 0
    for i, ch in enumerate(nuclide_name):
        if ch.isdigit():
            symbol = nuclide_name[:i]
            mass = int(nuclide_name[i:])
            break
    else:
        raise ValueError(f"No mass number found in '{nuclide_name}'")

    # FIX: was `Z_MAP`, a name that does not exist in this module (NameError)
    if symbol not in SYMBOL_TO_Z:
        raise ValueError(f"Unknown element symbol '{symbol}'")

    Z = SYMBOL_TO_Z[symbol]
    A = mass
    return Z, A


def get_ace_name(Z, A, T, S=None):
    """Return the ACE file name for the given Z, A, temperature T, and state S."""
    ID = Z * 1000 + A
    if S is not None:
        ID += 300 + S * 100
    # FIX: was `ACE_EXTENSION_LIB81`, a name that does not exist (NameError);
    # the temperature-to-extension map is TEMPERATURE_TO_ACELIB81.
    extension = TEMPERATURE_TO_ACELIB81[T]
    return f"{ID}{extension}"


def load_fission_multiplicity(data, h5_group: h5py.Group):
    """Store a polynomial or tabulated fission multiplicity into *h5_group*."""
    # Polynomial
    if data.type == 1:
        h5_group.attrs["type"] = "polynomial"

        C = np.array(data.coefficients)
        dataset = h5_group.create_dataset("coefficient", data=C)
        dataset.attrs["unit-base"] = "MeV"

    # Tabulated
    elif data.type == 2:
        h5_group.attrs["type"] = "tabulated"

        # Consistency: use print_error like every other unsupported-case branch
        if not data.interpolation_data.is_linear_linear:
            print_error("Non linear-linear tabulated multiplicity is not supported")

        energy = np.array(data.energies)

        h5_group.create_dataset("value", data=data.multiplicities)
        dataset = h5_group.create_dataset("energy", data=energy)
        dataset.attrs["unit"] = "MeV"

    ## Yield - Unsupported
    else:
        print_error(f"Unsupported multiplicity type: {data.type}")


def load_cosine_distribution(data, h5_group: h5py.Group):
    """Store an ACEtk angular (cosine) distribution into *h5_group*."""
    if isinstance(data, ACEtk.continuous.FullyIsotropicDistribution):
        h5_group.attrs["type"] = "isotropic"

    elif isinstance(data, ACEtk.continuous.DistributionGivenElsewhere):
        # Angles are given with the energy distribution (e.g. Kalbach-Mann)
        h5_group.attrs["type"] = "energy-correlated"

    else:
        h5_group.attrs["type"] = "tabulated"

        # Check distribution support: all tabulated
        NE = data.number_incident_energies
        for i in range(NE):
            idx = i + 1
            if data.distribution_type(idx) != ACEtk.AngularDistributionType.Tabulated:
                print_error("Angular distribution is not all-tabulated")

        # Incident energy
        energy = np.array(data.incident_energies)
        energy = h5_group.create_dataset("energy", data=energy)
        energy.attrs["unit"] = "MeV"

        # Tabulated distributions, flattened with per-energy offsets
        interpolation = np.zeros(NE, dtype=int)
        offset = np.zeros(NE, dtype=int)
        cosine = []
        pdf = []
        for i, distribution in enumerate(data.distributions):
            interpolation[i] = distribution.interpolation
            offset[i] = len(cosine)
            cosine.extend(distribution.cosines)
            pdf.extend(distribution.pdf)
        cosine = np.array(cosine)
        pdf = np.array(pdf)
        h5_group.create_dataset("offset", data=offset)
        h5_group.create_dataset("value", data=cosine)
        h5_group.create_dataset("pdf", data=pdf)

        if not all(interpolation == 2):
            print_error("Angular distribution is not linearly-interpolable")


def load_energy_distribution(data, h5_group: h5py.Group):
    """Store an ACEtk outgoing-energy distribution into *h5_group*."""
    if isinstance(data, ACEtk.continuous.LevelScatteringDistribution):
        h5_group.attrs["type"] = "level-scattering"

        C1 = np.array(data.C1)
        C1 = h5_group.create_dataset("C1", data=C1)
        C1.attrs["unit"] = "MeV"

        h5_group.create_dataset("C2", data=data.C2)

    elif isinstance(data, ACEtk.continuous.EvaporationSpectrum):
        h5_group.attrs["type"] = "evaporation"

        if not data.interpolation_data.is_linear_linear:
            print_error(
                "Evaporation distribution temperature is not linearly interpolable"
            )

        energy = np.array(data.energies)
        temperature = np.array(data.temperatures)
        restriction_energy = np.array(data.restriction_energy)

        dataset = h5_group.create_dataset("temperature_energy_grid", data=energy)
        dataset.attrs["unit"] = "MeV"
        dataset = h5_group.create_dataset("temperature", data=temperature)
        dataset.attrs["unit"] = "MeV"
        dataset = h5_group.create_dataset("restriction_energy", data=restriction_energy)
        dataset.attrs["unit"] = "MeV"

    elif isinstance(data, ACEtk.continuous.SimpleMaxwellianFissionSpectrum):
        h5_group.attrs["type"] = "maxwellian"

        if all(np.array(data.interpolation_data.interpolants) == 2):
            interpolation = "linear"
        elif all(np.array(data.interpolation_data.interpolants) == 5):
            interpolation = "log"
        else:
            print_error(
                "Unsupported temperature interpolation law in Maxwellian distribution"
            )

        energy = np.array(data.energies)
        temperature = np.array(data.temperatures)
        restriction_energy = np.array(data.restriction_energy)

        h5_group.create_dataset("temperature_interpolation", data=interpolation)
        dataset = h5_group.create_dataset("temperature_energy_grid", data=energy)
        dataset.attrs["unit"] = "MeV"
        dataset = h5_group.create_dataset("temperature", data=temperature)
        dataset.attrs["unit"] = "MeV"
        dataset = h5_group.create_dataset("restriction_energy", data=restriction_energy)
        dataset.attrs["unit"] = "MeV"

    elif isinstance(data, ACEtk.continuous.OutgoingEnergyDistributionData):
        h5_group.attrs["type"] = "tabulated"

        if not data.interpolation_data.is_linear_linear:
            print_error(
                "Non-linearly-interpolated energy distribution is not supported"
            )

        # Incident energy
        energy = np.array(data.incident_energies)
        energy = h5_group.create_dataset("energy", data=energy)
        energy.attrs["unit"] = "MeV"

        # Tabulated distributions, flattened with per-energy offsets
        NE = data.number_incident_energies
        offset = np.zeros(NE, dtype=int)
        energy_out = []
        pdf = []
        for i in range(NE):
            distribution = data.distribution(i + 1)
            offset[i] = len(energy_out)
            energy_out.extend(distribution.outgoing_energies)
            pdf.extend(distribution.pdf)

        energy_out = np.array(energy_out)
        pdf = np.array(pdf)

        h5_group.create_dataset("offset", data=offset)
        dataset = h5_group.create_dataset("value", data=energy_out)
        # FIX: was `["MeV"]` (a list); every other unit attribute is the string
        dataset.attrs["unit"] = "MeV"
        h5_group.create_dataset("pdf", data=pdf)

    elif isinstance(data, ACEtk.continuous.KalbachMannDistributionData):
        h5_group.attrs["type"] = "kalbach-mann"

        if not data.interpolation_data.is_linear_linear:
            print_error("Non-linearly-interpolated kalbach-mann is not supported")

        NE = data.number_incident_energies

        # Incident energy
        energy = np.array(data.incident_energies)
        energy = h5_group.create_dataset("energy", data=energy)
        energy.attrs["unit"] = "MeV"

        # Tabulated distributions
        offset = np.zeros(NE, dtype=int)
        energy_out = []
        pdf = []
        precompound_factor = []
        angular_slope = []
        for i, distribution in enumerate(data.distributions):
            offset[i] = len(pdf)
            energy_out.extend(distribution.outgoing_energies)
            pdf.extend(distribution.pdf)
            precompound_factor.extend(distribution.precompound_fraction_values)
            angular_slope.extend(distribution.angular_distribution_slope_values)

        energy_out = np.array(energy_out)
        pdf = np.array(pdf)
        precompound_factor = np.array(precompound_factor)
        angular_slope = np.array(angular_slope)

        h5_group.create_dataset("offset", data=offset)
        dataset = h5_group.create_dataset("energy_out", data=energy_out)
        dataset.attrs["unit"] = "MeV"
        h5_group.create_dataset("pdf", data=pdf)
        h5_group.create_dataset("precompound_factor", data=precompound_factor)
        h5_group.create_dataset("angular_slope", data=angular_slope)

    elif isinstance(data, ACEtk.continuous.EnergyAngleDistributionData):
        h5_group.attrs["type"] = "energy-angle-tabulated"

        if not data.interpolation_data.is_linear_linear:
            print_error(
                "Non-linearly-interpolated correlated-energy-angle is not supported"
            )

        NE = data.number_incident_energies

        # Incident energy
        energy = np.array(data.incident_energies)
        dataset = h5_group.create_dataset("energy", data=energy)
        dataset.attrs["unit"] = "MeV"

        # Tabulated distributions: outgoing energies plus, for each outgoing
        # energy, a tabulated cosine distribution (doubly flattened)
        offset = np.zeros(NE, dtype=int)
        energy_out = []
        pdf = []
        cosine_offset = []
        cosine = []
        cosine_pdf = []
        for i, distribution in enumerate(data.distributions):
            offset[i] = len(pdf)
            energy_out.extend(distribution.outgoing_energies)
            pdf.extend(distribution.pdf)

            for inner_distribution in distribution.distributions:
                cosine_offset.append(len(cosine_pdf))
                cosine.extend(inner_distribution.cosines)
                cosine_pdf.extend(inner_distribution.pdf)

        energy_out = np.array(energy_out)
        pdf = np.array(pdf)
        cosine_offset = np.array(cosine_offset)
        cosine = np.array(cosine)
        cosine_pdf = np.array(cosine_pdf)

        h5_group.create_dataset("offset", data=offset)
        dataset = h5_group.create_dataset("energy_out", data=energy_out)
        dataset.attrs["unit"] = "MeV"
        h5_group.create_dataset("pdf", data=pdf)
        h5_group.create_dataset("cosine_offset", data=cosine_offset)
        h5_group.create_dataset("cosine", data=cosine)
        h5_group.create_dataset("cosine_pdf", data=cosine_pdf)

    elif isinstance(data, ACEtk.continuous.NBodyPhaseSpaceDistribution):
        h5_group.attrs["type"] = "N-body"

        if data.interpolation != 2:
            print_error("Non-linearly-interpolable N-body energy distribution")

        dataset = h5_group.create_dataset("value", data=data.values)
        dataset.attrs["unit"] = "MeV"
        h5_group.create_dataset("pdf", data=data.pdf)

    else:
        print_error(f"Unsupported energy distribution: {data}")


# ======================================================================================
# Constants
# ======================================================================================

INTERPOLATION_MAP = {2: "linear-linear"}

# ACE file-name extension -> temperature [K] (ENDF/B-VIII.1-style library suffixes)
ACE_TEMPERATURE_LIB81 = {
    "10c": 293.6,
    "11c": 600.0,
    "12c": 900.0,
    "13c": 1200.0,
    "14c": 2500.0,
    "15c": 0.1,
    "16c": 233.15,
    "17c": 273.15,
}

TEMPERATURE_TO_ACELIB81 = {value: key for key, value in ACE_TEMPERATURE_LIB81.items()}

# Element symbols ordered by atomic number (Z = 1..100)
_SYMBOLS = (
    "H He Li Be B C N O F Ne Na Mg Al Si P S Cl Ar K Ca "
    "Sc Ti V Cr Mn Fe Co Ni Cu Zn Ga Ge As Se Br Kr Rb Sr Y Zr "
    "Nb Mo Tc Ru Rh Pd Ag Cd In Sn Sb Te I Xe Cs Ba La Ce Pr Nd "
    "Pm Sm Eu Gd Tb Dy Ho Er Tm Yb Lu Hf Ta W Re Os Ir Pt Au Hg "
    "Tl Pb Bi Po At Rn Fr Ra Ac Th Pa U Np Pu Am Cm Bk Cf Es Fm"
).split()

SYMBOL_TO_Z = {symbol: Z for Z, symbol in enumerate(_SYMBOLS, start=1)}

Z_TO_SYMBOL = {value: key for key, value in SYMBOL_TO_Z.items()}