From f2211fb04d0dac728b985e704d1dc35279a24581 Mon Sep 17 00:00:00 2001 From: saumya1317 Date: Mon, 27 Oct 2025 02:31:17 +0530 Subject: [PATCH 1/5] feat: add weekly/monthly rebalancing option (#4) - Update _should_rebalance to respect W and M frequencies - Refactor run method to use conditional rebalancing logic - Add comprehensive tests for D/W/M rebalancing - Document rebalance_freq parameter in README - All tests pass; daily default unchanged --- README.md | 26 ++++++- .../backtest/vectorized.py | 57 +++++++++++--- tests/test_backtest.py | 74 +++++++++++++++++++ 3 files changed, 144 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index c6d50e8e..c6434c77 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ QuantResearchStarter aims to provide a clean, well-documented starting point for * **Data management** — download market data or generate synthetic price series for experiments. * **Factor library** — example implementations of momentum, value, size, and volatility factors. -* **Vectorized backtesting engine** — supports transaction costs, slippage, and portfolio constraints. +* **Vectorized backtesting engine** — supports transaction costs, slippage, portfolio constraints, and configurable rebalancing frequencies (daily, weekly, monthly). * **Risk & performance analytics** — returns, drawdowns, Sharpe, turnover, and other risk metrics. * **CLI & scripts** — small tools to generate data, compute factors, and run backtests from the terminal. * **Production-ready utilities** — type hints, tests, continuous integration, and documentation scaffolding. 
@@ -93,6 +93,30 @@ results = bt.run() print(results.performance.summary()) ``` +### Rebalancing Frequency + +The backtester supports different rebalancing frequencies to match your strategy needs: + +```python +from quant_research_starter.backtest import VectorizedBacktest + +# Daily rebalancing (default) +bt_daily = VectorizedBacktest(prices, signals, rebalance_freq="D") + +# Weekly rebalancing (reduces turnover and transaction costs) +bt_weekly = VectorizedBacktest(prices, signals, rebalance_freq="W") + +# Monthly rebalancing (lowest turnover) +bt_monthly = VectorizedBacktest(prices, signals, rebalance_freq="M") + +results = bt_monthly.run() +``` + +Supported frequencies: +- `"D"`: Daily rebalancing (default) +- `"W"`: Weekly rebalancing (rebalances when the week changes) +- `"M"`: Monthly rebalancing (rebalances when the month changes) + > The code above is illustrative—see `examples/` for fully working notebooks and scripts. --- diff --git a/src/quant_research_starter/backtest/vectorized.py b/src/quant_research_starter/backtest/vectorized.py index f5ea7826..5537dbc5 100644 --- a/src/quant_research_starter/backtest/vectorized.py +++ b/src/quant_research_starter/backtest/vectorized.py @@ -64,16 +64,26 @@ def run(self, weight_scheme: str = "rank") -> Dict: """ print("Running backtest...") - # Vectorized returns-based backtest with daily rebalancing + # Vectorized returns-based backtest with configurable rebalancing returns_df = self.prices.pct_change().dropna() aligned_signals = self.signals.loc[returns_df.index] - # Compute daily target weights from signals - weights = aligned_signals.apply( - lambda row: self._calculate_weights(row, weight_scheme), axis=1 - ) - # Ensure full DataFrame with same columns order - weights = weights.reindex(columns=self.prices.columns).fillna(0.0) + # Track rebalancing + prev_rebalance_date = None + current_weights = pd.Series(0.0, index=self.prices.columns) + + # Compute daily weights from signals (rebalance only on rebalance dates) 
+ weights_list = [] + for date in returns_df.index: + if self._should_rebalance(date, prev_rebalance_date): + # Rebalance: compute new target weights + current_weights = self._calculate_weights(aligned_signals.loc[date], weight_scheme) + prev_rebalance_date = date + + # Append current weights (maintain between rebalances) + weights_list.append(current_weights) + + weights = pd.DataFrame(weights_list, index=returns_df.index, columns=self.prices.columns).fillna(0.0) # Previous day weights for PnL calculation weights_prev = weights.shift(1).fillna(0.0) @@ -104,11 +114,34 @@ def run(self, weight_scheme: str = "rank") -> Dict: return self._generate_results() - def _should_rebalance(self, date: pd.Timestamp) -> bool: - """Check if we should rebalance on given date.""" - # Simple daily rebalancing for now - # Could be extended for weekly/monthly rebalancing - return True + def _should_rebalance(self, date: pd.Timestamp, prev_rebalance_date: Optional[pd.Timestamp] = None) -> bool: + """Check if we should rebalance on given date. + + Args: + date: Current date to check + prev_rebalance_date: Last rebalance date (None for first rebalance) + + Returns: + True if should rebalance, False otherwise + """ + # Always rebalance on first date + if prev_rebalance_date is None: + return True + + if self.rebalance_freq == "D": + # Daily rebalancing + return True + elif self.rebalance_freq == "W": + # Weekly rebalancing - rebalance if week changed + return date.isocalendar()[1] != prev_rebalance_date.isocalendar()[1] or \ + date.year != prev_rebalance_date.year + elif self.rebalance_freq == "M": + # Monthly rebalancing - rebalance if month changed + return date.month != prev_rebalance_date.month or \ + date.year != prev_rebalance_date.year + else: + raise ValueError(f"Unsupported rebalance frequency: {self.rebalance_freq}. 
" + f"Supported frequencies: 'D' (daily), 'W' (weekly), 'M' (monthly)") def _calculate_weights(self, signals: pd.Series, scheme: str) -> pd.Series: """Convert signals to portfolio weights.""" diff --git a/tests/test_backtest.py b/tests/test_backtest.py index 1347cbf3..4dd95f37 100644 --- a/tests/test_backtest.py +++ b/tests/test_backtest.py @@ -125,3 +125,77 @@ def test_transaction_costs(self, sample_data): # With costs should have lower final value (or equal) assert results_with_cost["final_value"] <= results_no_cost["final_value"] + + def test_rebalance_frequency_daily(self, sample_data): + """Test daily rebalancing (default behavior).""" + prices, signals = sample_data + backtest = VectorizedBacktest(prices, signals, rebalance_freq="D") + results = backtest.run() + + # Check that backtest runs successfully + assert results["final_value"] > 0 + assert len(results["portfolio_value"]) == len(prices) + + def test_rebalance_frequency_weekly(self, sample_data): + """Test weekly rebalancing.""" + prices, signals = sample_data + backtest = VectorizedBacktest(prices, signals, rebalance_freq="W") + results = backtest.run() + + # Check that backtest runs successfully + assert results["final_value"] > 0 + assert len(results["portfolio_value"]) == len(prices) + + # Weekly rebalancing should result in fewer position changes + # Count the number of times weights change + positions = results["positions"] + position_changes = (positions.diff().abs().sum(axis=1) > 0).sum() + + # Should be significantly fewer than daily (100 days) + # Approximately ~14 weeks in 100 days + assert position_changes < len(prices) - 1 + + def test_rebalance_frequency_monthly(self, sample_data): + """Test monthly rebalancing.""" + prices, signals = sample_data + backtest = VectorizedBacktest(prices, signals, rebalance_freq="M") + results = backtest.run() + + # Check that backtest runs successfully + assert results["final_value"] > 0 + assert len(results["portfolio_value"]) == len(prices) + + # Monthly 
rebalancing should result in fewer position changes than weekly + positions = results["positions"] + position_changes = (positions.diff().abs().sum(axis=1) > 0).sum() + + # Should be significantly fewer than daily + # Approximately ~3 months in 100 days + assert position_changes < len(prices) - 1 + + def test_rebalance_frequency_invalid(self, sample_data): + """Test that invalid rebalance frequency raises error.""" + prices, signals = sample_data + backtest = VectorizedBacktest(prices, signals, rebalance_freq="X") + + with pytest.raises(ValueError, match="Unsupported rebalance frequency"): + backtest.run() + + def test_rebalance_reduces_turnover(self, sample_data): + """Test that less frequent rebalancing reduces turnover.""" + prices, signals = sample_data + + # Daily rebalancing + backtest_daily = VectorizedBacktest(prices, signals, rebalance_freq="D", transaction_cost=0.001) + results_daily = backtest_daily.run() + + # Monthly rebalancing + backtest_monthly = VectorizedBacktest(prices, signals, rebalance_freq="M", transaction_cost=0.001) + results_monthly = backtest_monthly.run() + + # Count position changes as proxy for turnover + daily_changes = (results_daily["positions"].diff().abs().sum(axis=1) > 0).sum() + monthly_changes = (results_monthly["positions"].diff().abs().sum(axis=1) > 0).sum() + + # Monthly should have fewer rebalances + assert monthly_changes < daily_changes From 34917f86c72654570bbc3b8cbbe76ed05356cf03 Mon Sep 17 00:00:00 2001 From: saumya1317 Date: Mon, 27 Oct 2025 02:34:39 +0530 Subject: [PATCH 2/5] chore: update build artifacts and coverage data --- .coverage | Bin 53248 -> 53248 bytes src/quant_research_starter.egg-info/PKG-INFO | 197 ++++++++++++++++-- .../SOURCES.txt | 2 + 3 files changed, 185 insertions(+), 14 deletions(-) diff --git a/.coverage b/.coverage index 8585ff2429b17c697b5b83e6dc8bc01a68dce58b..75041d0e7d1ba93230cda4eda50e51f926d1a481 100644 GIT binary patch literal 53248 
zcmeI4UyR&F9mnmpZ@u1sv&khKY1+g{w3kaRms~+YBt>n=9U>%ZnkK2LWYdk;J9p>Y zuDxe_FF7RCZdwru2_BFtpsHwiLP7`u5k-o4BBBogBtnH$@c;!?G*Sf=2@o`u!f*WN zb~njYd5F^_e68CZk3BQr`F&=7GoJO?2M_GC18&T?Uds%OPe>I>mZirGLy{z&J|+4j zn?^fo5}{D;+h1u{mnObER$@;{#q3ul_DpF%+gg0N^ibhav88{d@NLc1Ejob>1V8`; zK;ZwIK=(jV8(FtbKK-S@tT%b!nHKk=@6zrkr*|EiHV*Cj?7nFuI%aIj8?&nKgnro}}*hydfq!I!05SNH`JZ7kf3kLAe4xOGUIjyJdPy z#ynpd4->>{0Y4Q)2dEHk&pIN7u_?|zW_Wytd)%?OAJt-$-5AS{J^yT88yOy!&+yPD z)3fNWFklTG=$W!-h6=J=uOVt*_e{r{$ouJc$72N>Z8-ke1n{E@;LJhRnBn}ti6mvnlguV zb~>#(AwSV+wMFAljz~CgSIW6k-oTBGz3H~OL0k6h9ca!3xoFWSSI4mzI()hD$Bkaj z@z4y%9XEs#)B_$YWJf+dEKAfY`6+*)N!MF7vlF;syhe>*o20Me3#)S4$i|KGnT4>U z2t2jA9r(*T`aq{yKDJBje3X47zILYEP_ji_;`H~0I@wgiUh2}d*B zI3d;aL+x#%vQ!77-gN7OeJ$)UJ|_pEu^>q{({U`}D!#D6au>O0&hm-)Y&0x<*d2+w z&ZM3lnclck*IDiYwaMk(e0N<|8(F_zKCOq{C~mDd&<46t(vmkAMw3%5Zu%WB8Av{8 zkW6y)M#;oS;xlB*#OZ32@lLL*soKcewQ@J|2r({+&(MGo;z-X0u9*mK zzWQx5m@}@gyEsf0A05h$>|DE2#YGirlj1x3{LxHUqG9(Gb5F0nQ31Vp!k$u%g~`dipKyKO zrfF47TmFvZs4+Tc`eGFFM~%AcHo56Ua|7krk zQEJO`=QPxJbY>~U2*cqd7rsmV6-ZGPF$I;9{C%NWiufOc40hg|J`umAs*M~zy zBLy+IMJ-s5C?u)kYn}XgT0mS|PHJT-{Z?Ff;KPvAo{kBvqzFT~@xVmt*ur2@d{2~2MdRtd=ujF=UzoCTK zKmY_l00ck)1a5}F{2Ep29^Jj8df2B28mqq9X)RUhDZVMsUgXs&?#~BqySmUZP16ag z2RiiqB=^=Y&X`uVwk(ia1L5WzcCivBA zB)Cu}!P&F~lis)JHtBsE^lF18l5Cbp@>uGU;+bjDe8n`IFEP-!zbM4qLYmE&h_&1n zcY;of1lxHM6v%Ye(q43NJGhnd(w&Yd=hM`B$%4Jhk|_#=qaRTr`3RU z)0z*sA5<5)m{!T@8)RWSLz<~M;j(6*m9sJ_9!$&1J|S`ce~s3V*f-fjwnJe)E8)Izy<;!00JNY0w8cx1e$91sGQbZo~#+- z1@jtP?H*1|EW8+tm4+K?cW-J+vB+vm?e0lUBwGKkO0|8GC3(F5ue8;!m6}0`SlvcT<;eNCW~P00JNY0w4eaAOHd&00JNY0(UC` zMOLJexc@J+_a*wn1_B@e0w4eaAOHd&00JNY0w4eaAaD;7P?VA`?*CWVu*Cku-etdL zzhFOM-=lW{e2txEi)@Y^W&7A8?30uL8wh{^2!H?xfB*=900@8p2!H?x+*<@TR#aJk zta9Sjch3F(_5D{b|KVWuS1-K%$o-H0?x|NM&b>dH$ude%W}C}p3OAIGWS^dX{H+Vc zOsPci`%6c%$GiI;e=S=mQuL$6BiX`KLn#(0a$jMy^2Qe?U)q(=Q|u%8i$8pEkDS-V zzJBE2pL;^laF8_W1Gv}m-{eutx_pL3`{7(;^Klk><>XqX!Ud?7i z(#+x3Kys3Isp^1V8`;KmY_l00ck)1V8`; zK;VueAnyNT{eQ;GGd6or8R2!H?x VfB*=900@8p2!H?xfWRF`;D6ciPe}j( delta 1323 
zcmZva%TE(g6vkWT@|f2>g{f_Tn1U<_G#HB;ON^ocBh_}G;ENb0Elgx$+i7TBvB zkc}i5XK6I}4-ieGiMA_UX_UmJ8dth-2OAPyn0Py(WNwG;_dDl1=brnmZE3hgS+>bmY zG%F}KxYRr%8YmcLy>2fy+edq{ zW>(J??`0`Y3KS1H-J-o|Gm|lLcl4F|^1RzQohsPQ9d2h$s~=L+Xs6mv@qp9WK4ovA zlq)XlMPrfH%(l@Q=8bkusF2F$GrE>BQ%m+1Gp)^|>U9bv5Y?Gm*!?|@HRd>4GQzF_ zY5q&NOq@kz$Y~a1BPJYirGtp^M>sqF zwqqKR0mj)rA``K8*9+|$SH#*}qiEfN-*t(Y-R?{nx`xO|(-xowvFq6f0n;Qrg-DQh z<|0zr*E;)71U>Kv_TUSAfH$xWPv9}ELm4ul!8DA+83@u`?P5#Wd>cB>AuoTH8|HrR dstXm*sCD(+Ut##i*Mh#i`r_mL%HF1L{694Rs)_&r diff --git a/src/quant_research_starter.egg-info/PKG-INFO b/src/quant_research_starter.egg-info/PKG-INFO index e934ef4e..189a179b 100644 --- a/src/quant_research_starter.egg-info/PKG-INFO +++ b/src/quant_research_starter.egg-info/PKG-INFO @@ -43,24 +43,43 @@ Dynamic: license-file # QuantResearchStarter -A modular, open-source quantitative research and backtesting framework designed for clarity and extensibility. Perfect for researchers, students, and developers interested in quantitative finance. - -![Python Version](https://img.shields.io/badge/python-3.10%2B-blue) -![License](https://img.shields.io/badge/license-MIT-green) +[![Python Version](https://img.shields.io/badge/python-3.10%2B-blue)](https://www.python.org/) +[![License: MIT](https://img.shields.io/badge/license-MIT-green)](LICENSE) [![CI](https://github.com/username/QuantResearchStarter/actions/workflows/ci.yml/badge.svg)](https://github.com/username/QuantResearchStarter/actions) -## Features +A modular, open-source quantitative research and backtesting framework built for clarity, reproducibility, and extensibility. Ideal for researchers, students, and engineers building and testing systematic strategies. + +--- + +## Why this project + +QuantResearchStarter aims to provide a clean, well-documented starting point for quantitative research and backtesting. It focuses on: + +* **Readability**: idiomatic Python, type hints, and small modules you can read and change quickly. 
+* **Testability**: deterministic vectorized backtests with unit tests and CI. +* **Extensibility**: plug-in friendly factor & data adapters so you can try new ideas fast. + +--- + +## Key features + +* **Data management** — download market data or generate synthetic price series for experiments. +* **Factor library** — example implementations of momentum, value, size, and volatility factors. +* **Vectorized backtesting engine** — supports transaction costs, slippage, portfolio constraints, and configurable rebalancing frequencies (daily, weekly, monthly). +* **Risk & performance analytics** — returns, drawdowns, Sharpe, turnover, and other risk metrics. +* **CLI & scripts** — small tools to generate data, compute factors, and run backtests from the terminal. +* **Production-ready utilities** — type hints, tests, continuous integration, and documentation scaffolding. -- **Data Management**: Download real data or generate synthetic data for testing -- **Factor Library**: Implement momentum, value, size, and volatility factors -- **Backtesting Engine**: Vectorized backtester with transaction costs and constraints -- **Risk Metrics**: Comprehensive performance and risk analytics -- **Modular Design**: Easy to extend with new factors and strategies -- **Production Ready**: Type hints, tests, CI/CD, and documentation +--- -## Quick Start +## Quick start -### Installation +### Requirements + +* Python 3.10+ +* pip + +### Install locally ```bash # Clone the repository @@ -70,5 +89,155 @@ cd QuantResearchStarter # Install package in development mode pip install -e . 
-# Install development dependencies +# Install development dependencies (tests, linters, docs) pip install -e ".[dev]" + +# Optional UI dependencies +pip install streamlit plotly +``` + +### Demo (one-line) + +```bash +make demo +``` + +### Step-by-step demo + +```bash +# generate synthetic sample price series +qrs generate-data -o data_sample/sample_prices.csv -s 5 -d 365 + +# compute example factors +qrs compute-factors -d data_sample/sample_prices.csv -f momentum -f value -o output/factors.csv + +# run a backtest +qrs backtest -d data_sample/sample_prices.csv -s output/factors.csv -o output/backtest_results.json + +# optional: start the Streamlit dashboard +streamlit run src/quant_research_starter/dashboard/streamlit_app.py +``` + +--- + +## Example: small strategy (concept) + +```python +from quant_research_starter.backtest import Backtester +from quant_research_starter.data import load_prices +from quant_research_starter.factors import Momentum + +prices = load_prices("data_sample/sample_prices.csv") +factor = Momentum(window=63) +scores = factor.compute(prices) + +bt = Backtester(prices, signals=scores, capital=1_000_000) +results = bt.run() +print(results.performance.summary()) +``` + +### Rebalancing Frequency + +The backtester supports different rebalancing frequencies to match your strategy needs: + +```python +from quant_research_starter.backtest import VectorizedBacktest + +# Daily rebalancing (default) +bt_daily = VectorizedBacktest(prices, signals, rebalance_freq="D") + +# Weekly rebalancing (reduces turnover and transaction costs) +bt_weekly = VectorizedBacktest(prices, signals, rebalance_freq="W") + +# Monthly rebalancing (lowest turnover) +bt_monthly = VectorizedBacktest(prices, signals, rebalance_freq="M") + +results = bt_monthly.run() +``` + +Supported frequencies: +- `"D"`: Daily rebalancing (default) +- `"W"`: Weekly rebalancing (rebalances when the week changes) +- `"M"`: Monthly rebalancing (rebalances when the month changes) + +> The code 
above is illustrative—see `examples/` for fully working notebooks and scripts. + +--- + +## CLI reference + +Run `qrs --help` or `qrs <command> --help` for full usage. Main commands include: + +* `qrs generate-data` — create synthetic price series or download data from adapters +* `qrs compute-factors` — calculate and export factor scores +* `qrs backtest` — run the vectorized backtest and export results + +--- + +## Project structure (overview) + +``` +QuantResearchStarter/ +├─ src/quant_research_starter/ +│ ├─ data/ # data loaders & adapters +│ ├─ factors/ # factor implementations +│ ├─ backtest/ # backtester & portfolio logic +│ ├─ analytics/ # performance and risk metrics +│ ├─ cli/ # command line entry points +│ └─ dashboard/ # optional Streamlit dashboard +├─ examples/ # runnable notebooks & example strategies +├─ tests/ # unit + integration tests +└─ docs/ # documentation source +``` + +--- + +## Tests & CI + +We include unit tests and a CI workflow (GitHub Actions). Run tests locally with: + +```bash +pytest -q +``` + +The CI pipeline runs linting, unit tests, and builds docs on push/PR. + +--- + +## Contributing + +Contributions are very welcome. Please follow these steps: + +1. Fork the repository +2. Create a feature branch +3. Add tests for new behavior +4. Open a pull request with a clear description and rationale + +Please review `CONTRIBUTING.md` and the `CODE_OF_CONDUCT.md` before submitting. + +--- + +## AI policy — short & practical + +**Yes — you are allowed to use AI tools** (ChatGPT, Copilot, Codeium, etc.) to help develop, prototype, or document code in this repository. + +A few friendly guidelines: + +* **Be transparent** when a contribution is substantially generated by an AI assistant — add a short note in the PR or commit message (e.g., "Generated with ChatGPT; reviewed and adapted by <your name>"). +* **Review and test** all AI-generated code. Treat it as a helpful draft, not final production-quality code. 
+* **Follow licensing** and attribution rules for any external snippets the AI suggests. Don’t paste large verbatim copyrighted material. +* **Security & correctness**: double-check numerical logic, data handling, and anything that affects trading decisions. + +This policy is intentionally permissive: we want the community to move fast while keeping quality and safety in mind. + +--- + +## License + +This project is licensed under the MIT License — see the `LICENSE` file for details. + +--- + +## Acknowledgements + +Built with inspiration from open-source quant libraries and the research community. If you use this project in papers or public work, a short citation or mention is appreciated. diff --git a/src/quant_research_starter.egg-info/SOURCES.txt b/src/quant_research_starter.egg-info/SOURCES.txt index 8364ef9b..1578571b 100644 --- a/src/quant_research_starter.egg-info/SOURCES.txt +++ b/src/quant_research_starter.egg-info/SOURCES.txt @@ -12,6 +12,7 @@ src/quant_research_starter.egg-info/requires.txt src/quant_research_starter.egg-info/top_level.txt src/quant_research_starter/backtest/__init__.py src/quant_research_starter/backtest/vectorized.py +src/quant_research_starter/dashboard/streamlit_app.py src/quant_research_starter/data/__init__.py src/quant_research_starter/data/downloaders.py src/quant_research_starter/data/init.py @@ -19,6 +20,7 @@ src/quant_research_starter/data/sample_loader.py src/quant_research_starter/data/synthetic.py src/quant_research_starter/factors/__init__.py src/quant_research_starter/factors/base.py +src/quant_research_starter/factors/bollinger.py src/quant_research_starter/factors/init.py src/quant_research_starter/factors/momentum.py src/quant_research_starter/factors/size.py From aaa94dd944bdc2e171e7a01cf6a2233c57b14987 Mon Sep 17 00:00:00 2001 From: saumya1317 Date: Mon, 27 Oct 2025 02:38:11 +0530 Subject: [PATCH 3/5] fix: remove trailing whitespace (ruff W293) --- src/quant_research_starter/backtest/vectorized.py | 12 ++++++------ 
tests/test_backtest.py | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/quant_research_starter/backtest/vectorized.py b/src/quant_research_starter/backtest/vectorized.py index 5537dbc5..88a7d107 100644 --- a/src/quant_research_starter/backtest/vectorized.py +++ b/src/quant_research_starter/backtest/vectorized.py @@ -71,7 +71,7 @@ def run(self, weight_scheme: str = "rank") -> Dict: # Track rebalancing prev_rebalance_date = None current_weights = pd.Series(0.0, index=self.prices.columns) - + # Compute daily weights from signals (rebalance only on rebalance dates) weights_list = [] for date in returns_df.index: @@ -79,10 +79,10 @@ def run(self, weight_scheme: str = "rank") -> Dict: # Rebalance: compute new target weights current_weights = self._calculate_weights(aligned_signals.loc[date], weight_scheme) prev_rebalance_date = date - + # Append current weights (maintain between rebalances) weights_list.append(current_weights) - + weights = pd.DataFrame(weights_list, index=returns_df.index, columns=self.prices.columns).fillna(0.0) # Previous day weights for PnL calculation @@ -116,18 +116,18 @@ def run(self, weight_scheme: str = "rank") -> Dict: def _should_rebalance(self, date: pd.Timestamp, prev_rebalance_date: Optional[pd.Timestamp] = None) -> bool: """Check if we should rebalance on given date. 
- + Args: date: Current date to check prev_rebalance_date: Last rebalance date (None for first rebalance) - + Returns: True if should rebalance, False otherwise """ # Always rebalance on first date if prev_rebalance_date is None: return True - + if self.rebalance_freq == "D": # Daily rebalancing return True diff --git a/tests/test_backtest.py b/tests/test_backtest.py index 4dd95f37..954efac2 100644 --- a/tests/test_backtest.py +++ b/tests/test_backtest.py @@ -150,7 +150,7 @@ def test_rebalance_frequency_weekly(self, sample_data): # Count the number of times weights change positions = results["positions"] position_changes = (positions.diff().abs().sum(axis=1) > 0).sum() - + # Should be significantly fewer than daily (100 days) # Approximately ~14 weeks in 100 days assert position_changes < len(prices) - 1 @@ -168,7 +168,7 @@ def test_rebalance_frequency_monthly(self, sample_data): # Monthly rebalancing should result in fewer position changes than weekly positions = results["positions"] position_changes = (positions.diff().abs().sum(axis=1) > 0).sum() - + # Should be significantly fewer than daily # Approximately ~3 months in 100 days assert position_changes < len(prices) - 1 @@ -177,7 +177,7 @@ def test_rebalance_frequency_invalid(self, sample_data): """Test that invalid rebalance frequency raises error.""" prices, signals = sample_data backtest = VectorizedBacktest(prices, signals, rebalance_freq="X") - + with pytest.raises(ValueError, match="Unsupported rebalance frequency"): backtest.run() From 171375f5958509e6c342ac819204b90ad82ea974 Mon Sep 17 00:00:00 2001 From: saumya1317 Date: Mon, 27 Oct 2025 20:03:30 +0530 Subject: [PATCH 4/5] fix: resolve merge conflict and fix ruff W293 linting errors --- .coverage | Bin 53248 -> 0 bytes .../factors/__init__.py | 20 ------------------ 2 files changed, 20 deletions(-) delete mode 100644 .coverage diff --git a/.coverage b/.coverage deleted file mode 100644 index 
75041d0e7d1ba93230cda4eda50e51f926d1a481..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 53248 zcmeI4UyR&F9mnmpZ@u1sv&khKY1+g{w3kaRms~+YBt>n=9U>%ZnkK2LWYdk;J9p>Y zuDxe_FF7RCZdwru2_BFtpsHwiLP7`u5k-o4BBBogBtnH$@c;!?G*Sf=2@o`u!f*WN zb~njYd5F^_e68CZk3BQr`F&=7GoJO?2M_GC18&T?Uds%OPe>I>mZirGLy{z&J|+4j zn?^fo5}{D;+h1u{mnObER$@;{#q3ul_DpF%+gg0N^ibhav88{d@NLc1Ejob>1V8`; zK;ZwIK=(jV8(FtbKK-S@tT%b!nHKk=@6zrkr*|EiHV*Cj?7nFuI%aIj8?&nKgnro}}*hydfq!I!05SNH`JZ7kf3kLAe4xOGUIjyJdPy z#ynpd4->>{0Y4Q)2dEHk&pIN7u_?|zW_Wytd)%?OAJt-$-5AS{J^yT88yOy!&+yPD z)3fNWFklTG=$W!-h6=J=uOVt*_e{r{$ouJc$72N>Z8-ke1n{E@;LJhRnBn}ti6mvnlguV zb~>#(AwSV+wMFAljz~CgSIW6k-oTBGz3H~OL0k6h9ca!3xoFWSSI4mzI()hD$Bkaj z@z4y%9XEs#)B_$YWJf+dEKAfY`6+*)N!MF7vlF;syhe>*o20Me3#)S4$i|KGnT4>U z2t2jA9r(*T`aq{yKDJBje3X47zILYEP_ji_;`H~0I@wgiUh2}d*B zI3d;aL+x#%vQ!77-gN7OeJ$)UJ|_pEu^>q{({U`}D!#D6au>O0&hm-)Y&0x<*d2+w z&ZM3lnclck*IDiYwaMk(e0N<|8(F_zKCOq{C~mDd&<46t(vmkAMw3%5Zu%WB8Av{8 zkW6y)M#;oS;xlB*#OZ32@lLL*soKcewQ@J|2r({+&(MGo;z-X0u9*mK zzWQx5m@}@gyEsf0A05h$>|DE2#YGirlj1x3{LxHUqG9(Gb5F0nQ31Vp!k$u%g~`dipKyKO zrfF47TmFvZs4+Tc`eGFFM~%AcHo56Ua|7krk zQEJO`=QPxJbY>~U2*cqd7rsmV6-ZGPF$I;9{C%NWiufOc40hg|J`umAs*M~zy zBLy+IMJ-s5C?u)kYn}XgT0mS|PHJT-{Z?Ff;KPvAo{kBvqzFT~@xVmt*ur2@d{2~2MdRtd=ujF=UzoCTK zKmY_l00ck)1a5}F{2Ep29^Jj8df2B28mqq9X)RUhDZVMsUgXs&?#~BqySmUZP16ag z2RiiqB=^=Y&X`uVwk(ia1L5WzcCivBA zB)Cu}!P&F~lis)JHtBsE^lF18l5Cbp@>uGU;+bjDe8n`IFEP-!zbM4qLYmE&h_&1n zcY;of1lxHM6v%Ye(q43NJGhnd(w&Yd=hM`B$%4Jhk|_#=qaRTr`3RU z)0z*sA5<5)m{!T@8)RWSLz<~M;j(6*m9sJ_9!$&1J|S`ce~s3V*f-fjwnJe)E8)Izy<;!00JNY0w8cx1e$91sGQbZo~#+- z1@jtP?H*1|EW8+tm4+K?cW-J+vB+vm?e0lUBwGKkO0|8GC3(F5ue8;!m6}0`SlvcT<;eNCW~P00JNY0w4eaAOHd&00JNY0(UC` zMOLJexc@J+_a*wn1_B@e0w4eaAOHd&00JNY0w4eaAaD;7P?VA`?*CWVu*Cku-etdL zzhFOM-=lW{e2txEi)@Y^W&7A8?30uL8wh{^2!H?xfB*=900@8p2!H?x+*<@TR#aJk zta9Sjch3F(_5D{b|KVWuS1-K%$o-H0?x|NM&b>dH$ude%W}C}p3OAIGWS^dX{H+Vc zOsPci`%6c%$GiI;e=S=mQuL$6BiX`KLn#(0a$jMy^2Qe?U)q(=Q|u%8i$8pEkDS-V 
zzJBE2pL;^laF8_W1Gv}m-{eutx_pL3`{7(;^Klk><>XqX!Ud?7i z(#+x3Kys3Isp^1V8`;KmY_l00ck)1V8`; zK;VueAnyNT{eQ;GGd6or8R2!H?x VfB*=900@8p2!H?xfWRF`;D6ciPe}j( diff --git a/src/quant_research_starter/factors/__init__.py b/src/quant_research_starter/factors/__init__.py index a95d6320..feea7ea8 100644 --- a/src/quant_research_starter/factors/__init__.py +++ b/src/quant_research_starter/factors/__init__.py @@ -1,22 +1,3 @@ -<<<<<<< HEAD -"""Factors module public API.""" - -from .base import Factor -from .momentum import CrossSectionalMomentum, MomentumFactor -from .size import SizeFactor -from .value import ValueFactor -from .volatility import IdiosyncraticVolatility, VolatilityFactor - -__all__ = [ - "Factor", - "MomentumFactor", - "CrossSectionalMomentum", - "ValueFactor", - "SizeFactor", - "VolatilityFactor", - "IdiosyncraticVolatility", -] -======= """Factors module public API.""" from .base import Factor @@ -36,4 +17,3 @@ "IdiosyncraticVolatility", "VolatilityFactor", ] ->>>>>>> 8d5d7c51f2ba0fc6db1b9abe844cee2182014d3c From d9d48f6e1f6283f684130058bdfda2b61bce3d50 Mon Sep 17 00:00:00 2001 From: saumya1317 Date: Mon, 27 Oct 2025 20:10:12 +0530 Subject: [PATCH 5/5] style: apply black formatting to backtest and test files --- .../backtest/vectorized.py | 30 +++++++++++++------ tests/test_backtest.py | 12 ++++++-- 2 files changed, 30 insertions(+), 12 deletions(-) diff --git a/src/quant_research_starter/backtest/vectorized.py b/src/quant_research_starter/backtest/vectorized.py index 88a7d107..4fc2bbf8 100644 --- a/src/quant_research_starter/backtest/vectorized.py +++ b/src/quant_research_starter/backtest/vectorized.py @@ -77,13 +77,17 @@ def run(self, weight_scheme: str = "rank") -> Dict: for date in returns_df.index: if self._should_rebalance(date, prev_rebalance_date): # Rebalance: compute new target weights - current_weights = self._calculate_weights(aligned_signals.loc[date], weight_scheme) + current_weights = self._calculate_weights( + aligned_signals.loc[date], 
weight_scheme + ) prev_rebalance_date = date # Append current weights (maintain between rebalances) weights_list.append(current_weights) - weights = pd.DataFrame(weights_list, index=returns_df.index, columns=self.prices.columns).fillna(0.0) + weights = pd.DataFrame( + weights_list, index=returns_df.index, columns=self.prices.columns + ).fillna(0.0) # Previous day weights for PnL calculation weights_prev = weights.shift(1).fillna(0.0) @@ -114,7 +118,9 @@ def run(self, weight_scheme: str = "rank") -> Dict: return self._generate_results() - def _should_rebalance(self, date: pd.Timestamp, prev_rebalance_date: Optional[pd.Timestamp] = None) -> bool: + def _should_rebalance( + self, date: pd.Timestamp, prev_rebalance_date: Optional[pd.Timestamp] = None + ) -> bool: """Check if we should rebalance on given date. Args: @@ -133,15 +139,21 @@ def _should_rebalance(self, date: pd.Timestamp, prev_rebalance_date: Optional[pd return True elif self.rebalance_freq == "W": # Weekly rebalancing - rebalance if week changed - return date.isocalendar()[1] != prev_rebalance_date.isocalendar()[1] or \ - date.year != prev_rebalance_date.year + return ( + date.isocalendar()[1] != prev_rebalance_date.isocalendar()[1] + or date.year != prev_rebalance_date.year + ) elif self.rebalance_freq == "M": # Monthly rebalancing - rebalance if month changed - return date.month != prev_rebalance_date.month or \ - date.year != prev_rebalance_date.year + return ( + date.month != prev_rebalance_date.month + or date.year != prev_rebalance_date.year + ) else: - raise ValueError(f"Unsupported rebalance frequency: {self.rebalance_freq}. " - f"Supported frequencies: 'D' (daily), 'W' (weekly), 'M' (monthly)") + raise ValueError( + f"Unsupported rebalance frequency: {self.rebalance_freq}. 
" + f"Supported frequencies: 'D' (daily), 'W' (weekly), 'M' (monthly)" + ) def _calculate_weights(self, signals: pd.Series, scheme: str) -> pd.Series: """Convert signals to portfolio weights.""" diff --git a/tests/test_backtest.py b/tests/test_backtest.py index 954efac2..f904a401 100644 --- a/tests/test_backtest.py +++ b/tests/test_backtest.py @@ -186,16 +186,22 @@ def test_rebalance_reduces_turnover(self, sample_data): prices, signals = sample_data # Daily rebalancing - backtest_daily = VectorizedBacktest(prices, signals, rebalance_freq="D", transaction_cost=0.001) + backtest_daily = VectorizedBacktest( + prices, signals, rebalance_freq="D", transaction_cost=0.001 + ) results_daily = backtest_daily.run() # Monthly rebalancing - backtest_monthly = VectorizedBacktest(prices, signals, rebalance_freq="M", transaction_cost=0.001) + backtest_monthly = VectorizedBacktest( + prices, signals, rebalance_freq="M", transaction_cost=0.001 + ) results_monthly = backtest_monthly.run() # Count position changes as proxy for turnover daily_changes = (results_daily["positions"].diff().abs().sum(axis=1) > 0).sum() - monthly_changes = (results_monthly["positions"].diff().abs().sum(axis=1) > 0).sum() + monthly_changes = ( + results_monthly["positions"].diff().abs().sum(axis=1) > 0 + ).sum() # Monthly should have fewer rebalances assert monthly_changes < daily_changes