diff --git a/lectures/_config.yml b/lectures/_config.yml index ab05579b..8493f847 100644 --- a/lectures/_config.yml +++ b/lectures/_config.yml @@ -31,7 +31,7 @@ latex: targetname: quantecon-python-advanced.tex sphinx: - extra_extensions: [sphinx_multitoc_numbering, sphinxext.rediraffe, sphinx_tojupyter, sphinx_exercise, sphinx_togglebutton, sphinx_proof] + extra_extensions: [sphinx_multitoc_numbering, sphinxext.rediraffe, sphinx_tojupyter, sphinx_exercise, sphinx_togglebutton, sphinx_proof, sphinx.ext.intersphinx] config: intersphinx_mapping: intro: diff --git a/lectures/_static/lecture_specific/hansen_jagannathan_1991/hansen_jagannathan_1991_data.json b/lectures/_static/lecture_specific/hansen_jagannathan_1991/hansen_jagannathan_1991_data.json new file mode 100644 index 00000000..001d1f6f --- /dev/null +++ b/lectures/_static/lecture_specific/hansen_jagannathan_1991/hansen_jagannathan_1991_data.json @@ -0,0 +1 @@ +{"annual": {"columns": ["year", "stock", "bond", "consumption"], "index": ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "90", "91", "92", "93", "94"], "data": [[1891.0, 1.26076009559031, 1.1285118622613837, 2505.2209983139105], [1892.0, 0.9849555744758888, 0.9641693077480137, 2571.527148332612], [1893.0, 0.9390728015162085, 1.2509953876569442, 2533.8398938566115], [1894.0, 1.0772937485955516, 1.078120801015998, 2412.1565708898397], [1895.0, 1.0344208960164112, 1.0161735722509198, 2663.096135547202], [1896.0, 1.0607519828153165, 1.0887056905224535, 2606.622799336879], [1897.0, 
1.164793705575722, 1.0048458913400324, 2761.2276374940006], [1898.0, 1.268759447994158, 1.0209149670353503, 2757.6784199906374], [1899.0, 0.8877813244282009, 0.8841637913990887, 3029.8799481466567], [1900.0, 1.2380298392237883, 1.0722379898180472, 3005.068496408957], [1901.0, 1.1650097860017037, 1.0178665654116614, 3309.620984406744], [1902.0, 0.9873460561636554, 0.9551381313900328, 3271.12086504941], [1903.0, 0.8691744739879494, 1.1035053958299639, 3403.7330976587305], [1904.0, 1.278987688321596, 1.0199578531676148, 3383.722322259444], [1905.0, 1.2099644128113878, 1.0417, 3506.9625604068206], [1906.0, 0.9657156884364688, 1.0093366588536368, 3820.044250418694], [1907.0, 0.7793080095234896, 1.085642122060444, 3822.601089962035], [1908.0, 1.3369500533721754, 1.0195897678699035, 3512.700444319252], [1909.0, 1.0494952291004105, 0.9368331133993284, 3820.57858858141], [1910.0, 1.0360068575530894, 1.1285675819368224, 3810.672527432815], [1911.0, 1.0452892312190867, 1.0508263212030209, 3932.740294652187], [1912.0, 0.999491685687433, 0.9725851242346939, 3972.8075025232697], [1913.0, 0.9325806451612901, 1.03537, 4023.963275450602], [1914.0, 0.9345020523557734, 1.036039603960396, 3898.6198131594797], [1915.0, 1.26717400246812, 1.0066009615384615, 3776.1570546663875], [1916.0, 0.965106585685364, 0.9212444444444445, 4059.4665602138475], [1917.0, 0.6898790864308105, 0.8712321428571428, 3921.6120477265004], [1918.0, 0.9908796704913209, 0.8992242424242425, 3905.9534372023554], [1919.0, 1.0193722979439623, 0.9024559585492228, 4027.3943939117557], [1920.0, 0.8765929546402813, 1.089942105263158, 4144.830211686945], [1921.0, 1.227040837556904, 1.207905325443787, 4328.600034403758], [1922.0, 1.2967139595564252, 1.052025, 4426.514896913006], [1923.0, 1.0212898616613626, 1.0192647398843933, 4746.434487569654], [1924.0, 1.260475651189128, 1.0434, 5001.556115493811], [1925.0, 1.210384302626438, 1.0038832402234636, 4783.358865822076], [1926.0, 1.1392930547713152, 1.0666354285714286, 
5105.5940360554205], [1927.0, 1.3814597532568373, 1.0546531791907514, 5148.574384702256], [1928.0, 1.4837821879284636, 1.0586385964912282, 5200.718746046329], [1929.0, 0.9123089300080449, 1.0601, 5426.7972623848045], [1930.0, 0.8401658222017504, 1.1201037735849058, 5082.415562313239], [1931.0, 0.6345694355706872, 1.138906993006993, 4888.3619380667305], [1932.0, 1.013701316895489, 1.145773643410853, 4426.103756580036], [1933.0, 1.5134632645210924, 0.9915409090909092, 4304.567278914536], [1934.0, 0.8941567139189643, 0.9803911764705882, 4579.622900824867], [1935.0, 1.5144458008576702, 0.9928985507246376, 4827.48204391129], [1936.0, 1.3023565066798615, 0.9860638297872342, 5281.926833064161], [1937.0, 0.6836110466093892, 1.0016957746478874, 5443.862339536304], [1938.0, 1.1667424529493495, 1.0232114285714284, 5314.526515573341], [1939.0, 1.0410359712230215, 1.0128345323741008, 5564.752730599151], [1940.0, 0.8992561840512022, 0.991336170212766, 5805.856595403538], [1941.0, 0.8206236604582365, 0.9028490445859874, 6160.873994429618], [1942.0, 1.1110477944830601, 0.9348467455621302, 5955.341171089541], [1943.0, 1.1994007951425676, 0.9779660919540228, 6041.573731925754], [1944.0, 1.165609443891338, 0.9845662921348314, 6143.04138652481], [1945.0, 1.3542958153780988, 0.985357142857143, 6451.503744327477], [1946.0, 0.7478615491830782, 0.8529451162790698, 7164.175566877387], [1947.0, 0.9346088654754672, 0.9163354430379748, 7157.652521017822], [1948.0, 1.0847184760620365, 1.00083125, 7191.183848813162], [1949.0, 1.1981382978723403, 1.0374127659574468, 7262.631055325057], [1950.0, 1.243096242116655, 0.9374094488188978, 7599.870083434612], [1951.0, 1.1568768736711947, 0.978810566037736, 7590.90088434916], [1952.0, 1.1362661511156973, 1.0200507518796993, 7698.078187051303], [1953.0, 1.0164204918195734, 1.0143598513011154, 7931.965450667205], [1954.0, 1.4696858698818152, 1.025625468164794, 7952.464555894071], [1955.0, 1.281436567164179, 1.014301119402985, 8382.943872767635], [1956.0, 
1.0374349631526254, 1.0021840579710144, 8473.709230523631], [1957.0, 0.9115045201331795, 1.0022853146853146, 8528.457528111863], [1958.0, 1.375940896283376, 1.011256551724138, 8454.613480727261], [1959.0, 1.0652121354927944, 1.026778156996587, 8776.510070496133], [1960.0, 1.0448952000064766, 1.025303355704698, 8837.013605332057], [1961.0, 1.1824525563741908, 1.0222393333333333, 8872.954299546916], [1962.0, 0.9599814833159344, 1.0202960526315792, 9170.22714900666], [1963.0, 1.1905326126642373, 1.018252427184466, 9411.698184837614], [1964.0, 1.1480429642300145, 1.030891346153846, 9839.089668400744], [1965.0, 1.0941468245274255, 1.0248905660377357, 10330.708395054593], [1966.0, 0.9044201277559842, 1.0191465045592707, 10792.733708735357], [1967.0, 1.119156036385166, 1.0183563049853372, 10993.88415466346], [1968.0, 1.0593561433624636, 1.0169654494382023, 11509.848624756905], [1969.0, 0.8627003830845462, 1.0176137566137566, 11820.322193461518], [1970.0, 1.0162132776605872, 1.0362708542713568, 11955.15269101887], [1971.0, 1.1017809261253073, 1.0231795620437956, 12256.037436947081], [1972.0, 1.1354246485690525, 1.0093619718309859, 12867.973360705895], [1973.0, 0.7680297739985633, 0.9866562231759657, 13370.797921716601], [1974.0, 0.7087719984958055, 0.9930898272552784, 13147.831088159355], [1975.0, 1.2983881362385283, 1.0048928057553959, 13320.272905984406], [1976.0, 1.0583635558238078, 1.0046017094017095, 13918.84182978114], [1977.0, 0.8558435603506407, 0.9854208, 14363.677568282039], [1978.0, 1.0624059570981859, 0.9862737920937045, 14836.913057434838], [1979.0, 1.025894889524093, 0.9734066838046272, 15030.523692717241], [1980.0, 1.1221917768454097, 0.9959294252873562, 14816.460418384046], [1981.0, 0.8597263704783233, 1.0852396606574761, 14878.66840161682], [1982.0, 1.242591271269428, 1.1049877300613495, 14944.286223295518], [1983.0, 1.1540856804515411, 1.049694406280667, 15655.812643210249], [1984.0, 1.039889211351507, 1.0731856872037915, 16342.950922576823], [1985.0, 
1.2120875312678512, 1.042967609489051, 17039.720699516736]]}, "monthly": {"columns": ["stock", "bill", "cons_ratio"], "index": ["1959-03-31 00:00:00", "1959-04-30 00:00:00", "1959-05-31 00:00:00", "1959-06-30 00:00:00", "1959-07-31 00:00:00", "1959-08-31 00:00:00", "1959-09-30 00:00:00", "1959-10-31 00:00:00", "1959-11-30 00:00:00", "1959-12-31 00:00:00", "1960-01-31 00:00:00", "1960-02-29 00:00:00", "1960-03-31 00:00:00", "1960-04-30 00:00:00", "1960-05-31 00:00:00", "1960-06-30 00:00:00", "1960-07-31 00:00:00", "1960-08-31 00:00:00", "1960-09-30 00:00:00", "1960-10-31 00:00:00", "1960-11-30 00:00:00", "1960-12-31 00:00:00", "1961-01-31 00:00:00", "1961-02-28 00:00:00", "1961-03-31 00:00:00", "1961-04-30 00:00:00", "1961-05-31 00:00:00", "1961-06-30 00:00:00", "1961-07-31 00:00:00", "1961-08-31 00:00:00", "1961-09-30 00:00:00", "1961-10-31 00:00:00", "1961-11-30 00:00:00", "1961-12-31 00:00:00", "1962-01-31 00:00:00", "1962-02-28 00:00:00", "1962-03-31 00:00:00", "1962-04-30 00:00:00", "1962-05-31 00:00:00", "1962-06-30 00:00:00", "1962-07-31 00:00:00", "1962-08-31 00:00:00", "1962-09-30 00:00:00", "1962-10-31 00:00:00", "1962-11-30 00:00:00", "1962-12-31 00:00:00", "1963-01-31 00:00:00", "1963-02-28 00:00:00", "1963-03-31 00:00:00", "1963-04-30 00:00:00", "1963-05-31 00:00:00", "1963-06-30 00:00:00", "1963-07-31 00:00:00", "1963-08-31 00:00:00", "1963-09-30 00:00:00", "1963-10-31 00:00:00", "1963-11-30 00:00:00", "1963-12-31 00:00:00", "1964-01-31 00:00:00", "1964-02-29 00:00:00", "1964-03-31 00:00:00", "1964-04-30 00:00:00", "1964-05-31 00:00:00", "1964-06-30 00:00:00", "1964-07-31 00:00:00", "1964-08-31 00:00:00", "1964-09-30 00:00:00", "1964-10-31 00:00:00", "1964-11-30 00:00:00", "1964-12-31 00:00:00", "1965-01-31 00:00:00", "1965-02-28 00:00:00", "1965-03-31 00:00:00", "1965-04-30 00:00:00", "1965-05-31 00:00:00", "1965-06-30 00:00:00", "1965-07-31 00:00:00", "1965-08-31 00:00:00", "1965-09-30 00:00:00", "1965-10-31 00:00:00", "1965-11-30 00:00:00", 
"1965-12-31 00:00:00", "1966-01-31 00:00:00", "1966-02-28 00:00:00", "1966-03-31 00:00:00", "1966-04-30 00:00:00", "1966-05-31 00:00:00", "1966-06-30 00:00:00", "1966-07-31 00:00:00", "1966-08-31 00:00:00", "1966-09-30 00:00:00", "1966-10-31 00:00:00", "1966-11-30 00:00:00", "1966-12-31 00:00:00", "1967-01-31 00:00:00", "1967-02-28 00:00:00", "1967-03-31 00:00:00", "1967-04-30 00:00:00", "1967-05-31 00:00:00", "1967-06-30 00:00:00", "1967-07-31 00:00:00", "1967-08-31 00:00:00", "1967-09-30 00:00:00", "1967-10-31 00:00:00", "1967-11-30 00:00:00", "1967-12-31 00:00:00", "1968-01-31 00:00:00", "1968-02-29 00:00:00", "1968-03-31 00:00:00", "1968-04-30 00:00:00", "1968-05-31 00:00:00", "1968-06-30 00:00:00", "1968-07-31 00:00:00", "1968-08-31 00:00:00", "1968-09-30 00:00:00", "1968-10-31 00:00:00", "1968-11-30 00:00:00", "1968-12-31 00:00:00", "1969-01-31 00:00:00", "1969-02-28 00:00:00", "1969-03-31 00:00:00", "1969-04-30 00:00:00", "1969-05-31 00:00:00", "1969-06-30 00:00:00", "1969-07-31 00:00:00", "1969-08-31 00:00:00", "1969-09-30 00:00:00", "1969-10-31 00:00:00", "1969-11-30 00:00:00", "1969-12-31 00:00:00", "1970-01-31 00:00:00", "1970-02-28 00:00:00", "1970-03-31 00:00:00", "1970-04-30 00:00:00", "1970-05-31 00:00:00", "1970-06-30 00:00:00", "1970-07-31 00:00:00", "1970-08-31 00:00:00", "1970-09-30 00:00:00", "1970-10-31 00:00:00", "1970-11-30 00:00:00", "1970-12-31 00:00:00", "1971-01-31 00:00:00", "1971-02-28 00:00:00", "1971-03-31 00:00:00", "1971-04-30 00:00:00", "1971-05-31 00:00:00", "1971-06-30 00:00:00", "1971-07-31 00:00:00", "1971-08-31 00:00:00", "1971-09-30 00:00:00", "1971-10-31 00:00:00", "1971-11-30 00:00:00", "1971-12-31 00:00:00", "1972-01-31 00:00:00", "1972-02-29 00:00:00", "1972-03-31 00:00:00", "1972-04-30 00:00:00", "1972-05-31 00:00:00", "1972-06-30 00:00:00", "1972-07-31 00:00:00", "1972-08-31 00:00:00", "1972-09-30 00:00:00", "1972-10-31 00:00:00", "1972-11-30 00:00:00", "1972-12-31 00:00:00", "1973-01-31 00:00:00", "1973-02-28 
00:00:00", "1973-03-31 00:00:00", "1973-04-30 00:00:00", "1973-05-31 00:00:00", "1973-06-30 00:00:00", "1973-07-31 00:00:00", "1973-08-31 00:00:00", "1973-09-30 00:00:00", "1973-10-31 00:00:00", "1973-11-30 00:00:00", "1973-12-31 00:00:00", "1974-01-31 00:00:00", "1974-02-28 00:00:00", "1974-03-31 00:00:00", "1974-04-30 00:00:00", "1974-05-31 00:00:00", "1974-06-30 00:00:00", "1974-07-31 00:00:00", "1974-08-31 00:00:00", "1974-09-30 00:00:00", "1974-10-31 00:00:00", "1974-11-30 00:00:00", "1974-12-31 00:00:00", "1975-01-31 00:00:00", "1975-02-28 00:00:00", "1975-03-31 00:00:00", "1975-04-30 00:00:00", "1975-05-31 00:00:00", "1975-06-30 00:00:00", "1975-07-31 00:00:00", "1975-08-31 00:00:00", "1975-09-30 00:00:00", "1975-10-31 00:00:00", "1975-11-30 00:00:00", "1975-12-31 00:00:00", "1976-01-31 00:00:00", "1976-02-29 00:00:00", "1976-03-31 00:00:00", "1976-04-30 00:00:00", "1976-05-31 00:00:00", "1976-06-30 00:00:00", "1976-07-31 00:00:00", "1976-08-31 00:00:00", "1976-09-30 00:00:00", "1976-10-31 00:00:00", "1976-11-30 00:00:00", "1976-12-31 00:00:00", "1977-01-31 00:00:00", "1977-02-28 00:00:00", "1977-03-31 00:00:00", "1977-04-30 00:00:00", "1977-05-31 00:00:00", "1977-06-30 00:00:00", "1977-07-31 00:00:00", "1977-08-31 00:00:00", "1977-09-30 00:00:00", "1977-10-31 00:00:00", "1977-11-30 00:00:00", "1977-12-31 00:00:00", "1978-01-31 00:00:00", "1978-02-28 00:00:00", "1978-03-31 00:00:00", "1978-04-30 00:00:00", "1978-05-31 00:00:00", "1978-06-30 00:00:00", "1978-07-31 00:00:00", "1978-08-31 00:00:00", "1978-09-30 00:00:00", "1978-10-31 00:00:00", "1978-11-30 00:00:00", "1978-12-31 00:00:00", "1979-01-31 00:00:00", "1979-02-28 00:00:00", "1979-03-31 00:00:00", "1979-04-30 00:00:00", "1979-05-31 00:00:00", "1979-06-30 00:00:00", "1979-07-31 00:00:00", "1979-08-31 00:00:00", "1979-09-30 00:00:00", "1979-10-31 00:00:00", "1979-11-30 00:00:00", "1979-12-31 00:00:00", "1980-01-31 00:00:00", "1980-02-29 00:00:00", "1980-03-31 00:00:00", "1980-04-30 00:00:00", 
"1980-05-31 00:00:00", "1980-06-30 00:00:00", "1980-07-31 00:00:00", "1980-08-31 00:00:00", "1980-09-30 00:00:00", "1980-10-31 00:00:00", "1980-11-30 00:00:00", "1980-12-31 00:00:00", "1981-01-31 00:00:00", "1981-02-28 00:00:00", "1981-03-31 00:00:00", "1981-04-30 00:00:00", "1981-05-31 00:00:00", "1981-06-30 00:00:00", "1981-07-31 00:00:00", "1981-08-31 00:00:00", "1981-09-30 00:00:00", "1981-10-31 00:00:00", "1981-11-30 00:00:00", "1981-12-31 00:00:00", "1982-01-31 00:00:00", "1982-02-28 00:00:00", "1982-03-31 00:00:00", "1982-04-30 00:00:00", "1982-05-31 00:00:00", "1982-06-30 00:00:00", "1982-07-31 00:00:00", "1982-08-31 00:00:00", "1982-09-30 00:00:00", "1982-10-31 00:00:00", "1982-11-30 00:00:00", "1982-12-31 00:00:00", "1983-01-31 00:00:00", "1983-02-28 00:00:00", "1983-03-31 00:00:00", "1983-04-30 00:00:00", "1983-05-31 00:00:00", "1983-06-30 00:00:00", "1983-07-31 00:00:00", "1983-08-31 00:00:00", "1983-09-30 00:00:00", "1983-10-31 00:00:00", "1983-11-30 00:00:00", "1983-12-31 00:00:00", "1984-01-31 00:00:00", "1984-02-29 00:00:00", "1984-03-31 00:00:00", "1984-04-30 00:00:00", "1984-05-31 00:00:00", "1984-06-30 00:00:00", "1984-07-31 00:00:00", "1984-08-31 00:00:00", "1984-09-30 00:00:00", "1984-10-31 00:00:00", "1984-11-30 00:00:00", "1984-12-31 00:00:00", "1985-01-31 00:00:00", "1985-02-28 00:00:00", "1985-03-31 00:00:00", "1985-04-30 00:00:00", "1985-05-31 00:00:00", "1985-06-30 00:00:00", "1985-07-31 00:00:00", "1985-08-31 00:00:00", "1985-09-30 00:00:00", "1985-10-31 00:00:00", "1985-11-30 00:00:00", "1985-12-31 00:00:00", "1986-01-31 00:00:00", "1986-02-28 00:00:00", "1986-03-31 00:00:00", "1986-04-30 00:00:00", "1986-05-31 00:00:00", "1986-06-30 00:00:00", "1986-07-31 00:00:00", "1986-08-31 00:00:00", "1986-09-30 00:00:00", "1986-10-31 00:00:00", "1986-11-30 00:00:00", "1986-12-31 00:00:00"], "data": [[1.0280719371918932, 1.0033713036474514, 0.9988706136070924], [1.0158591300614828, 1.002112419484702, 1.000351058698918], [1.0176639375364858, 
1.0002956611570246, 0.998650059226844], [0.9905313495818503, 1.0002638955685332, 1.0014261201715742], [1.0387159886985111, 1.0012907947398513, 1.0005221650412652], [0.9968241965182455, 1.0017856694082705, 1.0007381973025329], [0.9596903812489704, 1.0009654472934473, 1.0011940133487716], [0.998369792927396, 0.999956345826235, 1.0012170108342249], [1.006700774853801, 1.0034583333333333, 0.9984761483208922], [1.0346409225930457, 1.001693910801315, 0.9996053036036991], [0.9885564061653042, 1.0049918709567587, 0.996922049714096], [0.9606814311484831, 1.0019354301258077, 0.9984203635942394], [0.9892733357236764, 1.0027583333333334, 0.9992612698731316], [1.012404215724328, 0.9982790086887837, 1.0027605196191545], [0.993759599856451, 1.0017243433660241, 0.9987964631357693], [1.0363727828244758, 1.0006963356973997, 0.9987783700852044], [0.9780387705204332, 1.0039510152284261, 1.0002291165968225], [1.0149086676217767, 0.9998864403917596, 1.0004755844556936], [0.9727924261192709, 1.0020666666666667, 0.999701166002841], [0.9766612911718211, 0.9972017647058823, 1.0001818984577422], [1.0354085241019915, 1.000965622901276, 1.0018989405519048], [1.0269064359113032, 1.0008667393492117, 1.0002674180686617], [1.0542644806338028, 1.0008594280607686, 0.9988149952623906], [1.0437365064746598, 1.0020166666666666, 0.999605618223917], [1.0339660071846015, 1.0019916666666666, 0.9975225233275804], [1.0291900603035975, 1.0029166275299117, 0.9965272667280022], [1.0126335510658768, 1.0009010528373548, 0.9986093553624658], [0.9891979949874687, 1.0019416666666667, 0.9984026740682473], [0.9930642235598901, 0.9991878787878788, 1.00124466122684], [1.041871092823675, 1.0013223335560009, 0.9991457835037952], [0.9912758355050727, 1.000563242161441, 0.9988043813694619], [1.0134552482902173, 1.0019166666666666, 0.9981308484428043], [1.0477450980392158, 1.0020666666666667, 0.9992124538274398], [1.0116535359219658, 1.001164833944241, 0.9984916954366303], [0.9651364533965244, 1.0012657345761207, 
1.000859474127377], [1.0157172751222099, 0.9999449020259051, 1.0022866754437012], [1.0034178296781546, 1.0002734283504586, 0.9998090094648865], [0.9673447140857948, 1.0009479228732208, 1.0006766265300344], [0.928157396522165, 1.0012473792989418, 0.9988184811037534], [0.8858813568291264, 1.0032703078450844, 0.9983556542597287], [1.023793534188939, 1.0021016214427532, 0.9971765709609803], [1.0302400824995614, 1.0003638375165125, 1.0006653374624586], [0.9908061121703784, 0.9977037694499232, 1.006142423876752], [0.9714607327586207, 1.0036029953917052, 0.9974550590298172], [1.0720333066286865, 1.0023583333333332, 0.9992106039864934], [1.0462608261159227, 1.0023916666666666, 0.9977598211098885], [1.0414759871221797, 1.0004491294349538, 1.0021729646710849], [1.0159638923045395, 1.001117804024497, 0.9998406602646049], [0.9956502997506499, 1.001422681088168, 0.9972243473677314], [1.0498028907162074, 1.0034032972440947, 0.9984006498962644], [1.0227158837502424, 1.001455981645362, 1.0004770298062273], [0.9989109859956403, 0.9992166203854949, 1.0015388580717515], [0.9845675230222214, 1.0000363799283154, 1.0017632841409216], [1.0303154649872113, 1.0008100487804876, 1.0005511264518878], [1.0289400770169999, 1.0037959798177083, 0.9962176709272776], [1.0017624734079693, 1.0018965853658537, 1.0007968684555888], [0.9969609178876261, 1.0019558154645873, 1.0008525590218411], [1.0206465539907612, 0.9996854922279792, 1.0009601377362716], [1.0333206035683788, 1.0009884076707605, 1.0016255180572833], [1.0148172334859384, 1.003915081419174, 1.000024249403329], [1.0207283456088212, 1.0019775210084034, 0.9994829803550476], [1.0169486780879864, 1.0025676305869684, 0.9996113321962515], [1.0122209678091902, 1.0019288250484182, 0.9985376501524158], [0.9932960212922407, 1.001929764592067, 1.0006196214262137], [1.0362882413851693, 1.0025600311626908, 1.0002984498139085], [0.9909496915805496, 1.0019476650563608, 0.9992541009496088], [1.016396099652315, 1.0019735762548263, 1.000956615867475], 
[1.019721855892579, 1.001685829048843, 0.999759163971496], [1.0061538965192474, 1.0001408950122825, 1.0004091384465639], [0.985116260923845, 1.001915904, 1.0005560716344668], [1.0282244223439736, 1.002212875639386, 1.0003516590170527], [1.0097667305310418, 1.003275, 0.9993404376938423], [1.0001660973566213, 1.00231370169275, 1.0010661810121293], [1.0123612273586307, 1.0010369741873804, 1.0012721523349608], [1.017344928195218, 1.000054749047014, 1.0017725085185702], [0.9489011600539903, 0.9990410207740168, 1.005378797002348], [1.0010452630134836, 1.004153008233059, 0.9995996037903918], [1.021202184666117, 1.004153914421553, 0.9980557393989961], [1.0359771842602228, 1.0010456462154753, 0.9985751581632717], [1.0217535498076598, 1.0024072827804107, 1.0007451099918447], [1.010777984462195, 1.0002479921259841, 1.000731666782737], [0.9947639149094939, 1.0004988226059652, 1.0026947898198315], [1.0198226679748537, 1.002880371706399, 1.0023092960536062], [0.9894904629232748, 0.9976164276807981, 1.0044227996227477], [0.9583996140732846, 1.000705593536358, 1.0027102984958496], [1.0268275213841043, 1.000740179677819, 1.0026015427824753], [0.9499393467976711, 1.0016944667697063, 0.9996474341827011], [0.9913515576022618, 1.002820027794935, 1.0012814796319274], [0.9971257977439711, 1.0018342064714945, 1.000382554969286], [0.9365614131214416, 0.9979824400204187, 1.0041621076121736], [0.9677722669973133, 1.0014079007633587, 1.002435848631442], [0.9883040928745066, 1.0014006215119227, 1.001238337174596], [1.0531534098275637, 1.0035168795620437, 1.0001888408671744], [1.007151088611763, 1.0029132442284325, 1.0003466806702261], [1.0413131685724824, 1.0045436271529888, 0.9995297117438152], [1.0373100453917505, 1.0007581818181817, 1.0004679782073898], [1.0232367747992748, 1.00355, 0.999330477856776], [1.016843356413706, 1.0001691842900302, 0.999966715533551], [1.0175027837824473, 1.003, 1.000949176033177], [0.9871085039029746, 0.9969262762762763, 1.0027962388740181], [1.0168765638088364, 
1.0005038173652694, 1.002397261829141], [1.0154821319164846, 1.0005626368159204, 1.0022811032677532], [1.013519529235478, 1.0006961805555554, 1.0016951321158216], [0.9980039005239413, 1.0008213649851632, 1.000998123823461], [0.9683093393309063, 0.9980187069813177, 1.0026050040369892], [1.0280757024815137, 1.0011883088235294, 1.0009389825389794], [0.9939697297929366, 1.0012218963831867, 1.004181013047089], [0.9546394439029672, 1.001213888888889, 1.0036551612744964], [0.9815468761294366, 1.0013803206997087, 1.0022570930239154], [1.0735000094354112, 1.0015633236434107, 1.0025301756466845], [1.0226158217988668, 1.0018044444444445, 1.0032043555675563], [1.0234850293970503, 0.9988097982708932, 1.0021473365885287], [0.9947667477084493, 0.998668982808023, 1.0020941483206136], [0.9778699563927264, 1.0013724047619048, 1.0033922841181462], [1.0321391972090168, 1.0014636752136752, 1.002115791953096], [1.0213630006096386, 0.9987673512747876, 1.0037894535414413], [1.0149928981880625, 1.0017039783427495, 1.002300683093753], [1.0100105566988267, 0.9993207865168537, 1.0014631156904303], [0.9574594257178526, 1.0023012138188607, 1.0035290814620998], [0.9920492386898895, 1.002292458100559, 1.0010423293037605], [0.9727190025881399, 0.9966647276084949, 1.003827833283473], [1.0171159318165637, 0.9995539715335171, 1.004393522647467], [1.0322993393575821, 1.0022722527472525, 1.0035367489108808], [0.9451019061251642, 0.999872859744991, 1.0052389370340384], [0.952746018887432, 1.0003668478260872, 1.0037975683474671], [0.9917796132527842, 1.0030908762420956, 1.0016586707307285], [1.0035787695638163, 1.0004856469002696, 1.0037704520562558], [1.0080329798285101, 1.000440125111707, 1.0014291101349522], [1.004591005746324, 1.0006678222222223, 1.0041373908543318], [0.9446896805369114, 1.0011770557029178, 1.0047541461169873], [0.9914828021760622, 1.001246679859279, 1.0033638204799875], [0.9629472199558246, 1.000661132983377, 1.0036336945846291], [1.0147848493512965, 1.0002742167101828, 
1.0008233880924504], [0.964948035003162, 1.000202012987013, 1.0048515350775364], [0.8857125079498563, 1.003094559585492, 1.00193460929362], [0.992164013217997, 1.0003833333333336, 1.0010347008659388], [1.0000742083642862, 1.0027904884318766, 1.0024490440708314], [1.0325614874977989, 1.0027638675213673, 1.0007845875950103], [1.05779197266689, 0.9999719387755103, 1.0034325230172905], [1.0196758016325451, 0.9998238578680204, 1.0032997037785942], [0.9969902892362444, 0.9993272727272725, 1.0013866188266827], [1.0661822401041756, 0.9990128140703518, 1.0025582321701185], [1.04109753840459, 1.0011844611528822, 0.9993260352513255], [1.0388914847821231, 1.0030833333333333, 1.0012050580670957], [1.025739045927299, 1.000309625, 1.0023892963612429], [1.0341504588595445, 1.0007148794679968, 1.003110382539153], [0.9840107674038563, 0.9984700992555832, 1.0032745375250745], [0.9767674918867899, 0.9990005144032921, 1.0037857091283202], [0.9929219669702521, 1.0020258620689655, 1.0012826427115236], [0.9824122460511984, 1.0016495495495497, 1.0035005601237021], [1.0248611682435214, 1.0014477736928107, 1.0013116911223408], [0.9789581798861627, 1.0012625916870415, 1.0007023544437332], [0.9562790540309042, 1.0010690650406502, 1.0008567617283595], [1.066415276210876, 1.0009004460665043, 1.0033478359113095], [1.0442254041880945, 1.0003826456310678, 1.0030271461607747], [1.015925956744619, 0.9978228663446054, 1.004380863792072], [1.0237173997232474, 1.0031083333333333, 1.0000530163089165], [1.010149008289425, 1.0006745783132531, 0.999828738691669], [0.9898559202076262, 1.0006637620192307, 1.001245826554288], [1.0027504829933638, 1.0008524380495603, 1.0002599248983455], [0.990214757745514, 1.0009163875598088, 1.0031328448152108], [1.0353684048692549, 1.0009553699284008, 1.0019483439699182], [0.985551347071537, 0.9991142913697544, 1.004786988726828], [0.9994441021698699, 1.001570971563981, 1.001565637274597], [1.0500773636013463, 0.9992475628930817, 1.00253058409742], [1.0207246895282873, 
1.0018621176470586, 1.002577870112022], [1.0075276941198017, 0.9998033762685401, 1.0041717194325706], [0.9599929561007685, 0.9976573643410852, 1.0062167838160556], [0.9774377333230869, 0.9958116359447005, 1.0094964280501515], [0.9769108866563003, 0.9983158657513348, 1.0082378686760565], [0.967656807932532, 1.0007200455580867, 1.0050052524110837], [0.9734626851601718, 0.9991636689291099, 1.006323348075636], [1.0098309872184792, 1.006675, 1.0022215676192543], [0.9662103977962575, 0.9893187777777779, 1.0162718859980886], [1.0177097187409414, 1.0024529867256637, 1.0022379663335117], [1.0332384543765505, 0.9971919590643273, 1.0065145839826732], [0.9254109579310373, 0.9999464052287581, 1.0102869332743705], [0.9259253246753245, 0.9975153887688987, 1.009747447447289], [1.0082920737157746, 0.995722061965812, 1.0123704791828265], [0.9628909935472947, 0.9952997885835095, 1.0153757586277352], [1.0326374757196064, 0.9961036959553696, 1.0140740630948053], [0.9478846669175333, 1.000661365211365, 1.0064086377834776], [0.9609494128925956, 0.9964997085048012, 1.0096557149820482], [0.9963901652999776, 0.9983663265306123, 1.0059256737775295], [0.8793807608366141, 1.0001681879648412, 1.0055742743427207], [0.9508352876896563, 0.9953528390113562, 1.011175995282007], [0.889226255428956, 0.9927897562582344, 1.00997224527573], [1.0137560500559426, 0.9983247712418302, 1.0054205315073055], [1.0293806260765739, 0.9964558252427184, 1.0073102955680358], [0.931847932992883, 0.9982052825947335, 1.0076444097699606], [1.0821864291811891, 0.997528585086042, 1.0053483161594616], [1.0996594281470395, 0.9988537705956906, 1.0032126056852246], [1.0457768041371436, 1.0007697916666667, 1.0005981399947186], [1.0110465652871874, 1.0008837735849054, 1.000669116696247], [1.061121765458115, 1.0024668863779032, 1.0027086665370033], [1.0212797693000037, 0.9969400934579439, 1.0059281288557496], [0.9932020521503915, 0.9958017746913581, 1.0095326408858396], [0.9283247053204566, 1.0016568265682657, 1.003767351580983], 
[0.9860255299089741, 0.9979847985347986, 1.0026554682873243], [1.0439666869102653, 0.9994750455373406, 1.0052806339607159], [1.0130267005123192, 0.9973003616636528, 1.0048535488033354], [0.9846333065606965, 0.9991131894484411, 1.0035255689678007], [1.09348572125952, 1.0004595579450417, 1.0021657437895635], [1.0380502567353243, 1.0022704830053666, 0.9982861108644397], [1.0062235709179628, 1.0023735119047619, 0.9993932760036348], [1.007369546206817, 1.0022602495543673, 0.9998021265300792], [0.9891337042649831, 0.998991134751773, 1.003907679776026], [1.0036956753697417, 0.9991934744268076, 1.0037528620758687], [1.0212840213001926, 0.9990722368421051, 1.0036638555492157], [0.9892204808050272, 0.9990253054101222, 1.0045164622506704], [1.0208460782600122, 0.9990029513888887, 1.0039151335470495], [0.9639499408196843, 0.9988974093264249, 1.0037664659159817], [0.9946701579049779, 1.0005023666092943, 1.0038009332549294], [1.034353225214946, 0.9984693921232877, 1.0043546671947632], [0.9895638086025192, 0.9987195911413967, 1.0044364307929456], [0.9664392238343945, 0.993734246767847, 1.0098885610753079], [0.9927773941259672, 0.9987804809843399, 1.0039477847514289], [0.9798759783659157, 0.9970914444444445, 1.005724035160166], [0.9957141182862402, 1.000797342192691, 1.0046097654912294], [1.0023961037791178, 0.9992039118457301, 1.004902503986218], [1.0078822600710948, 0.9993694490131579, 1.0033384422457863], [0.9760524312214353, 0.9996425531914892, 1.0030512798509654], [0.9850672709247983, 1.0015632272974444, 1.00205330822366], [0.9748949286645495, 1.0002382034632034, 1.0033040445009758], [1.0049674676194504, 0.9985989247311827, 1.0054364248393148], [0.9960304995956586, 1.0002185660781167, 1.0033330006820051], [0.9599516491579622, 0.9989528442317915, 1.0047660490910524], [0.9840223584754706, 1.0005875, 1.0031458697876465], [0.9947895284305194, 0.9988994479495268, 1.006307592510151], [1.0401314081500017, 0.9973759259259259, 1.0076389527587446], [1.0452614984711015, 
0.9959896511627907, 1.0071837759819684], [0.9959580827480832, 0.9978728846153847, 1.0061688821693733], [0.9918025334688639, 0.9981634860050892, 1.0041907664451761], [1.068434171702102, 0.9997943854324735, 1.0031917924116234], [0.9964772373667565, 0.9974600877192983, 1.0042613757697452], [0.9635843742693666, 0.997656917536016, 1.0068885100094196], [0.9414282165931217, 1.0012314074074073, 1.004813561479389], [1.014726365971097, 1.0016310751104565, 1.0039342324188572], [1.0327378818494286, 0.998964294403893, 1.0090434123633194], [0.9780112030750249, 0.9975724951830444, 1.0056284694035194], [1.0131845606100955, 0.9978065808297567, 1.0086350406892342], [1.0127405357390487, 0.9978901558073654, 1.0100187523201327], [0.96875823439701, 0.9967141223155928, 1.0109255213184765], [1.0128824312504066, 0.9963860110803324, 1.0090443386557957], [1.003154975101792, 0.9966567123287673, 1.0084672978292215], [1.040228184358807, 0.9983600180913612, 1.0069873900759414], [1.0045589512783262, 0.9990609543010752, 1.0091080299778075], [0.9587991219445096, 0.9990079787234042, 1.0072488431827886], [0.9876195953292023, 0.9991952631578946, 1.0064218534484466], [1.0331874804338559, 0.9982123970524491, 1.0081025395853056], [1.0185555584701749, 0.9957564102564104, 1.0117130658562885], [1.0294410139721744, 0.9979227848101266, 1.0096206671680035], [0.8985910933102921, 0.9987598834789847, 1.0134967909679746], [0.9774342356468213, 1.0010024721878859, 1.003900796090959], [1.0401262821349726, 0.9972880660954713, 1.0060007039105825], [1.05703300515674, 0.9961375656565655, 1.0044427175987571], [1.049726098022106, 1.0054978813559323, 1.0072732065427814], [1.0276214459823996, 1.0003419270833334, 1.007524119456413], [1.0198174201529464, 1.000143663090981, 1.0075217930475315], [1.0235196509806845, 1.0001467729240456, 1.0069689654884304], [1.0376010516135021, 1.0008073500778816, 1.0070104909690765], [0.9784158532513699, 1.0035295524691357, 1.0062372886812079], [0.9920778624420624, 1.0032275229357799, 
1.0093509298283818], [0.9593984962406015, 1.0031220454545455, 1.0118363622171556], [1.0343982434834644, 1.004285929270128, 1.0075196566130973], [1.0061395942449185, 1.0057326412270855, 1.0012235072836178], [0.9761808214062002, 1.0068035117056853, 1.001166231811287], [0.9996931366189465, 1.0033267127071825, 1.0018292870860903], [0.9691675695290245, 1.0013932149362477, 1.004338686861275], [1.0004085775092668, 1.005234680043384, 1.004460956180702], [0.9081458686571645, 1.00246455424275, 1.004463015559109], [1.0151199333829295, 1.0080350999286223, 1.0022912832219175], [1.0271635654912818, 1.0047470149253732, 1.003093309745679], [1.0085893435244015, 1.005824743181013, 1.0018084480566065], [0.9489504331722393, 1.0070228460451978, 1.0040541060885417], [0.9777717323715956, 1.0080298486448436, 1.0020927589512838], [0.9736056006099674, 1.0105666666666666, 1.000137925704474], [1.050270116177774, 1.0073920175438598, 0.9967201905379717], [0.9962696808520188, 1.0005956725755996, 1.0051260322152997], [0.935595927657916, 0.998933616838488, 1.008548580957832], [0.9973078326282248, 1.004281623931624, 1.004845270724919], [1.0058865743686134, 1.0051714431934493, 1.0008482370086211], [1.1186838295794794, 1.0066, 1.0008546075842328], [1.0854922093946484, 1.0023213302752292, 1.0034233663685264], [1.0471360070410982, 1.0077522704081634, 1.0016442541579376], [1.0177129455965626, 1.0097076083248038, 0.9993309788497258], [1.0371402136334607, 1.0044937180796731, 1.0023407422193844], [1.0202646328618237, 1.0057310289115649, 0.9991296983715169], [1.038663714804723, 1.0059318722392119, 1.0005430849392487], [1.0345819529146476, 0.999708173076923, 1.0054534981898275], [1.0379294022439502, 1.0027652217741936, 1.003807418591069], [1.0144721576601268, 1.005298189134809, 1.0023245804308205], [1.0030589483714485, 1.0035283233132934, 1.004222813073859], [0.9730161533220982, 1.0047630036630035, 1.0025943897206364], [1.0280187700872219, 1.0044895418326694, 1.001184378431139], [1.0035046317131715, 
1.0032031746031747, 1.000312103473138], [0.9866469195336738, 1.0043109792284868, 0.9994903099599743], [0.9977479410122005, 1.0045192307692306, 0.9997582956164954], [1.00979352560873, 1.0005097943192949, 1.0057148252651407], [0.9442599614461263, 1.0026647904483432, 1.006260825531114], [1.0024815281593593, 1.004994752186589, 1.001832454254378], [1.0002205746077444, 1.0041715150048405, 1.001498880698185], [0.9946030892802495, 1.0062434702093397, 0.9992963379428078], [0.9787004605665309, 1.0062804966248795, 1.0004974172719572], [0.987120621056392, 1.004558469420429, 1.002184861994489], [1.0878982588450248, 1.0058263649425288, 1.0021083380148552], [1.0092525489514543, 1.005751575931232, 1.0001768024251212], [0.9930637386487277, 1.004279876308278, 1.0019942007501306], [1.0128843042071198, 1.0052620370370369, 0.9997015123010942], [0.992949488875526, 1.0048081990521327, 1.0016982103909544], [1.045012804606808, 1.0045622831914223, 1.003909986504304], [1.0529045841442364, 1.001208364691126, 1.003862023998521], [0.9914952874169944, 1.0023851123595506, 1.0036121845807455], [1.0055342535958998, 1.0047434579439252, 1.0007375212869545], [1.023538753742573, 1.0043560323383085, 1.0009299376842686], [1.0222635414300103, 1.0029848062015503, 1.0022486079036783], [1.0205899038079065, 1.0040320334261839, 1.0012060826415936], [0.9797436228395059, 1.0040854031510658, 1.001104130390055], [0.9784468998753979, 1.0040555812519272, 1.0007835027842136], [1.0112297724277883, 1.0022580337941627, 1.0011798442494892], [1.0612850770438549, 1.001418501529052, 1.003005863367613], [1.0500635034608423, 1.0013234398782342, 1.003515364118442], [1.004775518689488, 1.0022305505004547, 1.0034203793030687], [1.059889630369841, 1.0077172136128836, 0.996115870303691], [1.066722697653583, 1.0109962725328445, 0.9911625786474053], [1.0293159554533402, 1.0087484360625574, 0.9932941034814529], [1.002159240861634, 1.0023586009174312, 1.00101076005044], [1.0256905315757734, 1.0014997714808043, 1.0027918951119348], 
[0.9819755843185213, 1.0039406544901068, 0.9984700580263501], [1.0209588091566812, 1.0036917198905109, 0.9999489014140102], [0.9710265534525973, 1.000689515151515, 1.0025206765256287], [0.9982012935955767, 1.0024939503932244, 0.9997105567345078], [1.0343975409522796, 1.0026386624396133, 1.0005896348020582], [1.0161746152184632, 1.0009815884476534, 1.0014786276910614]]}, "quarterly": {"columns": ["r3", "r6", "r9", "r12"], "index": ["1964-07-31 00:00:00", "1964-08-31 00:00:00", "1964-09-30 00:00:00", "1964-10-31 00:00:00", "1964-11-30 00:00:00", "1964-12-31 00:00:00", "1965-01-31 00:00:00", "1965-02-28 00:00:00", "1965-03-31 00:00:00", "1965-04-30 00:00:00", "1965-05-31 00:00:00", "1965-06-30 00:00:00", "1965-07-31 00:00:00", "1965-08-31 00:00:00", "1965-09-30 00:00:00", "1965-10-31 00:00:00", "1965-11-30 00:00:00", "1965-12-31 00:00:00", "1966-01-31 00:00:00", "1966-02-28 00:00:00", "1966-03-31 00:00:00", "1966-04-30 00:00:00", "1966-05-31 00:00:00", "1966-06-30 00:00:00", "1966-07-31 00:00:00", "1966-08-31 00:00:00", "1966-09-30 00:00:00", "1966-10-31 00:00:00", "1966-11-30 00:00:00", "1966-12-31 00:00:00", "1967-01-31 00:00:00", "1967-02-28 00:00:00", "1967-03-31 00:00:00", "1967-04-30 00:00:00", "1967-05-31 00:00:00", "1967-06-30 00:00:00", "1967-07-31 00:00:00", "1967-08-31 00:00:00", "1967-09-30 00:00:00", "1967-10-31 00:00:00", "1967-11-30 00:00:00", "1967-12-31 00:00:00", "1968-01-31 00:00:00", "1968-02-29 00:00:00", "1968-03-31 00:00:00", "1968-04-30 00:00:00", "1968-05-31 00:00:00", "1968-06-30 00:00:00", "1968-07-31 00:00:00", "1968-08-31 00:00:00", "1968-09-30 00:00:00", "1968-10-31 00:00:00", "1968-11-30 00:00:00", "1968-12-31 00:00:00", "1969-01-31 00:00:00", "1969-02-28 00:00:00", "1969-03-31 00:00:00", "1969-04-30 00:00:00", "1969-05-31 00:00:00", "1969-06-30 00:00:00", "1969-07-31 00:00:00", "1969-08-31 00:00:00", "1969-09-30 00:00:00", "1969-10-31 00:00:00", "1969-11-30 00:00:00", "1969-12-31 00:00:00", "1970-01-31 00:00:00", "1970-02-28 00:00:00", 
"1970-03-31 00:00:00", "1970-04-30 00:00:00", "1970-05-31 00:00:00", "1970-06-30 00:00:00", "1970-07-31 00:00:00", "1970-08-31 00:00:00", "1970-09-30 00:00:00", "1970-10-31 00:00:00", "1970-11-30 00:00:00", "1970-12-31 00:00:00", "1971-01-31 00:00:00", "1971-02-28 00:00:00", "1971-03-31 00:00:00", "1971-04-30 00:00:00", "1971-05-31 00:00:00", "1971-06-30 00:00:00", "1971-07-31 00:00:00", "1971-08-31 00:00:00", "1971-09-30 00:00:00", "1971-10-31 00:00:00", "1971-11-30 00:00:00", "1971-12-31 00:00:00", "1972-01-31 00:00:00", "1972-02-29 00:00:00", "1972-03-31 00:00:00", "1972-04-30 00:00:00", "1972-05-31 00:00:00", "1972-06-30 00:00:00", "1972-07-31 00:00:00", "1972-08-31 00:00:00", "1972-09-30 00:00:00", "1972-10-31 00:00:00", "1972-11-30 00:00:00", "1972-12-31 00:00:00", "1973-01-31 00:00:00", "1973-02-28 00:00:00", "1973-03-31 00:00:00", "1973-04-30 00:00:00", "1973-05-31 00:00:00", "1973-06-30 00:00:00", "1973-07-31 00:00:00", "1973-08-31 00:00:00", "1973-09-30 00:00:00", "1973-10-31 00:00:00", "1973-11-30 00:00:00", "1973-12-31 00:00:00", "1974-01-31 00:00:00", "1974-02-28 00:00:00", "1974-03-31 00:00:00", "1974-04-30 00:00:00", "1974-05-31 00:00:00", "1974-06-30 00:00:00", "1974-07-31 00:00:00", "1974-08-31 00:00:00", "1974-09-30 00:00:00", "1974-10-31 00:00:00", "1974-11-30 00:00:00", "1974-12-31 00:00:00", "1975-01-31 00:00:00", "1975-02-28 00:00:00", "1975-03-31 00:00:00", "1975-04-30 00:00:00", "1975-05-31 00:00:00", "1975-06-30 00:00:00", "1975-07-31 00:00:00", "1975-08-31 00:00:00", "1975-09-30 00:00:00", "1975-10-31 00:00:00", "1975-11-30 00:00:00", "1975-12-31 00:00:00", "1976-01-31 00:00:00", "1976-02-29 00:00:00", "1976-03-31 00:00:00", "1976-04-30 00:00:00", "1976-05-31 00:00:00", "1976-06-30 00:00:00", "1976-07-31 00:00:00", "1976-08-31 00:00:00", "1976-09-30 00:00:00", "1976-10-31 00:00:00", "1976-11-30 00:00:00", "1976-12-31 00:00:00", "1977-01-31 00:00:00", "1977-02-28 00:00:00", "1977-03-31 00:00:00", "1977-04-30 00:00:00", "1977-05-31 
00:00:00", "1977-06-30 00:00:00", "1977-07-31 00:00:00", "1977-08-31 00:00:00", "1977-09-30 00:00:00", "1977-10-31 00:00:00", "1977-11-30 00:00:00", "1977-12-31 00:00:00", "1978-01-31 00:00:00", "1978-02-28 00:00:00", "1978-03-31 00:00:00", "1978-04-30 00:00:00", "1978-05-31 00:00:00", "1978-06-30 00:00:00", "1978-07-31 00:00:00", "1978-08-31 00:00:00", "1978-09-30 00:00:00", "1978-10-31 00:00:00", "1978-11-30 00:00:00", "1978-12-31 00:00:00", "1979-01-31 00:00:00", "1979-02-28 00:00:00", "1979-03-31 00:00:00", "1979-04-30 00:00:00", "1979-05-31 00:00:00", "1979-06-30 00:00:00", "1979-07-31 00:00:00", "1979-08-31 00:00:00", "1979-09-30 00:00:00", "1979-10-31 00:00:00", "1979-11-30 00:00:00", "1979-12-31 00:00:00", "1980-01-31 00:00:00", "1980-02-29 00:00:00", "1980-03-31 00:00:00", "1980-04-30 00:00:00", "1980-05-31 00:00:00", "1980-06-30 00:00:00", "1980-07-31 00:00:00", "1980-08-31 00:00:00", "1980-09-30 00:00:00", "1980-10-31 00:00:00", "1980-11-30 00:00:00", "1980-12-31 00:00:00", "1981-01-31 00:00:00", "1981-02-28 00:00:00", "1981-03-31 00:00:00", "1981-04-30 00:00:00", "1981-05-31 00:00:00", "1981-06-30 00:00:00", "1981-07-31 00:00:00", "1981-08-31 00:00:00", "1981-09-30 00:00:00", "1981-10-31 00:00:00", "1981-11-30 00:00:00", "1981-12-31 00:00:00", "1982-01-31 00:00:00", "1982-02-28 00:00:00", "1982-03-31 00:00:00", "1982-04-30 00:00:00", "1982-05-31 00:00:00", "1982-06-30 00:00:00", "1982-07-31 00:00:00", "1982-08-31 00:00:00", "1982-09-30 00:00:00", "1982-10-31 00:00:00", "1982-11-30 00:00:00", "1982-12-31 00:00:00", "1983-01-31 00:00:00", "1983-02-28 00:00:00", "1983-03-31 00:00:00", "1983-04-30 00:00:00", "1983-05-31 00:00:00", "1983-06-30 00:00:00", "1983-07-31 00:00:00", "1983-08-31 00:00:00", "1983-09-30 00:00:00", "1983-10-31 00:00:00", "1983-11-30 00:00:00", "1983-12-31 00:00:00", "1984-01-31 00:00:00", "1984-02-29 00:00:00", "1984-03-31 00:00:00", "1984-04-30 00:00:00", "1984-05-31 00:00:00", "1984-06-30 00:00:00", "1984-07-31 00:00:00", 
"1984-08-31 00:00:00", "1984-09-30 00:00:00", "1984-10-31 00:00:00", "1984-11-30 00:00:00", "1984-12-31 00:00:00", "1985-01-31 00:00:00", "1985-02-28 00:00:00", "1985-03-31 00:00:00", "1985-04-30 00:00:00", "1985-05-31 00:00:00", "1985-06-30 00:00:00", "1985-07-31 00:00:00", "1985-08-31 00:00:00", "1985-09-30 00:00:00", "1985-10-31 00:00:00", "1985-11-30 00:00:00", "1985-12-31 00:00:00", "1986-01-31 00:00:00", "1986-02-28 00:00:00", "1986-03-31 00:00:00", "1986-04-30 00:00:00", "1986-05-31 00:00:00", "1986-06-30 00:00:00", "1986-07-31 00:00:00", "1986-08-31 00:00:00", "1986-09-30 00:00:00", "1986-10-31 00:00:00", "1986-11-30 00:00:00", "1986-12-31 00:00:00"], "data": [[1.0054088367609255, 1.0055548573454922, 1.0053003214755676, 1.0052916791362339], [1.0035785805831465, 1.0036972585041644, 1.0031839822732285, 1.0030435925767673], [1.003336992, 1.0032289064976225, 1.0028012669772004, 1.002780501753832], [1.0037642583120205, 1.0038283052138102, 1.0033975817068705, 1.003630195746385], [1.006841783887468, 1.0068769512177131, 1.0065170220901658, 1.006467398791487], [1.007665282657298, 1.007893280521432, 1.0077587306880595, 1.00774747578924], [1.0063079031230082, 1.0065610160289875, 1.006195488594009, 1.0058005306910138], [1.0034093392630241, 1.0037581470642727, 1.0037440038278311, 1.003656309216086], [1.00024108668143, 1.0008118069732668, 1.0011257500875308, 1.0012497226774824], [1.0034296548448385, 1.0038535380173013, 1.0040104675111452, 1.0041387204435714], [1.0074847226624406, 1.0078124803785402, 1.0077370972423014, 1.0076857373447603], [1.0091807400379507, 1.0090885727210466, 1.008587185468939, 1.008301185358764], [1.0073670774091628, 1.0070986626483833, 1.0061932065100287, 1.0054880583039378], [1.00324031496063, 1.0030699873322833, 1.0023367984554221, 1.0017988297337892], [1.002507880690738, 1.0020096893490378, 1.0007990200950359, 0.9997224998182773], [1.002787758782936, 1.002085370871136, 1.0008238503628508, 0.9995296350721192], [0.9998330346009975, 
0.999080822594447, 0.9976258006543549, 0.9964920746080006], [1.0005828931013052, 1.0007779591831343, 1.0002453367317874, 0.9999022306879845], [0.998941232961586, 0.9993244077987227, 0.9994385994919174, 0.9996657988439115], [1.0031817619783616, 1.0039073180778393, 1.0038067626594316, 1.0039714273859792], [1.0052274706609015, 1.006255353034996, 1.0067530064964256, 1.0068927134893462], [1.0062506625577812, 1.006261868365439, 1.005925718406458, 1.0054240331571171], [1.0023050535987752, 1.0022132146377267, 1.000634319375124, 0.9991553907222568], [0.9998251908396948, 0.9982876071940311, 0.9955485934115748, 0.9945409665614603], [0.9996773211567733, 0.9988141484359029, 0.9972154971925132, 0.9970191091362182], [1.0053181265206812, 1.0057934909739394, 1.0054089724071442, 1.0062093377815977], [1.0081916388213852, 1.011098841557878, 1.012482212753598, 1.0136954723203195], [1.011834916413374, 1.0145656631685323, 1.0163294064412038, 1.017968487191554], [1.009615272727273, 1.0124213062002267, 1.0144802243067066, 1.0161248284267972], [1.009945696969697, 1.01208559148103, 1.0145871060027363, 1.0167731498658823], [1.0056864048338368, 1.0078392447608695, 1.0096916707204981, 1.0108453348194644], [1.0083444108761328, 1.0107626527416396, 1.0125107845201, 1.0137151723165512], [1.0015450450450452, 1.0030241372859203, 1.0033243062082142, 1.0026290788855783], [1.0005317365269464, 0.9997706770966123, 0.9972142467702346, 0.9954281189066049], [0.9969522388059702, 0.9961984177118217, 0.9935226591254053, 0.9920125117891531], [0.9998424107142856, 0.9993054427151404, 0.9974421184969888, 0.9973237934711567], [1.0015292284866466, 1.0030530292939421, 1.0018633718326475, 1.0015415760798563], [0.9987496312684366, 1.0002378219734531, 0.9988890445855296, 0.9982418796857847], [0.9991552941176471, 1.0003146202550952, 0.9986230902892084, 0.9980986790109518], [0.9995360703812318, 1.0008122805112054, 1.000741139334195, 1.0012763141769423], [1.0029493421052629, 1.0047388088473934, 1.005601046286819, 
1.006887695589757], [1.003569970845481, 1.0054678844448621, 1.0060744142366858, 1.0066688865829239], [1.0036700581395348, 1.003750141985642, 1.0033993124205671, 1.002980348614025], [1.003646086956522, 1.0027407831360167, 1.001442304645666, 1.000044419177642], [1.0012486311239193, 1.001001595844777, 1.0006939395502166, 1.000052123529866], [0.9989306590257879, 0.999462224924019, 1.0000210634709334, 1.0004611736811722], [0.9996621428571429, 1.0017012092550859, 1.0037134295320234, 1.0060214359548476], [1.0022467236467236, 1.0034626926455452, 1.0052934542337706, 1.0073044173753225], [1.0017931303116148, 1.0020101538522825, 1.0025544847567354, 1.0032207156452067], [1.0012817796610172, 1.0009175826817243, 1.0002177724284993, 0.9998316701873677], [0.9987478230337077, 0.9969813542265192, 0.9953571802474896, 0.9940265137654716], [1.002020658263305, 1.000189527724223, 0.9981667380933366, 0.996702483537993], [1.0022995810055868, 1.0011956726883158, 0.9994316597026083, 0.9981118514234305], [1.0008432132963987, 1.000965387810952, 1.0006313843858465, 1.000303038822508], [0.9985673553719009, 0.9990909518265522, 0.9993827613401316, 0.9993890277920783], [0.9985642857142857, 0.9994062188427275, 0.9996539780859448, 0.9994483915530576], [1.0011831967213114, 1.000608240120938, 0.998861841800966, 0.9972998111134251], [1.0014805027173912, 0.9991612808460634, 0.9961824546524509, 0.992907213565102], [1.0013452574525745, 0.9993446339692157, 0.9971027993498238, 0.9948281271272656], [1.0024059299191375, 1.0020566059585756, 1.0009053415868696, 0.9992421508733821], [1.0038605898123325, 1.0047271936816173, 1.004871624979937, 1.0052925886970276], [1.0011708, 1.0012521363323839, 1.0002443887872075, 1.0000293905709188], [1.0015277851458886, 1.0005417951722861, 1.0004389607837882, 1.000733609179224], [1.001391820580475, 1.000359742643955, 1.000356164971526, 0.9998200809349417], [1.0020669291338582, 1.0038581961574309, 1.0052104272001225, 1.0060366863506958], [1.0035779373368148, 1.0065321345692537, 
1.0104101496787044, 1.013267645839762], [1.0037839610389612, 1.0063313086018555, 1.0097168755736037, 1.0122982679761674], [1.004640738341969, 1.0054852183109249, 1.006535343885575, 1.006230859757758], [1.003474806701031, 1.0028419720563497, 1.0028725725651393, 1.001781441538434], [1.0058248714652958, 1.0061536641956927, 1.0076175375835328, 1.0081383962419064], [1.006668205128205, 1.008325178754056, 1.0114384050549627, 1.0149525623181028], [1.0063255102040813, 1.008318643127496, 1.0106383951430034, 1.0143530714094893], [1.0032300126903553, 1.0046035560632267, 1.0064492126084075, 1.0095556897475886], [1.0006306818181818, 1.0038514338010982, 1.007513615858714, 1.012107166599896], [0.9999939698492465, 1.0045078059053139, 1.0089720896934278, 1.0136175108187655], [1.002058521303258, 1.0069524022696437, 1.011663996397312, 1.0165034406307867], [1.0055819548872182, 1.0100346233430988, 1.0139960018802527, 1.0178898601895316], [1.007114125, 1.0107866032029351, 1.0141543611793609, 1.017320496123276], [1.0060571072319202, 1.0075283489389733, 1.0084236548754564, 1.00896693405095], [0.9992326302729528, 0.998452861518519, 0.9968207651364035, 0.9935651653890197], [0.9960000000000001, 0.9931446785471904, 0.9896901503803326, 0.984911595332795], [0.9972158866995073, 0.9941653442078736, 0.99065071321128, 0.9869631955677591], [1.0004202702702703, 0.9994149647431614, 0.9990016107840881, 0.9988011989308413], [1.004434742647059, 1.0055245627372007, 1.0071151911757532, 1.0093436513137874], [1.0060660146699267, 1.009354257486241, 1.012811867392276, 1.0163755131521859], [1.0049425609756097, 1.0079579943788157, 1.0115525479797316, 1.0157853222127702], [1.0043401459854013, 1.007271446811409, 1.0099805178861243, 1.012867271876062], [1.0037872572815532, 1.0070414704484962, 1.00964476893242, 1.0113480345998853], [1.0007862318840581, 1.0039946131431638, 1.0056944121204963, 1.0067616428522415], [1.0027059782608696, 1.0043844770701615, 1.0049247281366178, 1.0052836282585986], [1.0011600000000003, 
1.0016485052941202, 1.0011563858135377, 1.0007725623181005], [1.003153846153846, 1.0039932107081404, 1.0039926315967982, 1.004683176870571], [1.0020636690647482, 1.0034488415308702, 1.0037006264819317, 1.004222829699132], [1.002031399521531, 1.0038330335648338, 1.0044382669833065, 1.0063661465954592], [1.0019990453460623, 1.003210337539707, 1.0026447251052868, 1.0030609839800029], [1.0001809382422804, 1.0003876454506897, 0.9993352225626458, 0.9994162185712843], [1.0003770142180093, 1.0009468368553025, 1.0000098192268778, 0.9997246804102393], [0.9981390330188679, 0.9987541566954146, 0.9981214241423693, 0.9986237503189758], [1.002128588235294, 1.003280246406213, 1.0035556287785452, 1.004510684844776], [1.0000016393442623, 1.0001135115940971, 0.9996695320510061, 0.9996947053819941], [0.9978297674418605, 0.9971808632631532, 0.9953402503081682, 0.9932408322758631], [0.9916748271889401, 0.9901382783526341, 0.9868735225997469, 0.9840186385584518], [0.9903322082379863, 0.9890943575154101, 0.9871116643198325, 0.9852699086724694], [0.9932118451025057, 0.9922740947409986, 0.9908525527147104, 0.9899752209169901], [0.9968498868778279, 0.9959589500893388, 0.9951200744664103, 0.9949130020475216], [1.0041607466063347, 1.0008764535426944, 0.9977494873302298, 0.9948210383883811], [0.9910668888888888, 0.9864648194821681, 0.9824870729823999, 0.9786570625246848], [0.99545342920354, 0.9926535819477608, 0.9893929487982306, 0.987306627988448], [0.9887084429824562, 0.990768385036064, 0.9929673193571428, 0.9956618351223019], [1.0016421568627452, 1.0031573132403786, 1.004635601114845, 1.0081505759321885], [0.996474514038877, 0.9988839081608285, 0.9998060359449525, 1.0016626390761691], [0.9919461538461539, 0.9907747140010427, 0.990265918366087, 0.9904866610300764], [0.9893973044397463, 0.9913771651080118, 0.9923806144480737, 0.9923577330219134], [0.9866597803347279, 0.9856192316828172, 0.9839643792792508, 0.981648255000802], [0.9918729729729728, 0.989581161500932, 0.9869029455723002, 
0.982739068256569], [0.9905748971193415, 0.9868164168327203, 0.9824978476023475, 0.9772512855189207], [0.9949228571428572, 0.9940685742303786, 0.9922554001419904, 0.9889434443062358], [0.9959773326572009, 0.9974203441482189, 0.9980167180383014, 0.9971640796657188], [0.993986873747495, 0.9926190408986989, 0.9915302845181538, 0.9908329535937941], [0.9875049407114624, 0.9877916504179904, 0.9872450367327552, 0.9878788497198138], [0.9849124999999999, 0.986642451610792, 0.9890720772760822, 0.9929389365527063], [0.9906361165048543, 0.9944947045955613, 0.9984991269995733, 1.0025537078505125], [0.9945971098265899, 0.9986820837642745, 1.002913850011662, 1.0069777366918562], [0.9933298279158701, 0.9972741132820125, 1.0010506708838665, 1.0039829411901218], [0.9973719106463877, 1.002121963683759, 1.0065382800344538, 1.0101442753529453], [1.0005248579545456, 1.0041170727223405, 1.0077889431961873, 1.0103650884012239], [1.002235754716981, 1.0040900893273723, 1.0054385189595163, 1.00554703933086], [1.0042043314500941, 1.0052752353375103, 1.0055685584156355, 1.0046852316012012], [1.0004613084112148, 1.0012811213293187, 1.0022157000800445, 1.0024807453374471], [0.9952467592592592, 0.9956673241828241, 0.996572056317819, 0.9982552513147283], [0.9925144372693726, 0.9911303475302247, 0.9893863294243266, 0.9880632605738325], [0.9929345238095237, 0.9914259829372275, 0.989348000572145, 0.9871790748794577], [0.9986803278688525, 1.0006638786991855, 1.002479880774963, 1.0038503893344994], [0.9958882459312841, 1.000412611240472, 1.0047778966921308, 1.009071280869163], [0.9977757194244604, 1.0023599904606701, 1.0065945640378635, 1.010923034427298], [0.9985306451612902, 1.002412562535007, 1.006700380857133, 1.0107462328150327], [1.0028194991055457, 1.0056859091887558, 1.0086348629723783, 1.0113351938202026], [1.0063600000000001, 1.0092821869488537, 1.011693108172007, 1.0140829647626461], [1.0067622994652408, 1.007968160116615, 1.0093530761824885, 1.010384458159075], [1.0032265957446809, 
1.0038541513515786, 1.0042096132368392, 1.0044358688159574], [1.0, 1.0009802604953193, 1.0018977199162198, 1.0027723821582686], [0.9961686842105264, 0.9966701171654305, 0.9975947934352009, 0.9985519840742193], [0.997089005235602, 0.9991132274875183, 1.0016165622435091, 1.0043519138002235], [0.9976886718750001, 1.0000732879924954, 1.0031586778494888, 1.006509323030405], [0.9973277202072538, 0.9993837457306222, 1.0023990198243506, 1.0056677837430499], [0.998903700516351, 1.0009723177542074, 1.0038969119071253, 1.0070000120822429], [0.9988273972602739, 1.001546537568328, 1.0048448580402338, 1.0083567929128363], [0.9985037478705281, 0.9997791268935786, 1.0012514636935388, 1.0025741180135495], [0.9913986087689715, 0.9920875298595475, 0.9928059136320354, 0.992978954190421], [0.9905218120805368, 0.9905701878066662, 0.9902440846041161, 0.989273728184124], [0.9896330833333333, 0.9907154628302104, 0.9916612141927083, 0.9919945749871598], [0.9965502906976743, 0.9968229502343693, 0.9974229622829682, 0.9976301707323451], [0.9964528925619835, 0.9966529966278574, 0.9974854221094218, 0.9980968609764037], [0.9980427631578949, 0.9975826805098584, 0.997796340900938, 0.9980828428953492], [0.9974873977086743, 0.9972004936017483, 0.9969977448696202, 0.9971884484506496], [0.9993356443719413, 0.9981611706601096, 0.9975714459220608, 0.9971875669830338], [0.9998194805194803, 0.9982886918084867, 0.9969342772714374, 0.9956120527889154], [0.9990096370967742, 0.9989763366722006, 0.9985657265292537, 0.9982470179491354], [0.9982404895666132, 0.9985114728299817, 0.9983813124478953, 0.9981911070764032], [0.9975859649122808, 0.9979747123936206, 0.9983788692825434, 0.9985826385094745], [0.9991349206349206, 0.9995505021215441, 0.9997404914692328, 0.9997376610218558], [0.9975615536277602, 0.9983948777710254, 0.9987713297866576, 0.9988285108867693], [0.9970183098591551, 0.9983918376487483, 0.9990433417837197, 0.9995048506912534], [0.9924941860465116, 0.9937358481496807, 0.9934463134670732, 
0.9931674411995696], [0.9907225384615383, 0.9910442951192341, 0.9905639590347404, 0.9898200678700938], [0.990913396946565, 0.9910326865198841, 0.9905983299134783, 0.9895127227513957], [0.9944402503793626, 0.9954898449194327, 0.996502333619183, 0.9966890682210742], [0.9938890977443611, 0.9932849777994085, 0.9938917682204936, 0.9944993323771134], [0.9932621087928465, 0.9926399747751041, 0.9917065387615359, 0.9922321933001972], [0.9935767407407408, 0.9908700223324343, 0.9880544050071864, 0.986232835692002], [0.9986018041237112, 0.9959007841583679, 0.9939416493663558, 0.9909427460476378], [0.999128795620438, 0.9981075570224827, 0.9973206099927553, 0.9949225392941605], [0.9965028901734104, 0.9972623423368462, 0.998851662929679, 0.9994289976974848], [0.9934381974248927, 0.9933072588801344, 0.9958497681286097, 0.9976634195918549], [0.9929346671388102, 0.9927187317394057, 0.9953577398341067, 0.9978771105653688], [0.9917697478991598, 0.9909796686429265, 0.9932277291941879, 0.9950708915198947], [0.9910890581717453, 0.9915275654622253, 0.9946793230640891, 0.9976784950068501], [0.9899957534246573, 0.9901410298809821, 0.9923944909561804, 0.9945799069700797], [0.9920676390773406, 0.9914082820725618, 0.9930942970435471, 0.9942420919355541], [0.9924103494623657, 0.9890221726688925, 0.9878466041046419, 0.9855410777803358], [0.9931688829787233, 0.9867311975772486, 0.98221053033669, 0.9761086489373897], [0.9928165789473685, 0.9866687926012364, 0.9824754539604134, 0.9777491756619584], [0.9923063719115733, 0.9871199394960706, 0.98548454264376, 0.9844318177375503], [0.9923025641025642, 0.9905919840677124, 0.9924786002014101, 0.994871559552069], [0.9903810126582278, 0.9871443226839391, 0.985967480854475, 0.9825478821042387], [0.9889474406991261, 0.9796578939139179, 0.9727074345460492, 0.9635887469384349], [0.9930778739184177, 0.9886071147796389, 0.987002548834072, 0.9838415195139324], [0.9980397796817625, 1.0075160751692596, 1.019945790110171, 1.0317538702607065], [1.0078036363636362, 
1.0257429032755143, 1.0450855150638074, 1.062886088131486], [1.0117397094430993, 1.0219021344603574, 1.0339068877738051, 1.0442465016531157], [1.0030344350961538, 1.0015803351013126, 1.0012907716000632, 1.0004570652003566], [1.0006935339690106, 0.9936913837345348, 0.9880999826624175, 0.9821897075391509], [0.9948570247933884, 0.9858679733895619, 0.9793653979754936, 0.9716740643505657], [0.994147663551402, 0.983920390538685, 0.9775963579855108, 0.9708287796908518], [0.995996903935185, 0.9842697444249228, 0.9797851252441641, 0.9749990829940409], [0.9995474197247706, 0.9906155180318489, 0.9895245689684726, 0.9882863844116359], [1.0061161363636362, 1.0013610392115178, 1.0031178993862944, 1.002748228224037], [1.0129327313769751, 1.012726623768393, 1.0179225992770289, 1.0189408268673636], [1.0154249158249158, 1.0129076465705344, 1.0138428610826518, 1.0111722282836237], [1.0173221850613154, 1.0088556859802513, 1.0091865924746548, 1.0052760826622056], [1.0117043093922653, 1.0047122983428078, 1.005528165079539, 1.004207865880762], [1.0070977868852458, 1.0018137678053252, 1.0013751666139912, 0.9990930254028657], [1.012530097613883, 1.0081702902702585, 1.0094381902540714, 1.0085577730476496], [1.00786962943072, 1.003670687621945, 1.0027951973936597, 0.9987683097306606], [1.0162720824411133, 1.0174185201382868, 1.0210071580550553, 1.0217751499846084], [1.021056023454158, 1.031221110184711, 1.0428586564843285, 1.0536111993546506], [1.0257324654622741, 1.0351030632044167, 1.0457829850755866, 1.0562860837858232], [1.0228981991525423, 1.0262683497968303, 1.0315715096874485, 1.0360905711025072], [1.0173882787750792, 1.0123433736102838, 1.0098439274923305, 1.0064558048038197], [1.0206173442449842, 1.0186093445998385, 1.0193849603552358, 1.0192238254847397], [1.0241903157894738, 1.0248888322089449, 1.028997823506134, 1.0323100524558095], [1.02076527632951, 1.023742826661649, 1.0298191391941391, 1.0340616503095839], [1.0072370103092783, 1.007224467933813, 1.0099791100189515, 
1.0109901537731494], [1.0052948717948718, 1.0081127494522413, 1.012090781913323, 1.0149646855916274], [1.0112443961105424, 1.0191407360983145, 1.0248136869750009, 1.0301932111169256], [1.0237868474923235, 1.0353797270542742, 1.043610311364957, 1.0527057131489934], [1.022085244648318, 1.0330092362675785, 1.0442245381534774, 1.0557546109927727], [1.0185723469387757, 1.0254981263218648, 1.0335093072940034, 1.0424763660302832], [1.0198, 1.0264744815414033, 1.033651998462721, 1.041811068948908], [1.0213572778345248, 1.023466463161291, 1.0274758899521608, 1.0314207490472402], [1.020175, 1.0209992404008723, 1.0235076597992605, 1.025638616477773], [1.0156915902140673, 1.0153605163965722, 1.017108047087688, 1.0181845044462892], [1.0103616902834007, 1.0094603307597394, 1.0104540005753146, 1.0108052832335128], [1.0079329637096774, 1.0079183160277196, 1.009929643661636, 1.0111108753508617], [1.0075235160965794, 1.0061148219584881, 1.0066112963866782, 1.006148995886145], [1.0102992985971944, 1.0081784768324173, 1.0074841453910401, 1.0055035395238858], [1.0112999000999001, 1.0081980363897596, 1.0067603152420108, 1.0037750929719491], [1.0117959661354583, 1.0112930186345086, 1.012583779155432, 1.0123694839840343], [1.0125541666666666, 1.0140172667213583, 1.0174116810286185, 1.0197845222146558], [1.0132278437190898, 1.014960834480563, 1.0190163975714797, 1.0220448139886078], [1.0124161735700197, 1.0126522089283045, 1.0152855952978048, 1.0171930380253251], [1.0085923604309501, 1.008417940778501, 1.0107433856010994, 1.012331367293856], [1.006959941520468, 1.0065045238762, 1.0088039750314373, 1.0104954416293592], [1.007594752186589, 1.0066461937662066, 1.0079832153627044, 1.0084684097281085], [1.0103748789932236, 1.0084796010262238, 1.008837289112361, 1.0078479284311286], [1.0138317391304348, 1.0119368688395718, 1.0106534246547023, 1.0078010918897584], [1.015901832208293, 1.0151636620958373, 1.0143338694332222, 1.0116675771054127], [1.0163539145052833, 1.0154461949376263, 
1.01604824921885, 1.0146740511057255], [1.0157424568965518, 1.0158938911911743, 1.018998692631748, 1.0210878833433195], [1.0148882282712512, 1.0161532191963465, 1.020907781823997, 1.0253144410920307], [1.015544529019981, 1.0178013143978377, 1.0237224579739612, 1.0294586661162306], [1.0174042735042736, 1.0220499475290106, 1.0294972282903896, 1.0362478683407776], [1.0181454739336493, 1.023741699540979, 1.0317625767172698, 1.0387429685597096], [1.0185353358561968, 1.023536810660323, 1.0305470512517283, 1.036547199940167], [1.0119151693320791, 1.0132787312806317, 1.0171293652717242, 1.0202277611480217], [1.0077324438202249, 1.007268954050265, 1.008196637729645, 1.009008306814128], [1.0070147663551403, 1.0073435332578096, 1.00936466730489, 1.0110944168609708], [1.012105900186567, 1.0142360709215288, 1.0183942735156388, 1.021983929444522], [1.0146496744186047, 1.0199765127422658, 1.0269740490315973, 1.033725668563008], [1.013246285979573, 1.0163928548261234, 1.0214426648096564, 1.0263436491021904], [1.0120911955514362, 1.0134247336571394, 1.0163352172399358, 1.0188402643295786], [1.0117281452358928, 1.0117443590878012, 1.0131491893179683, 1.0137033651675789], [1.0101962211981568, 1.010277326183889, 1.011609822582979, 1.0124083959623578], [1.007578119266055, 1.0078959818117756, 1.0100956044734175, 1.011988430172087], [1.0047376712328766, 1.005256559089854, 1.0080178936179311, 1.0107483922174516], [1.0049331210191084, 1.0056690667609243, 1.0079680864539917, 1.009947803834596], [1.0116034639927074, 1.012023832232563, 1.0141284536196729, 1.015841674481994], [1.0214814390467462, 1.0227245673250174, 1.0257128156733257, 1.0282089163042312], [1.0289096826126956, 1.0316059445490968, 1.0359991069107235, 1.0403485517773996], [1.024185366972477, 1.0264191270229397, 1.0300950713045907, 1.0332592987119726], [1.0136127970749542, 1.0142711281216517, 1.0162582206864168, 1.0177496933103523], [1.0077333789954337, 1.0081777724370495, 1.0097163608552515, 1.0106515492952672], 
[1.0098163777372262, 1.011324551218096, 1.0142660691362144, 1.0168846137293177], [1.009985772727273, 1.0125361709192315, 1.0158554131527104, 1.0189866215190206], [1.008130331215971, 1.0096863583920355, 1.012224955980755, 1.0141903493875608], [1.0064784420289854, 1.0068361033495832, 1.0082193336903427, 1.009199681435148], [1.0057107400722023, 1.0054364832187785, 1.006255120759923, 1.0069409315036624], [1.0020385098743267, 1.0016473411145672, 1.0026864002891547, 1.0035859695243625], [1.000685152057245, 1.0002109605913883, 1.001010486769567, 1.0013791090220565], [1.0011747771836008, 1.0009379175575697, 1.0017635441159132, 1.0017988856708715]]}} \ No newline at end of file diff --git a/lectures/_static/lecture_specific/risk_aversion_or_mistaken_beliefs/eggs_backus.png b/lectures/_static/lecture_specific/risk_aversion_or_mistaken_beliefs/eggs_backus.png new file mode 100644 index 00000000..11e680a7 Binary files /dev/null and b/lectures/_static/lecture_specific/risk_aversion_or_mistaken_beliefs/eggs_backus.png differ diff --git a/lectures/_static/lecture_specific/risk_aversion_or_mistaken_beliefs/eggs_backus2.png b/lectures/_static/lecture_specific/risk_aversion_or_mistaken_beliefs/eggs_backus2.png new file mode 100644 index 00000000..ca18362c Binary files /dev/null and b/lectures/_static/lecture_specific/risk_aversion_or_mistaken_beliefs/eggs_backus2.png differ diff --git a/lectures/_static/lecture_specific/risk_aversion_or_mistaken_beliefs/fig2_tom.png b/lectures/_static/lecture_specific/risk_aversion_or_mistaken_beliefs/fig2_tom.png new file mode 100644 index 00000000..9c8953a1 Binary files /dev/null and b/lectures/_static/lecture_specific/risk_aversion_or_mistaken_beliefs/fig2_tom.png differ diff --git a/lectures/_static/lecture_specific/risk_aversion_or_mistaken_beliefs/fred_data.csv b/lectures/_static/lecture_specific/risk_aversion_or_mistaken_beliefs/fred_data.csv new file mode 100644 index 00000000..bf41c8d4 --- /dev/null +++ 
b/lectures/_static/lecture_specific/risk_aversion_or_mistaken_beliefs/fred_data.csv @@ -0,0 +1,862 @@ +DATE,GS1,GS5,GS10,DFII5,DFII10,USREC +1953-04-01,2.36,2.62,2.83,,,0 +1953-05-01,2.48,2.87,3.05,,,0 +1953-06-01,2.45,2.94,3.11,,,0 +1953-07-01,2.38,2.75,2.93,,,0 +1953-08-01,2.28,2.8,2.95,,,1 +1953-09-01,2.2,2.71,2.87,,,1 +1953-10-01,1.79,2.43,2.66,,,1 +1953-11-01,1.67,2.42,2.68,,,1 +1953-12-01,1.66,2.32,2.59,,,1 +1954-01-01,1.41,2.17,2.48,,,1 +1954-02-01,1.14,2.04,2.47,,,1 +1954-03-01,1.13,1.93,2.37,,,1 +1954-04-01,0.96,1.87,2.29,,,1 +1954-05-01,0.85,1.92,2.37,,,1 +1954-06-01,0.82,1.92,2.38,,,0 +1954-07-01,0.84,1.85,2.3,,,0 +1954-08-01,0.88,1.9,2.36,,,0 +1954-09-01,1.03,1.96,2.38,,,0 +1954-10-01,1.17,2.02,2.43,,,0 +1954-11-01,1.14,2.09,2.48,,,0 +1954-12-01,1.21,2.16,2.51,,,0 +1955-01-01,1.39,2.32,2.61,,,0 +1955-02-01,1.57,2.38,2.65,,,0 +1955-03-01,1.59,2.48,2.68,,,0 +1955-04-01,1.75,2.55,2.75,,,0 +1955-05-01,1.9,2.56,2.76,,,0 +1955-06-01,1.91,2.59,2.78,,,0 +1955-07-01,2.02,2.72,2.9,,,0 +1955-08-01,2.37,2.86,2.97,,,0 +1955-09-01,2.36,2.85,2.97,,,0 +1955-10-01,2.39,2.76,2.88,,,0 +1955-11-01,2.48,2.81,2.89,,,0 +1955-12-01,2.73,2.93,2.96,,,0 +1956-01-01,2.58,2.84,2.9,,,0 +1956-02-01,2.49,2.74,2.84,,,0 +1956-03-01,2.61,2.93,2.96,,,0 +1956-04-01,2.92,3.2,3.18,,,0 +1956-05-01,2.94,3.08,3.07,,,0 +1956-06-01,2.74,2.97,3.0,,,0 +1956-07-01,2.76,3.12,3.11,,,0 +1956-08-01,3.1,3.41,3.33,,,0 +1956-09-01,3.35,3.47,3.38,,,0 +1956-10-01,3.28,3.4,3.34,,,0 +1956-11-01,3.44,3.56,3.49,,,0 +1956-12-01,3.68,3.7,3.59,,,0 +1957-01-01,3.37,3.47,3.46,,,0 +1957-02-01,3.38,3.39,3.34,,,0 +1957-03-01,3.42,3.46,3.41,,,0 +1957-04-01,3.49,3.53,3.48,,,0 +1957-05-01,3.48,3.64,3.6,,,0 +1957-06-01,3.65,3.83,3.8,,,0 +1957-07-01,3.81,4.0,3.93,,,0 +1957-08-01,4.01,4.0,3.93,,,0 +1957-09-01,4.07,4.03,3.92,,,1 +1957-10-01,4.01,4.08,3.97,,,1 +1957-11-01,3.57,3.72,3.72,,,1 +1957-12-01,3.18,3.08,3.21,,,1 +1958-01-01,2.65,2.88,3.09,,,1 +1958-02-01,1.99,2.78,3.05,,,1 +1958-03-01,1.84,2.64,2.98,,,1 
+1958-04-01,1.45,2.46,2.88,,,1 +1958-05-01,1.37,2.41,2.92,,,0 +1958-06-01,1.23,2.46,2.97,,,0 +1958-07-01,1.61,2.77,3.2,,,0 +1958-08-01,2.5,3.29,3.54,,,0 +1958-09-01,3.05,3.69,3.76,,,0 +1958-10-01,3.19,3.78,3.8,,,0 +1958-11-01,3.1,3.7,3.74,,,0 +1958-12-01,3.29,3.82,3.86,,,0 +1959-01-01,3.36,4.01,4.02,,,0 +1959-02-01,3.54,3.96,3.96,,,0 +1959-03-01,3.61,3.99,3.99,,,0 +1959-04-01,3.72,4.12,4.12,,,0 +1959-05-01,3.96,4.35,4.31,,,0 +1959-06-01,4.07,4.5,4.34,,,0 +1959-07-01,4.39,4.58,4.4,,,0 +1959-08-01,4.42,4.57,4.43,,,0 +1959-09-01,5.0,4.9,4.68,,,0 +1959-10-01,4.8,4.72,4.53,,,0 +1959-11-01,4.81,4.75,4.53,,,0 +1959-12-01,5.14,5.01,4.69,,,0 +1960-01-01,5.03,4.92,4.72,,,0 +1960-02-01,4.66,4.69,4.49,,,0 +1960-03-01,4.02,4.31,4.25,,,0 +1960-04-01,4.04,4.29,4.28,,,0 +1960-05-01,4.21,4.49,4.35,,,1 +1960-06-01,3.36,4.12,4.15,,,1 +1960-07-01,3.2,3.79,3.9,,,1 +1960-08-01,2.95,3.62,3.8,,,1 +1960-09-01,3.07,3.61,3.8,,,1 +1960-10-01,3.04,3.76,3.89,,,1 +1960-11-01,3.08,3.81,3.93,,,1 +1960-12-01,2.86,3.67,3.84,,,1 +1961-01-01,2.81,3.67,3.84,,,1 +1961-02-01,2.93,3.66,3.78,,,1 +1961-03-01,2.88,3.6,3.74,,,0 +1961-04-01,2.88,3.57,3.78,,,0 +1961-05-01,2.87,3.47,3.71,,,0 +1961-06-01,3.06,3.81,3.88,,,0 +1961-07-01,2.92,3.84,3.92,,,0 +1961-08-01,3.06,3.96,4.04,,,0 +1961-09-01,3.06,3.9,3.98,,,0 +1961-10-01,3.05,3.8,3.92,,,0 +1961-11-01,3.07,3.82,3.94,,,0 +1961-12-01,3.18,3.91,4.06,,,0 +1962-01-01,3.28,3.94,4.08,,,0 +1962-02-01,3.28,3.89,4.04,,,0 +1962-03-01,3.06,3.68,3.93,,,0 +1962-04-01,2.99,3.6,3.84,,,0 +1962-05-01,3.03,3.66,3.87,,,0 +1962-06-01,3.03,3.64,3.91,,,0 +1962-07-01,3.29,3.8,4.01,,,0 +1962-08-01,3.2,3.71,3.98,,,0 +1962-09-01,3.06,3.7,3.98,,,0 +1962-10-01,2.98,3.64,3.93,,,0 +1962-11-01,3.0,3.6,3.92,,,0 +1962-12-01,3.01,3.56,3.86,,,0 +1963-01-01,3.04,3.58,3.83,,,0 +1963-02-01,3.01,3.66,3.92,,,0 +1963-03-01,3.03,3.68,3.93,,,0 +1963-04-01,3.11,3.74,3.97,,,0 +1963-05-01,3.12,3.72,3.93,,,0 +1963-06-01,3.2,3.81,3.99,,,0 +1963-07-01,3.48,3.89,4.02,,,0 +1963-08-01,3.53,3.89,4.0,,,0 
+1963-09-01,3.57,3.96,4.08,,,0 +1963-10-01,3.64,3.97,4.11,,,0 +1963-11-01,3.74,4.01,4.12,,,0 +1963-12-01,3.81,4.04,4.13,,,0 +1964-01-01,3.79,4.07,4.17,,,0 +1964-02-01,3.78,4.03,4.15,,,0 +1964-03-01,3.91,4.14,4.22,,,0 +1964-04-01,3.91,4.15,4.23,,,0 +1964-05-01,3.84,4.05,4.2,,,0 +1964-06-01,3.83,4.02,4.17,,,0 +1964-07-01,3.72,4.03,4.19,,,0 +1964-08-01,3.74,4.05,4.19,,,0 +1964-09-01,3.84,4.08,4.2,,,0 +1964-10-01,3.86,4.07,4.19,,,0 +1964-11-01,3.91,4.04,4.15,,,0 +1964-12-01,4.02,4.09,4.18,,,0 +1965-01-01,3.94,4.1,4.19,,,0 +1965-02-01,4.03,4.15,4.21,,,0 +1965-03-01,4.06,4.15,4.21,,,0 +1965-04-01,4.04,4.15,4.2,,,0 +1965-05-01,4.03,4.15,4.21,,,0 +1965-06-01,3.99,4.15,4.21,,,0 +1965-07-01,3.98,4.15,4.2,,,0 +1965-08-01,4.07,4.2,4.25,,,0 +1965-09-01,4.2,4.25,4.29,,,0 +1965-10-01,4.3,4.34,4.35,,,0 +1965-11-01,4.37,4.46,4.45,,,0 +1965-12-01,4.72,4.72,4.62,,,0 +1966-01-01,4.88,4.86,4.61,,,0 +1966-02-01,4.94,4.98,4.83,,,0 +1966-03-01,4.97,4.92,4.87,,,0 +1966-04-01,4.9,4.83,4.75,,,0 +1966-05-01,4.93,4.89,4.78,,,0 +1966-06-01,4.97,4.97,4.81,,,0 +1966-07-01,5.17,5.17,5.02,,,0 +1966-08-01,5.54,5.5,5.22,,,0 +1966-09-01,5.82,5.5,5.18,,,0 +1966-10-01,5.58,5.27,5.01,,,0 +1966-11-01,5.54,5.36,5.16,,,0 +1966-12-01,5.2,5.0,4.84,,,0 +1967-01-01,4.75,4.7,4.58,,,0 +1967-02-01,4.71,4.74,4.63,,,0 +1967-03-01,4.35,4.54,4.54,,,0 +1967-04-01,4.11,4.51,4.59,,,0 +1967-05-01,4.15,4.75,4.85,,,0 +1967-06-01,4.48,5.01,5.02,,,0 +1967-07-01,5.01,5.23,5.16,,,0 +1967-08-01,5.13,5.31,5.28,,,0 +1967-09-01,5.24,5.4,5.3,,,0 +1967-10-01,5.37,5.57,5.48,,,0 +1967-11-01,5.61,5.78,5.75,,,0 +1967-12-01,5.71,5.75,5.7,,,0 +1968-01-01,5.43,5.54,5.53,,,0 +1968-02-01,5.41,5.59,5.56,,,0 +1968-03-01,5.58,5.76,5.74,,,0 +1968-04-01,5.71,5.69,5.64,,,0 +1968-05-01,6.14,6.04,5.87,,,0 +1968-06-01,5.98,5.85,5.72,,,0 +1968-07-01,5.65,5.6,5.5,,,0 +1968-08-01,5.43,5.5,5.42,,,0 +1968-09-01,5.45,5.48,5.46,,,0 +1968-10-01,5.57,5.55,5.58,,,0 +1968-11-01,5.75,5.66,5.7,,,0 +1968-12-01,6.19,6.12,6.03,,,0 +1969-01-01,6.34,6.25,6.04,,,0 
+1969-02-01,6.41,6.34,6.19,,,0 +1969-03-01,6.34,6.41,6.3,,,0 +1969-04-01,6.26,6.3,6.17,,,0 +1969-05-01,6.42,6.54,6.32,,,0 +1969-06-01,7.04,6.75,6.57,,,0 +1969-07-01,7.6,7.01,6.72,,,0 +1969-08-01,7.54,7.03,6.69,,,0 +1969-09-01,7.82,7.57,7.16,,,0 +1969-10-01,7.64,7.51,7.1,,,0 +1969-11-01,7.89,7.53,7.14,,,0 +1969-12-01,8.17,7.96,7.65,,,0 +1970-01-01,8.1,8.17,7.79,,,1 +1970-02-01,7.59,7.82,7.24,,,1 +1970-03-01,6.97,7.21,7.07,,,1 +1970-04-01,7.06,7.5,7.39,,,1 +1970-05-01,7.75,7.97,7.91,,,1 +1970-06-01,7.55,7.85,7.84,,,1 +1970-07-01,7.1,7.59,7.46,,,1 +1970-08-01,6.98,7.57,7.53,,,1 +1970-09-01,6.73,7.29,7.39,,,1 +1970-10-01,6.43,7.12,7.33,,,1 +1970-11-01,5.51,6.47,6.84,,,1 +1970-12-01,5.0,5.95,6.39,,,0 +1971-01-01,4.57,5.89,6.24,,,0 +1971-02-01,3.89,5.56,6.11,,,0 +1971-03-01,3.69,5.0,5.7,,,0 +1971-04-01,4.3,5.65,5.83,,,0 +1971-05-01,5.04,6.28,6.39,,,0 +1971-06-01,5.64,6.53,6.52,,,0 +1971-07-01,6.04,6.85,6.73,,,0 +1971-08-01,5.8,6.55,6.58,,,0 +1971-09-01,5.41,6.14,6.14,,,0 +1971-10-01,4.91,5.93,5.93,,,0 +1971-11-01,4.67,5.78,5.81,,,0 +1971-12-01,4.6,5.69,5.93,,,0 +1972-01-01,4.28,5.59,5.95,,,0 +1972-02-01,4.27,5.69,6.08,,,0 +1972-03-01,4.67,5.87,6.07,,,0 +1972-04-01,4.96,6.17,6.19,,,0 +1972-05-01,4.64,5.85,6.13,,,0 +1972-06-01,4.93,5.91,6.11,,,0 +1972-07-01,4.96,5.97,6.11,,,0 +1972-08-01,4.98,6.02,6.21,,,0 +1972-09-01,5.52,6.25,6.55,,,0 +1972-10-01,5.52,6.18,6.48,,,0 +1972-11-01,5.27,6.12,6.28,,,0 +1972-12-01,5.52,6.16,6.36,,,0 +1973-01-01,5.89,6.34,6.46,,,0 +1973-02-01,6.19,6.6,6.64,,,0 +1973-03-01,6.85,6.8,6.71,,,0 +1973-04-01,6.85,6.67,6.67,,,0 +1973-05-01,6.89,6.8,6.85,,,0 +1973-06-01,7.31,6.69,6.9,,,0 +1973-07-01,8.39,7.33,7.13,,,0 +1973-08-01,8.82,7.63,7.4,,,0 +1973-09-01,8.31,7.05,7.09,,,0 +1973-10-01,7.4,6.77,6.79,,,0 +1973-11-01,7.57,6.92,6.73,,,0 +1973-12-01,7.27,6.8,6.74,,,1 +1974-01-01,7.42,6.95,6.99,,,1 +1974-02-01,6.88,6.82,6.96,,,1 +1974-03-01,7.76,7.31,7.21,,,1 +1974-04-01,8.62,7.92,7.51,,,1 +1974-05-01,8.78,8.18,7.58,,,1 +1974-06-01,8.67,8.1,7.54,,,1 
+1974-07-01,8.8,8.38,7.81,,,1 +1974-08-01,9.36,8.63,8.04,,,1 +1974-09-01,8.87,8.37,8.04,,,1 +1974-10-01,8.05,7.97,7.9,,,1 +1974-11-01,7.66,7.68,7.68,,,1 +1974-12-01,7.31,7.31,7.43,,,1 +1975-01-01,6.83,7.41,7.5,,,1 +1975-02-01,5.98,7.11,7.39,,,1 +1975-03-01,6.11,7.3,7.73,,,1 +1975-04-01,6.9,7.99,8.23,,,0 +1975-05-01,6.39,7.72,8.06,,,0 +1975-06-01,6.29,7.51,7.86,,,0 +1975-07-01,7.11,7.92,8.06,,,0 +1975-08-01,7.7,8.33,8.4,,,0 +1975-09-01,7.75,8.37,8.43,,,0 +1975-10-01,6.95,7.97,8.14,,,0 +1975-11-01,6.49,7.8,8.05,,,0 +1975-12-01,6.6,7.76,8.0,,,0 +1976-01-01,5.81,7.46,7.74,,,0 +1976-02-01,5.91,7.45,7.79,,,0 +1976-03-01,6.21,7.49,7.73,,,0 +1976-04-01,5.92,7.25,7.56,,,0 +1976-05-01,6.4,7.59,7.9,,,0 +1976-06-01,6.52,7.61,7.86,,,0 +1976-07-01,6.2,7.49,7.83,,,0 +1976-08-01,6.0,7.31,7.77,,,0 +1976-09-01,5.84,7.13,7.59,,,0 +1976-10-01,5.5,6.75,7.41,,,0 +1976-11-01,5.29,6.52,7.29,,,0 +1976-12-01,4.89,6.1,6.87,,,0 +1977-01-01,5.29,6.58,7.21,,,0 +1977-02-01,5.47,6.83,7.39,,,0 +1977-03-01,5.5,6.93,7.46,,,0 +1977-04-01,5.44,6.79,7.37,,,0 +1977-05-01,5.84,6.94,7.46,,,0 +1977-06-01,5.8,6.76,7.28,,,0 +1977-07-01,5.94,6.84,7.33,,,0 +1977-08-01,6.37,7.03,7.4,,,0 +1977-09-01,6.53,7.04,7.34,,,0 +1977-10-01,6.97,7.32,7.52,,,0 +1977-11-01,6.95,7.34,7.58,,,0 +1977-12-01,6.96,7.48,7.69,,,0 +1978-01-01,7.28,7.77,7.96,,,0 +1978-02-01,7.34,7.83,8.03,,,0 +1978-03-01,7.31,7.86,8.04,,,0 +1978-04-01,7.45,7.98,8.15,,,0 +1978-05-01,7.82,8.18,8.35,,,0 +1978-06-01,8.09,8.36,8.46,,,0 +1978-07-01,8.39,8.54,8.64,,,0 +1978-08-01,8.31,8.33,8.41,,,0 +1978-09-01,8.64,8.43,8.42,,,0 +1978-10-01,9.14,8.61,8.64,,,0 +1978-11-01,10.01,8.84,8.81,,,0 +1978-12-01,10.3,9.08,9.01,,,0 +1979-01-01,10.41,9.2,9.1,,,0 +1979-02-01,10.24,9.13,9.1,,,0 +1979-03-01,10.25,9.2,9.12,,,0 +1979-04-01,10.12,9.25,9.18,,,0 +1979-05-01,10.12,9.24,9.25,,,0 +1979-06-01,9.57,8.85,8.91,,,0 +1979-07-01,9.64,8.9,8.95,,,0 +1979-08-01,9.98,9.06,9.03,,,0 +1979-09-01,10.84,9.41,9.33,,,0 +1979-10-01,12.44,10.63,10.3,,,0 
+1979-11-01,12.39,10.93,10.65,,,0 +1979-12-01,11.98,10.42,10.39,,,0 +1980-01-01,12.06,10.74,10.8,,,0 +1980-02-01,13.92,12.6,12.41,,,1 +1980-03-01,15.82,13.47,12.75,,,1 +1980-04-01,13.3,11.84,11.47,,,1 +1980-05-01,9.39,9.95,10.18,,,1 +1980-06-01,8.16,9.21,9.78,,,1 +1980-07-01,8.65,9.53,10.25,,,1 +1980-08-01,10.24,10.84,11.1,,,0 +1980-09-01,11.52,11.62,11.51,,,0 +1980-10-01,12.49,11.86,11.75,,,0 +1980-11-01,14.15,12.83,12.68,,,0 +1980-12-01,14.88,13.25,12.84,,,0 +1981-01-01,14.08,12.77,12.57,,,0 +1981-02-01,14.57,13.41,13.19,,,0 +1981-03-01,13.71,13.41,13.12,,,0 +1981-04-01,14.32,13.99,13.68,,,0 +1981-05-01,16.2,14.63,14.1,,,0 +1981-06-01,14.86,13.95,13.47,,,0 +1981-07-01,15.72,14.79,14.28,,,0 +1981-08-01,16.72,15.56,14.94,,,1 +1981-09-01,16.52,15.93,15.32,,,1 +1981-10-01,15.38,15.41,15.15,,,1 +1981-11-01,12.41,13.38,13.39,,,1 +1981-12-01,12.85,13.6,13.72,,,1 +1982-01-01,14.32,14.65,14.59,,,1 +1982-02-01,14.73,14.54,14.43,,,1 +1982-03-01,13.95,13.98,13.86,,,1 +1982-04-01,13.98,14.0,13.87,,,1 +1982-05-01,13.34,13.75,13.62,,,1 +1982-06-01,14.07,14.43,14.3,,,1 +1982-07-01,13.24,14.07,13.95,,,1 +1982-08-01,11.43,13.0,13.06,,,1 +1982-09-01,10.85,12.25,12.34,,,1 +1982-10-01,9.32,10.8,10.91,,,1 +1982-11-01,9.16,10.38,10.55,,,1 +1982-12-01,8.91,10.22,10.54,,,0 +1983-01-01,8.62,10.03,10.46,,,0 +1983-02-01,8.92,10.26,10.72,,,0 +1983-03-01,9.04,10.08,10.51,,,0 +1983-04-01,8.98,10.02,10.4,,,0 +1983-05-01,8.9,10.03,10.38,,,0 +1983-06-01,9.66,10.63,10.85,,,0 +1983-07-01,10.2,11.21,11.38,,,0 +1983-08-01,10.53,11.63,11.85,,,0 +1983-09-01,10.16,11.43,11.65,,,0 +1983-10-01,9.81,11.28,11.54,,,0 +1983-11-01,9.94,11.41,11.69,,,0 +1983-12-01,10.11,11.54,11.83,,,0 +1984-01-01,9.9,11.37,11.67,,,0 +1984-02-01,10.04,11.54,11.84,,,0 +1984-03-01,10.59,12.02,12.32,,,0 +1984-04-01,10.9,12.37,12.63,,,0 +1984-05-01,11.66,13.17,13.41,,,0 +1984-06-01,12.08,13.48,13.56,,,0 +1984-07-01,12.03,13.27,13.36,,,0 +1984-08-01,11.82,12.68,12.72,,,0 +1984-09-01,11.58,12.53,12.52,,,0 
+1984-10-01,10.9,12.06,12.16,,,0 +1984-11-01,9.82,11.33,11.57,,,0 +1984-12-01,9.33,11.07,11.5,,,0 +1985-01-01,9.02,10.93,11.38,,,0 +1985-02-01,9.29,11.13,11.51,,,0 +1985-03-01,9.86,11.52,11.86,,,0 +1985-04-01,9.14,11.01,11.43,,,0 +1985-05-01,8.46,10.34,10.85,,,0 +1985-06-01,7.8,9.6,10.16,,,0 +1985-07-01,7.86,9.7,10.31,,,0 +1985-08-01,8.05,9.81,10.33,,,0 +1985-09-01,8.07,9.81,10.37,,,0 +1985-10-01,8.01,9.69,10.24,,,0 +1985-11-01,7.88,9.28,9.78,,,0 +1985-12-01,7.67,8.73,9.26,,,0 +1986-01-01,7.73,8.68,9.19,,,0 +1986-02-01,7.61,8.34,8.7,,,0 +1986-03-01,7.03,7.46,7.78,,,0 +1986-04-01,6.44,7.05,7.3,,,0 +1986-05-01,6.65,7.52,7.71,,,0 +1986-06-01,6.73,7.64,7.8,,,0 +1986-07-01,6.27,7.06,7.3,,,0 +1986-08-01,5.93,6.8,7.17,,,0 +1986-09-01,5.77,6.92,7.45,,,0 +1986-10-01,5.72,6.83,7.43,,,0 +1986-11-01,5.8,6.76,7.25,,,0 +1986-12-01,5.87,6.67,7.11,,,0 +1987-01-01,5.78,6.64,7.08,,,0 +1987-02-01,5.96,6.79,7.25,,,0 +1987-03-01,6.03,6.79,7.25,,,0 +1987-04-01,6.5,7.57,8.02,,,0 +1987-05-01,7.0,8.26,8.61,,,0 +1987-06-01,6.8,8.02,8.4,,,0 +1987-07-01,6.68,8.01,8.45,,,0 +1987-08-01,7.03,8.32,8.76,,,0 +1987-09-01,7.67,8.94,9.42,,,0 +1987-10-01,7.59,9.08,9.52,,,0 +1987-11-01,6.96,8.35,8.86,,,0 +1987-12-01,7.17,8.45,8.99,,,0 +1988-01-01,6.99,8.18,8.67,,,0 +1988-02-01,6.64,7.71,8.21,,,0 +1988-03-01,6.71,7.83,8.37,,,0 +1988-04-01,7.01,8.19,8.72,,,0 +1988-05-01,7.4,8.58,9.09,,,0 +1988-06-01,7.49,8.49,8.92,,,0 +1988-07-01,7.75,8.66,9.06,,,0 +1988-08-01,8.17,8.94,9.26,,,0 +1988-09-01,8.09,8.69,8.98,,,0 +1988-10-01,8.11,8.51,8.8,,,0 +1988-11-01,8.48,8.79,8.96,,,0 +1988-12-01,8.99,9.09,9.11,,,0 +1989-01-01,9.05,9.15,9.09,,,0 +1989-02-01,9.25,9.27,9.17,,,0 +1989-03-01,9.57,9.51,9.36,,,0 +1989-04-01,9.36,9.3,9.18,,,0 +1989-05-01,8.98,8.91,8.86,,,0 +1989-06-01,8.44,8.29,8.28,,,0 +1989-07-01,7.89,7.83,8.02,,,0 +1989-08-01,8.18,8.09,8.11,,,0 +1989-09-01,8.22,8.17,8.19,,,0 +1989-10-01,7.99,7.97,8.01,,,0 +1989-11-01,7.77,7.81,7.87,,,0 +1989-12-01,7.72,7.75,7.84,,,0 +1990-01-01,7.92,8.12,8.21,,,0 
+1990-02-01,8.11,8.42,8.47,,,0 +1990-03-01,8.35,8.6,8.59,,,0 +1990-04-01,8.4,8.77,8.79,,,0 +1990-05-01,8.32,8.74,8.76,,,0 +1990-06-01,8.1,8.43,8.48,,,0 +1990-07-01,7.94,8.33,8.47,,,0 +1990-08-01,7.78,8.44,8.75,,,1 +1990-09-01,7.76,8.51,8.89,,,1 +1990-10-01,7.55,8.33,8.72,,,1 +1990-11-01,7.31,8.02,8.39,,,1 +1990-12-01,7.05,7.73,8.08,,,1 +1991-01-01,6.64,7.7,8.09,,,1 +1991-02-01,6.27,7.47,7.85,,,1 +1991-03-01,6.4,7.77,8.11,,,1 +1991-04-01,6.24,7.7,8.04,,,0 +1991-05-01,6.13,7.7,8.07,,,0 +1991-06-01,6.36,7.94,8.28,,,0 +1991-07-01,6.31,7.91,8.27,,,0 +1991-08-01,5.78,7.43,7.9,,,0 +1991-09-01,5.57,7.14,7.65,,,0 +1991-10-01,5.33,6.87,7.53,,,0 +1991-11-01,4.89,6.62,7.42,,,0 +1991-12-01,4.38,6.19,7.09,,,0 +1992-01-01,4.15,6.24,7.03,,,0 +1992-02-01,4.29,6.58,7.34,,,0 +1992-03-01,4.63,6.95,7.54,,,0 +1992-04-01,4.3,6.78,7.48,,,0 +1992-05-01,4.19,6.69,7.39,,,0 +1992-06-01,4.17,6.48,7.26,,,0 +1992-07-01,3.6,5.84,6.84,,,0 +1992-08-01,3.47,5.6,6.59,,,0 +1992-09-01,3.18,5.38,6.42,,,0 +1992-10-01,3.3,5.6,6.59,,,0 +1992-11-01,3.68,6.04,6.87,,,0 +1992-12-01,3.71,6.08,6.77,,,0 +1993-01-01,3.5,5.83,6.6,,,0 +1993-02-01,3.39,5.43,6.26,,,0 +1993-03-01,3.33,5.19,5.98,,,0 +1993-04-01,3.24,5.13,5.97,,,0 +1993-05-01,3.36,5.2,6.04,,,0 +1993-06-01,3.54,5.22,5.96,,,0 +1993-07-01,3.47,5.09,5.81,,,0 +1993-08-01,3.44,5.03,5.68,,,0 +1993-09-01,3.36,4.73,5.36,,,0 +1993-10-01,3.39,4.71,5.33,,,0 +1993-11-01,3.58,5.06,5.72,,,0 +1993-12-01,3.61,5.15,5.77,,,0 +1994-01-01,3.54,5.09,5.75,,,0 +1994-02-01,3.87,5.4,5.97,,,0 +1994-03-01,4.32,5.94,6.48,,,0 +1994-04-01,4.82,6.52,6.97,,,0 +1994-05-01,5.31,6.78,7.18,,,0 +1994-06-01,5.27,6.7,7.1,,,0 +1994-07-01,5.48,6.91,7.3,,,0 +1994-08-01,5.56,6.88,7.24,,,0 +1994-09-01,5.76,7.08,7.46,,,0 +1994-10-01,6.11,7.4,7.74,,,0 +1994-11-01,6.54,7.72,7.96,,,0 +1994-12-01,7.14,7.78,7.81,,,0 +1995-01-01,7.05,7.76,7.78,,,0 +1995-02-01,6.7,7.37,7.47,,,0 +1995-03-01,6.43,7.05,7.2,,,0 +1995-04-01,6.27,6.86,7.06,,,0 +1995-05-01,6.0,6.41,6.63,,,0 +1995-06-01,5.64,5.93,6.17,,,0 
+1995-07-01,5.59,6.01,6.28,,,0 +1995-08-01,5.75,6.24,6.49,,,0 +1995-09-01,5.62,6.0,6.2,,,0 +1995-10-01,5.59,5.86,6.04,,,0 +1995-11-01,5.43,5.69,5.93,,,0 +1995-12-01,5.31,5.51,5.71,,,0 +1996-01-01,5.09,5.36,5.65,,,0 +1996-02-01,4.94,5.38,5.81,,,0 +1996-03-01,5.34,5.97,6.27,,,0 +1996-04-01,5.54,6.3,6.51,,,0 +1996-05-01,5.64,6.48,6.74,,,0 +1996-06-01,5.81,6.69,6.91,,,0 +1996-07-01,5.85,6.64,6.87,,,0 +1996-08-01,5.67,6.39,6.64,,,0 +1996-09-01,5.83,6.6,6.83,,,0 +1996-10-01,5.55,6.27,6.53,,,0 +1996-11-01,5.42,5.97,6.2,,,0 +1996-12-01,5.47,6.07,6.3,,,0 +1997-01-01,5.61,6.33,6.58,,,0 +1997-02-01,5.53,6.2,6.42,,,0 +1997-03-01,5.8,6.54,6.69,,,0 +1997-04-01,5.99,6.76,6.89,,,0 +1997-05-01,5.87,6.57,6.71,,,0 +1997-06-01,5.69,6.38,6.49,,,0 +1997-07-01,5.54,6.12,6.22,,,0 +1997-08-01,5.56,6.16,6.3,,,0 +1997-09-01,5.52,6.11,6.21,,,0 +1997-10-01,5.46,5.93,6.03,,,0 +1997-11-01,5.46,5.8,5.88,,,0 +1997-12-01,5.53,5.77,5.81,,,0 +1998-01-01,5.24,5.42,5.54,,,0 +1998-02-01,5.31,5.49,5.57,,,0 +1998-03-01,5.39,5.61,5.65,,,0 +1998-04-01,5.38,5.61,5.64,,,0 +1998-05-01,5.44,5.63,5.65,,,0 +1998-06-01,5.41,5.52,5.5,,,0 +1998-07-01,5.36,5.46,5.46,,,0 +1998-08-01,5.21,5.27,5.34,,,0 +1998-09-01,4.71,4.62,4.81,,,0 +1998-10-01,4.12,4.18,4.53,,,0 +1998-11-01,4.53,4.54,4.83,,,0 +1998-12-01,4.52,4.45,4.65,,,0 +1999-01-01,4.51,4.6,4.72,,,0 +1999-02-01,4.7,4.91,5.0,,,0 +1999-03-01,4.78,5.14,5.23,,,0 +1999-04-01,4.69,5.08,5.18,,,0 +1999-05-01,4.85,5.44,5.54,,,0 +1999-06-01,5.1,5.81,5.9,,,0 +1999-07-01,5.03,5.68,5.79,,,0 +1999-08-01,5.2,5.84,5.94,,,0 +1999-09-01,5.25,5.8,5.92,,,0 +1999-10-01,5.43,6.03,6.11,,,0 +1999-11-01,5.55,5.97,6.03,,,0 +1999-12-01,5.84,6.19,6.28,,,0 +2000-01-01,6.12,6.58,6.66,,,0 +2000-02-01,6.22,6.68,6.52,,,0 +2000-03-01,6.22,6.5,6.26,,,0 +2000-04-01,6.15,6.26,5.99,,,0 +2000-05-01,6.33,6.69,6.44,,,0 +2000-06-01,6.17,6.3,6.1,,,0 +2000-07-01,6.08,6.18,6.05,,,0 +2000-08-01,6.18,6.06,5.83,,,0 +2000-09-01,6.13,5.93,5.8,,,0 +2000-10-01,6.01,5.78,5.74,,,0 +2000-11-01,6.09,5.7,5.72,,,0 
+2000-12-01,5.6,5.17,5.24,,,0 +2001-01-01,4.81,4.86,5.16,,,0 +2001-02-01,4.68,4.89,5.1,,,0 +2001-03-01,4.3,4.64,4.89,,,0 +2001-04-01,3.98,4.76,5.14,,,1 +2001-05-01,3.78,4.93,5.39,,,1 +2001-06-01,3.58,4.81,5.28,,,1 +2001-07-01,3.62,4.76,5.24,,,1 +2001-08-01,3.47,4.57,4.97,,,1 +2001-09-01,2.82,4.12,4.73,,,1 +2001-10-01,2.33,3.91,4.57,,,1 +2001-11-01,2.18,3.97,4.65,,,1 +2001-12-01,2.22,4.39,5.09,,,0 +2002-01-01,2.16,4.34,5.04,,,0 +2002-02-01,2.23,4.3,4.91,,,0 +2002-03-01,2.57,4.74,5.28,,,0 +2002-04-01,2.48,4.65,5.21,,,0 +2002-05-01,2.35,4.49,5.16,,,0 +2002-06-01,2.2,4.19,4.93,,,0 +2002-07-01,1.96,3.81,4.65,,,0 +2002-08-01,1.76,3.29,4.26,,,0 +2002-09-01,1.72,2.94,3.87,,,0 +2002-10-01,1.65,2.95,3.94,,,0 +2002-11-01,1.49,3.05,4.05,,,0 +2002-12-01,1.45,3.03,4.03,,,0 +2003-01-01,1.36,3.05,4.05,1.65,2.29,0 +2003-02-01,1.3,2.9,3.9,1.24,1.99,0 +2003-03-01,1.24,2.78,3.81,1.09,1.94,0 +2003-04-01,1.27,2.93,3.96,1.36,2.18,0 +2003-05-01,1.18,2.52,3.57,1.18,1.91,0 +2003-06-01,1.01,2.27,3.33,0.91,1.72,0 +2003-07-01,1.12,2.87,3.98,1.3,2.11,0 +2003-08-01,1.31,3.37,4.45,1.48,2.32,0 +2003-09-01,1.24,3.18,4.27,1.29,2.19,0 +2003-10-01,1.25,3.19,4.29,1.21,2.08,0 +2003-11-01,1.34,3.29,4.3,1.27,1.96,0 +2003-12-01,1.31,3.27,4.27,1.23,1.98,0 +2004-01-01,1.24,3.12,4.15,1.09,1.89,0 +2004-02-01,1.24,3.07,4.08,0.86,1.76,0 +2004-03-01,1.19,2.79,3.83,0.52,1.47,0 +2004-04-01,1.43,3.39,4.35,1.02,1.9,0 +2004-05-01,1.78,3.85,4.72,1.34,2.09,0 +2004-06-01,2.12,3.93,4.73,1.41,2.15,0 +2004-07-01,2.1,3.69,4.5,1.29,2.02,0 +2004-08-01,2.02,3.47,4.28,1.12,1.86,0 +2004-09-01,2.12,3.36,4.13,1.1,1.8,0 +2004-10-01,2.23,3.35,4.1,0.97,1.73,0 +2004-11-01,2.5,3.53,4.19,0.9,1.68,0 +2004-12-01,2.67,3.6,4.23,0.92,1.67,0 +2005-01-01,2.86,3.71,4.22,1.13,1.72,0 +2005-02-01,3.03,3.77,4.17,1.08,1.63,0 +2005-03-01,3.3,4.17,4.5,1.29,1.79,0 +2005-04-01,3.32,4.0,4.34,1.23,1.71,0 +2005-05-01,3.33,3.85,4.14,1.28,1.65,0 +2005-06-01,3.36,3.77,4.0,1.39,1.67,0 +2005-07-01,3.64,3.98,4.18,1.67,1.88,0 +2005-08-01,3.87,4.12,4.26,1.71,1.89,0 
+2005-09-01,3.85,4.01,4.2,1.4,1.7,0 +2005-10-01,4.18,4.33,4.46,1.7,1.94,0 +2005-11-01,4.33,4.45,4.54,1.97,2.06,0 +2005-12-01,4.35,4.39,4.47,2.09,2.12,0 +2006-01-01,4.45,4.35,4.42,1.93,2.01,0 +2006-02-01,4.68,4.57,4.57,1.98,2.05,0 +2006-03-01,4.77,4.72,4.72,2.09,2.2,0 +2006-04-01,4.9,4.9,4.99,2.26,2.41,0 +2006-05-01,5.0,5.0,5.11,2.3,2.45,0 +2006-06-01,5.16,5.07,5.11,2.45,2.53,0 +2006-07-01,5.22,5.04,5.09,2.46,2.51,0 +2006-08-01,5.08,4.82,4.88,2.27,2.29,0 +2006-09-01,4.97,4.67,4.72,2.38,2.32,0 +2006-10-01,5.01,4.69,4.73,2.51,2.41,0 +2006-11-01,5.01,4.58,4.6,2.41,2.29,0 +2006-12-01,4.94,4.53,4.56,2.28,2.25,0 +2007-01-01,5.06,4.75,4.76,2.47,2.44,0 +2007-02-01,5.05,4.71,4.72,2.34,2.36,0 +2007-03-01,4.92,4.48,4.56,2.04,2.18,0 +2007-04-01,4.93,4.59,4.69,2.12,2.26,0 +2007-05-01,4.91,4.67,4.75,2.29,2.37,0 +2007-06-01,4.96,5.03,5.1,2.65,2.69,0 +2007-07-01,4.96,4.88,5.0,2.6,2.64,0 +2007-08-01,4.47,4.43,4.67,2.39,2.44,0 +2007-09-01,4.14,4.2,4.52,2.14,2.26,0 +2007-10-01,4.1,4.2,4.53,2.01,2.2,0 +2007-11-01,3.5,3.67,4.15,1.35,1.77,0 +2007-12-01,3.26,3.49,4.1,1.27,1.79,0 +2008-01-01,2.71,2.98,3.74,0.86,1.47,1 +2008-02-01,2.05,2.78,3.74,0.65,1.41,1 +2008-03-01,1.54,2.48,3.51,0.23,1.09,1 +2008-04-01,1.74,2.84,3.68,0.62,1.36,1 +2008-05-01,2.06,3.15,3.88,0.79,1.46,1 +2008-06-01,2.42,3.49,4.1,0.97,1.63,1 +2008-07-01,2.28,3.3,4.01,0.84,1.57,1 +2008-08-01,2.18,3.14,3.89,1.15,1.68,1 +2008-09-01,1.91,2.88,3.69,1.55,1.85,1 +2008-10-01,1.42,2.73,3.81,2.75,2.75,1 +2008-11-01,1.07,2.29,3.53,3.69,2.89,1 +2008-12-01,0.49,1.52,2.42,1.76,2.17,1 +2009-01-01,0.44,1.6,2.52,1.59,1.91,1 +2009-02-01,0.62,1.87,2.87,1.29,1.75,1 +2009-03-01,0.64,1.82,2.82,1.23,1.71,1 +2009-04-01,0.55,1.86,2.93,1.11,1.57,1 +2009-05-01,0.5,2.13,3.29,1.07,1.72,1 +2009-06-01,0.51,2.71,3.72,1.18,1.86,1 +2009-07-01,0.48,2.46,3.56,1.18,1.82,0 +2009-08-01,0.46,2.57,3.59,1.29,1.77,0 +2009-09-01,0.4,2.37,3.4,1.03,1.64,0 +2009-10-01,0.37,2.33,3.39,0.83,1.48,0 +2009-11-01,0.31,2.23,3.4,0.48,1.28,0 
+2009-12-01,0.37,2.34,3.59,0.43,1.36,0 +2010-01-01,0.35,2.48,3.73,0.42,1.37,0 +2010-02-01,0.35,2.36,3.69,0.42,1.42,0 +2010-03-01,0.4,2.43,3.73,0.56,1.51,0 +2010-04-01,0.45,2.58,3.85,0.62,1.5,0 +2010-05-01,0.37,2.18,3.42,0.41,1.31,0 +2010-06-01,0.32,2.0,3.2,0.34,1.26,0 +2010-07-01,0.29,1.76,3.01,0.34,1.24,0 +2010-08-01,0.26,1.47,2.7,0.13,1.02,0 +2010-09-01,0.26,1.41,2.65,0.13,0.91,0 +2010-10-01,0.23,1.18,2.54,-0.32,0.53,0 +2010-11-01,0.25,1.35,2.76,-0.21,0.67,0 +2010-12-01,0.29,1.93,3.29,0.21,1.04,0 +2011-01-01,0.27,1.99,3.39,0.06,1.06,0 +2011-02-01,0.29,2.26,3.58,0.25,1.24,0 +2011-03-01,0.26,2.11,3.41,-0.09,0.96,0 +2011-04-01,0.25,2.17,3.46,-0.14,0.86,0 +2011-05-01,0.19,1.84,3.17,-0.34,0.78,0 +2011-06-01,0.18,1.58,3.0,-0.38,0.76,0 +2011-07-01,0.19,1.54,3.0,-0.49,0.62,0 +2011-08-01,0.11,1.02,2.3,-0.75,0.14,0 +2011-09-01,0.1,0.9,1.98,-0.72,0.08,0 +2011-10-01,0.11,1.06,2.15,-0.63,0.19,0 +2011-11-01,0.11,0.91,2.01,-0.85,0.0,0 +2011-12-01,0.12,0.89,1.98,-0.78,-0.03,0 +2012-01-01,0.12,0.84,1.97,-0.92,-0.11,0 +2012-02-01,0.16,0.83,1.97,-1.11,-0.25,0 +2012-03-01,0.19,1.02,2.17,-1.03,-0.14,0 +2012-04-01,0.18,0.89,2.05,-1.06,-0.21,0 +2012-05-01,0.19,0.76,1.8,-1.12,-0.34,0 +2012-06-01,0.19,0.71,1.62,-1.05,-0.5,0 +2012-07-01,0.19,0.62,1.53,-1.15,-0.6,0 +2012-08-01,0.18,0.71,1.68,-1.19,-0.59,0 +2012-09-01,0.18,0.67,1.72,-1.47,-0.71,0 +2012-10-01,0.18,0.71,1.75,-1.47,-0.75,0 +2012-11-01,0.18,0.67,1.65,-1.38,-0.77,0 +2012-12-01,0.16,0.7,1.72,-1.4,-0.76,0 +2013-01-01,0.15,0.81,1.91,-1.39,-0.61,0 +2013-02-01,0.16,0.85,1.98,-1.39,-0.57,0 +2013-03-01,0.15,0.82,1.96,-1.43,-0.59,0 +2013-04-01,0.12,0.71,1.76,-1.38,-0.65,0 +2013-05-01,0.12,0.84,1.93,-1.14,-0.36,0 +2013-06-01,0.14,1.2,2.3,-0.59,0.25,0 +2013-07-01,0.12,1.4,2.58,-0.45,0.46,0 +2013-08-01,0.13,1.52,2.74,-0.33,0.55,0 +2013-09-01,0.12,1.6,2.81,-0.17,0.66,0 +2013-10-01,0.12,1.37,2.62,-0.41,0.43,0 +2013-11-01,0.12,1.37,2.72,-0.38,0.55,0 +2013-12-01,0.13,1.58,2.9,-0.09,0.74,0 +2014-01-01,0.12,1.65,2.86,-0.09,0.63,0 
+2014-02-01,0.12,1.52,2.71,-0.26,0.55,0 +2014-03-01,0.13,1.64,2.72,-0.14,0.56,0 +2014-04-01,0.11,1.7,2.71,-0.11,0.54,0 +2014-05-01,0.1,1.59,2.56,-0.34,0.37,0 +2014-06-01,0.1,1.68,2.6,-0.29,0.37,0 +2014-07-01,0.11,1.7,2.54,-0.27,0.28,0 +2014-08-01,0.11,1.63,2.42,-0.21,0.22,0 +2014-09-01,0.11,1.77,2.53,0.1,0.46,0 +2014-10-01,0.1,1.55,2.3,0.06,0.38,0 +2014-11-01,0.13,1.62,2.33,0.14,0.45,0 +2014-12-01,0.21,1.64,2.21,0.37,0.51,0 +2015-01-01,0.2,1.37,1.88,0.17,0.27,0 +2015-02-01,0.22,1.47,1.98,0.11,0.26,0 +2015-03-01,0.25,1.52,2.04,0.04,0.28,0 +2015-04-01,0.23,1.35,1.94,-0.26,0.08,0 +2015-05-01,0.24,1.54,2.2,-0.1,0.33,0 +2015-06-01,0.28,1.68,2.36,0.05,0.5,0 +2015-07-01,0.3,1.63,2.32,0.14,0.5,0 +2015-08-01,0.38,1.54,2.17,0.31,0.56,0 +2015-09-01,0.37,1.49,2.17,0.33,0.65,0 +2015-10-01,0.26,1.39,2.07,0.21,0.57,0 +2015-11-01,0.48,1.67,2.26,0.4,0.69,0 +2015-12-01,0.65,1.7,2.24,0.46,0.73,0 +2016-01-01,0.54,1.52,2.09,0.33,0.67,0 +2016-02-01,0.53,1.22,1.78,0.14,0.47,0 +2016-03-01,0.66,1.38,1.89,-0.03,0.34,0 +2016-04-01,0.56,1.26,1.81,-0.22,0.19,0 +2016-05-01,0.59,1.3,1.81,-0.22,0.21,0 +2016-06-01,0.55,1.17,1.64,-0.27,0.17,0 +2016-07-01,0.51,1.07,1.5,-0.32,0.04,0 +2016-08-01,0.57,1.13,1.56,-0.17,0.09,0 +2016-09-01,0.59,1.18,1.63,-0.17,0.12,0 +2016-10-01,0.66,1.27,1.76,-0.26,0.1,0 +2016-11-01,0.74,1.6,2.14,-0.07,0.32,0 +2016-12-01,0.87,1.96,2.49,0.15,0.56,0 +2017-01-01,0.83,1.92,2.43,0.03,0.42,0 +2017-02-01,0.82,1.9,2.42,0.01,0.4,0 +2017-03-01,1.01,2.01,2.48,0.18,0.49,0 +2017-04-01,1.04,1.82,2.3,0.08,0.39,0 +2017-05-01,1.12,1.84,2.3,0.09,0.47,0 +2017-06-01,1.2,1.77,2.19,0.14,0.46,0 +2017-07-01,1.22,1.87,2.32,0.23,0.55,0 +2017-08-01,1.23,1.78,2.21,0.16,0.43,0 +2017-09-01,1.28,1.8,2.2,0.12,0.37,0 +2017-10-01,1.4,1.98,2.36,0.25,0.5,0 +2017-11-01,1.56,2.05,2.35,0.3,0.5,0 +2017-12-01,1.7,2.18,2.4,0.42,0.5,0 +2018-01-01,1.8,2.38,2.58,0.45,0.54,0 +2018-02-01,1.96,2.6,2.86,0.63,0.76,0 +2018-03-01,2.06,2.63,2.84,0.61,0.75,0 +2018-04-01,2.15,2.7,2.87,0.65,0.74,0 
+2018-05-01,2.27,2.82,2.98,0.72,0.84,0 +2018-06-01,2.33,2.78,2.91,0.71,0.79,0 +2018-07-01,2.39,2.78,2.89,0.74,0.77,0 +2018-08-01,2.45,2.77,2.89,0.79,0.79,0 +2018-09-01,2.56,2.89,3.0,0.89,0.88,0 +2018-10-01,2.65,3.0,3.15,1.01,1.04,0 +2018-11-01,2.7,2.95,3.12,1.1,1.11,0 +2018-12-01,2.66,2.68,2.83,1.08,1.02,0 +2019-01-01,2.58,2.54,2.71,0.91,0.92,0 +2019-02-01,2.55,2.49,2.68,0.73,0.8,0 +2019-03-01,2.49,2.37,2.57,0.56,0.66,0 +2019-04-01,2.42,2.33,2.53,0.49,0.6,0 +2019-05-01,2.34,2.19,2.4,0.48,0.57,0 +2019-06-01,2.0,1.83,2.07,0.28,0.37,0 +2019-07-01,1.96,1.83,2.06,0.25,0.31,0 +2019-08-01,1.77,1.49,1.63,0.11,0.04,0 +2019-09-01,1.8,1.57,1.7,0.17,0.11,0 +2019-10-01,1.61,1.53,1.71,0.12,0.15,0 +2019-11-01,1.57,1.64,1.81,0.09,0.17,0 +2019-12-01,1.55,1.68,1.86,0.06,0.14,0 +2020-01-01,1.53,1.56,1.76,-0.09,0.04,0 +2020-02-01,1.41,1.32,1.5,-0.26,-0.11,0 +2020-03-01,0.33,0.59,0.87,-0.08,-0.12,1 +2020-04-01,0.18,0.39,0.66,-0.37,-0.45,1 +2020-05-01,0.16,0.34,0.67,-0.43,-0.44,0 +2020-06-01,0.18,0.34,0.73,-0.67,-0.54,0 +2020-07-01,0.15,0.28,0.62,-1.03,-0.83,0 +2020-08-01,0.13,0.27,0.65,-1.28,-1.01,0 +2020-09-01,0.13,0.27,0.68,-1.26,-0.98,0 +2020-10-01,0.13,0.34,0.79,-1.23,-0.92,0 +2020-11-01,0.12,0.39,0.87,-1.24,-0.84,0 +2020-12-01,0.1,0.39,0.93,-1.48,-0.98,0 +2021-01-01,0.1,0.45,1.08,-1.66,-1.0,0 +2021-02-01,0.07,0.54,1.26,-1.77,-0.92,0 +2021-03-01,0.08,0.82,1.61,-1.67,-0.66,0 +2021-04-01,0.06,0.86,1.64,-1.67,-0.71,0 +2021-05-01,0.05,0.82,1.62,-1.83,-0.85,0 +2021-06-01,0.07,0.84,1.52,-1.63,-0.82,0 +2021-07-01,0.08,0.76,1.32,-1.73,-1.01,0 +2021-08-01,0.07,0.77,1.28,-1.72,-1.07,0 +2021-09-01,0.08,0.86,1.37,-1.63,-0.97,0 +2021-10-01,0.11,1.11,1.58,-1.64,-0.95,0 +2021-11-01,0.18,1.2,1.56,-1.78,-1.06,0 +2021-12-01,0.3,1.23,1.47,-1.52,-0.99,0 +2022-01-01,0.55,1.54,1.76,-1.26,-0.69,0 +2022-02-01,1.0,1.81,1.93,-1.06,-0.52,0 +2022-03-01,1.34,2.11,2.13,-1.3,-0.72,0 +2022-04-01,1.89,2.78,2.75,-0.54,-0.14,0 +2022-05-01,2.06,2.87,2.9,-0.15,0.21,0 +2022-06-01,2.65,3.19,3.14,0.3,0.53,0 
+2022-07-01,3.02,2.96,2.9,0.38,0.53,0 +2022-08-01,3.28,3.03,2.9,0.34,0.39,0 +2022-09-01,3.89,3.7,3.52,1.25,1.14,0 +2022-10-01,4.43,4.18,3.98,1.71,1.59,0 +2022-11-01,4.73,4.06,3.89,1.61,1.52,0 +2022-12-01,4.68,3.76,3.62,1.45,1.36,0 +2023-01-01,4.69,3.64,3.53,1.41,1.29,0 +2023-02-01,4.93,3.94,3.75,1.5,1.41,0 +2023-03-01,4.68,3.82,3.66,1.45,1.36,0 +2023-04-01,4.68,3.54,3.46,1.23,1.19,0 +2023-05-01,4.91,3.59,3.57,1.44,1.36,0 +2023-06-01,5.24,3.95,3.75,1.81,1.55,0 +2023-07-01,5.37,4.14,3.9,1.93,1.6,0 +2023-08-01,5.37,4.31,4.17,2.07,1.83,0 +2023-09-01,5.44,4.49,4.38,2.23,2.04,0 +2023-10-01,5.42,4.77,4.8,2.46,2.41,0 +2023-11-01,5.28,4.49,4.5,2.24,2.2,0 +2023-12-01,4.96,4.0,4.02,1.88,1.84,0 +2024-01-01,4.79,3.98,4.06,1.76,1.79,0 +2024-02-01,4.92,4.19,4.21,1.87,1.93,0 +2024-03-01,4.99,4.2,4.21,1.82,1.9,0 +2024-04-01,5.14,4.56,4.54,2.11,2.15,0 +2024-05-01,5.16,4.5,4.48,2.17,2.15,0 +2024-06-01,5.11,4.32,4.31,2.1,2.05,0 +2024-07-01,4.9,4.16,4.25,1.98,1.97,0 +2024-08-01,4.43,3.71,3.87,1.74,1.76,0 +2024-09-01,4.03,3.5,3.72,1.53,1.62,0 +2024-10-01,4.2,3.91,4.1,1.68,1.81,0 +2024-11-01,4.33,4.23,4.36,1.84,2.03,0 +2024-12-01,4.23,4.25,4.39,1.89,2.09,0 diff --git a/lectures/_static/quant-econ.bib b/lectures/_static/quant-econ.bib index 2ac8fdcc..133a5ef4 100644 --- a/lectures/_static/quant-econ.bib +++ b/lectures/_static/quant-econ.bib @@ -3,6 +3,37 @@ Note: Extended Information (like abstracts, doi, url's etc.) can be found in quant-econ-extendedinfo.bib file in _static/ ### + +@article{Aiyagari1989, + author = {S. Rao Aiyagari}, + title = {How Should Taxes Be Set?}, + journal = {Quarterly Review, Federal Reserve Bank of Minneapolis}, + volume = {13}, + pages = {22--32}, + year = {1989} +} + +@article{ChariKehoe1999, + author = {V. V. Chari and Patrick J. Kehoe}, + title = {Optimal Fiscal and Monetary Policy}, + journal = {Handbook of Macroeconomics}, + volume = {1}, + pages = {1671--1745}, + year = {1999} +} + + +@article{SargentWallace1981, + author = {Thomas J. 
Sargent and Neil Wallace}, + title = {Some Unpleasant Monetarist Arithmetic}, + journal = {Federal Reserve Bank of Minneapolis Quarterly Review}, + volume = {5}, + number = {3}, + pages = {1--17}, + year = {1981} +} + + @article{wilson1973estimation, title={The estimation of parameters in multivariate time series models}, author={Wilson, G Tunnicliffe}, @@ -460,6 +491,15 @@ @Article{sargent91_equilibrium year =1991 } +@article{hansen1982generalized, + title={Generalized instrumental variables estimation of nonlinear rational expectations models}, + author={Hansen, Lars Peter and Singleton, Kenneth J}, + journal={Econometrica: Journal of the Econometric Society}, + pages={1269--1286}, + year={1982}, + publisher={JSTOR} +} + @Inproceedings{singleton87, author ={Kenneth J. Singleton}, title ={Asset Prices in a Time-Series Model with Disparately Informed @@ -1876,6 +1916,21 @@ @article{LucasPrescott1971 year = {1971} } +@techreport{LucasStokey1982, + author = {Lucas, Robert E., Jr. and Stokey, Nancy L.}, + title = {Optimal Fiscal and Monetary Policy in an Economy Without Capital}, + institution = {Northwestern University, Center for Mathematical Studies in + Economics and Management Science}, + year = {1982}, + type = {Discussion Paper}, + number = {532} +} + + + + + + @article{LucasStokey1983, author = {Lucas, Jr., Robert E and Stokey, Nancy L}, journal = {Journal of monetary Economics}, @@ -2249,6 +2304,13 @@ @article{Tauchen1986 year = {1986} } +@book{silber2012volcker, + title={Volcker: The triumph of persistence}, + author={Silber, William L}, + year={2012}, + publisher={Bloomsbury Publishing USA} +} + @incollection{Uhlig2001, author = {Uhlig, H}, @@ -2553,3 +2615,263 @@ @article{Jacobson_73 number = {2}, pages = {124-131} } + +@unpublished{DovisAccountingMFrevised, + author = {Bocola, Luigi and Chaumont, Gaston and Dovis, Alessandro and Kirpalani, Rishabh}, + title = {Accounting for Credibility: Fiscal-Monetary Interactions and the Credibility of Central Bank 
Mandates}, + year = {2026}, + note = {Draft, last revision: February 2026} +} + + +@article{DovisKirpalani2021, + author = {Alessandro Dovis and Rishabh Kirpalani}, + title = {Rules without Commitment: Reputation and Incentives}, + journal = {Review of Economic Studies}, + volume = {88}, + number = {6}, + pages = {2833--2856}, + year = {2021} +} + +@techreport{AtkesonKehoe2001, + author = {Andrew Atkeson and Patrick J. Kehoe}, + title = {The Advantage of Transparent Instruments of Monetary Policy}, + institution = {National Bureau of Economic Research}, + type = {Working Paper}, + number = {8681}, + year = {2001} +} + +@article{Lohmann1992, + author = {Susanne Lohmann}, + title = {Optimal Commitment in Monetary Policy: Credibility versus Flexibility}, + journal = {American Economic Review}, + volume = {82}, + number = {1}, + pages = {273--286}, + year = {1992} +} + +@article{DebortoliNunes2010, + author = {Davide Debortoli and Ricardo Nunes}, + title = {Fiscal Policy under Loose Commitment}, + journal = {Journal of Economic Theory}, + volume = {145}, + number = {3}, + pages = {1005--1032}, + year = {2010} +} + +@article{Abreu1988, + author = {Dilip Abreu}, + title = {On the Theory of Infinitely Repeated Games with Discounting}, + journal = {Econometrica}, + volume = {56}, + number = {2}, + pages = {383--396}, + year = {1988} +} + +@article{ChariKehoe1990, + author = {V. V. Chari and Patrick J. Kehoe}, + title = {Sustainable Plans}, + journal = {Journal of Political Economy}, + volume = {98}, + number = {4}, + pages = {783--802}, + year = {1990} +} + +@article{Chang1998, + author = {Roberto Chang}, + title = {Credible Monetary Policy in an Infinite Horizon Model: Recursive Approaches}, + journal = {Journal of Economic Theory}, + volume = {81}, + number = {2}, + pages = {431--461}, + year = {1998} +} + +@article{Leeper1991, + author = {Eric M. 
Leeper}, + title = {Equilibria under `Active' and `Passive' Monetary and Fiscal Policies}, + journal = {Journal of Monetary Economics}, + volume = {27}, + number = {1}, + pages = {129--147}, + year = {1991} +} + +@article{Bianchi2013, + author = {Francesco Bianchi}, + title = {Regime Switches, Agents' Beliefs, and Post-World War {II} {U.S.} Macroeconomic Dynamics}, + journal = {Review of Economic Studies}, + volume = {80}, + number = {2}, + pages = {463--490}, + year = {2013} +} + +@article{BianchiIlut2017, + author = {Francesco Bianchi and Cosmin Ilut}, + title = {Monetary/Fiscal Policy Mix and Agents' Beliefs}, + journal = {Review of Economic Dynamics}, + volume = {26}, + pages = {113--139}, + year = {2017} +} + +@book{Cochrane2023, + author = {John H. Cochrane}, + title = {The Fiscal Theory of the Price Level}, + publisher = {Princeton University Press}, + year = {2023} +} + +@article{Aiyagari2002, + author = {S. Rao Aiyagari and Albert Marcet and Thomas J. Sargent and Juha Sepp{\"a}l{\"a}}, + title = {Optimal Taxation without State-Contingent Debt}, + journal = {Journal of Political Economy}, + volume = {110}, + number = {6}, + pages = {1220--1254}, + year = {2002} +} + +@incollection{Sargent1982, + author = {Thomas J. Sargent}, + title = {The Ends of Four Big Inflations}, + booktitle = {Inflation: Causes and Effects}, + editor = {Robert E. Hall}, + publisher = {University of Chicago Press}, + pages = {41--97}, + year = {1982} +} + +@book{Blinder2022, + author = {Alan S. Blinder}, + title = {A Monetary and Fiscal History of the {United States}, 1961--2021}, + publisher = {Princeton University Press}, + year = {2022} +} + +@book{KehoeNicolini2022, + author = {Timothy J. 
Kehoe and Juan Pablo Nicolini}, + title = {A Monetary and Fiscal History of {Latin America}, 1960--2017}, + publisher = {University of Minnesota Press}, + year = {2022} +} + +@incollection{PerezReynaOsorio2017, + author = {David P{\'e}rez-Reyna and Daniel Osorio-Rodr{\'i}guez}, + title = {The History of {Colombia}}, + booktitle = {A Monetary and Fiscal History of {Latin America}, 1960--2017}, + editor = {Timothy J. Kehoe and Juan Pablo Nicolini}, + publisher = {University of Minnesota Press}, + year = {2022} +} + +@incollection{CaputoSaravia2018, + author = {Rodrigo Caputo and Diego Saravia}, + title = {The History of {Chile}}, + booktitle = {A Monetary and Fiscal History of {Latin America}, 1960--2017}, + editor = {Timothy J. Kehoe and Juan Pablo Nicolini}, + publisher = {University of Minnesota Press}, + year = {2022} +} + +@incollection{Sargent2024, + author = {Lars Ljungqvist and Thomas J. Sargent}, + title = {Credible Government Policies, {I}}, + booktitle = {Recursive Macroeconomic Theory}, + publisher = {MIT Press}, + edition = {4}, + chapter = {23}, + year = {2018} +} + +@article{hicks1937mr, + author = {John R. Hicks}, + title = {Mr. {K}eynes and the ``{C}lassics''; A Suggested Interpretation}, + journal = {Econometrica}, + volume = {5}, + number = {2}, + pages = {147--159}, + year = {1937} +} + +@article{tobin1992old, + author = {James Tobin}, + title = {An Old {K}eynesian Counterattacks}, + journal = {Eastern Economic Journal}, + volume = {18}, + number = {4}, + pages = {387--400}, + year = {1992} +} + +@article{hansen1983stochastic, + author = {Lars Peter Hansen and Kenneth J. 
Singleton}, + title = {Stochastic Consumption, Risk Aversion, and the Temporal Behavior of Asset Returns}, + journal = {Journal of Political Economy}, + volume = {91}, + number = {2}, + pages = {249--265}, + year = {1983} +} + +@article{Weil_1989, + author = {Philippe Weil}, + title = {The Equity Premium Puzzle and the Risk-Free Rate Puzzle}, + journal = {Journal of Monetary Economics}, + volume = {24}, + number = {3}, + pages = {401--421}, + year = {1989} +} + +@article{hansen1995discounted, + author = {Lars Peter Hansen and Thomas J. Sargent and Tallarini, Jr., Thomas D.}, + title = {Robust Permanent Income and Pricing}, + journal = {Review of Economic Studies}, + volume = {66}, + number = {4}, + pages = {873--907}, + year = {1999} +} + +@book{Sargent_Stachurski_2025, + author = {Thomas J. Sargent and John Stachurski}, + title = {Dynamic Programming: Finite States}, + publisher = {Cambridge University Press}, + year = {2025}, + doi = {10.1017/9781009540780} +} + +@unpublished{piazzesi2015trend, + author = {Monika Piazzesi and Juliana Salomao and Martin Schneider}, + title = {Trend and Cycle in Bond Premia}, + note = {Working paper, Stanford University, March 2015}, + year = {2015} +} + +@article{hansen2020twisted, + author = {Lars Peter Hansen and Thomas J. 
Sargent}, + title = {Macroeconomic Uncertainty Prices when Beliefs are Tenuous}, + journal = {Journal of Econometrics}, + volume = {223}, + number = {1}, + pages = {222--250}, + year = {2021} +} + +@article{szoke2022estimating, + author = {B{\'a}lint Sz{\H{o}}ke}, + title = {Estimating Robustness}, + journal = {Journal of Economic Theory}, + volume = {199}, + pages = {105225}, + year = {2022}, + doi = {10.1016/j.jet.2021.105225} +} diff --git a/lectures/_toc.yml b/lectures/_toc.yml index 01411dd6..072a56c3 100644 --- a/lectures/_toc.yml +++ b/lectures/_toc.yml @@ -61,7 +61,11 @@ parts: chapters: - file: lucas_model - file: asset_pricing_lph + - file: hansen_richard_1987 + - file: hansen_jagannathan_1991 - file: black_litterman + - file: doubts_or_variability + - file: risk_aversion_or_mistaken_beliefs - file: BCG_complete_mkts - file: BCG_incomplete_mkts - caption: Dynamic Programming Squared @@ -78,6 +82,7 @@ parts: - file: amss3 - file: chang_ramsey - file: chang_credible + - file: dovis_accounting_mf - caption: Other numbered: true chapters: diff --git a/lectures/cagan_rational_expectations.md b/lectures/cagan_rational_expectations.md index af6bec8e..45a49005 100644 --- a/lectures/cagan_rational_expectations.md +++ b/lectures/cagan_rational_expectations.md @@ -35,8 +35,8 @@ in his famous study of hyperinflation. {cite:t}`sargent1973rational` pointed out that under assumptions making Cagan's adaptive expectations equivalent to rational expectations, Cagan's -estimator of $\alpha$ — the slope of log real balances with respect to expected -inflation — is not statistically consistent. +estimator of $\alpha$ -- the slope of log real balances with respect to expected +inflation -- is not statistically consistent. This inconsistency matters because of a paradox that emerged when Cagan used his estimates of $\alpha$ to calculate the sustained rates of inflation that would @@ -47,11 +47,11 @@ That "optimal" rate is $-1/\alpha$. 
For each of the seven hyperinflations in his sample, the reciprocal of Cagan's estimate of $-\alpha$ turned out to be -less — and often very much less — than the actual average rate of inflation, +less -- and often very much less -- than the actual average rate of inflation, suggesting that the creators of money expanded the money supply at rates far exceeding the revenue-maximizing rate. -A natural explanation is that this paradox is a statistical artifact — a +A natural explanation is that this paradox is a statistical artifact -- a consequence of biased estimates of $\alpha$. Table 1 reproduces the relevant data from Cagan. @@ -465,7 +465,7 @@ plims = [plim_alpha_cagan(a, λ, σ_ε2, σ_η2, σ_εη) for a in α_plot] ws_limit = -λ / (1.0 - λ) fig, ax = plt.subplots() -ax.plot(α_plot, α_plot, 'k--', lw=1.5, label='No bias (45° line)') +ax.plot(α_plot, α_plot, 'k--', lw=1.5, label=r'No bias (45$\degree$ line)') label = rf'$\operatorname{{plim}}\hat\alpha$, $\lambda={λ}$' ax.plot(α_plot, plims, lw=2, label=label) ax.axhline(ws_limit, color='r', ls=':', lw=1.5, @@ -641,7 +641,7 @@ Equation {eq}`eq27` is a vector first-order autoregression, first-order moving average process. The random variables $a_{1t}$, $a_{2t}$ are the innovations in -the $x$ and $\mu$ processes, respectively — the one-period-ahead forecasting errors +the $x$ and $\mu$ processes, respectively -- the one-period-ahead forecasting errors for $x_t$ and $\mu_t$. The $a$'s are related to the $\varepsilon$'s and $\eta$'s @@ -770,8 +770,8 @@ L(\lambda,\,\sigma_{11},\,\sigma_{12},\,\sigma_{22}\mid\mu_t,\,x_t) \exp\!\left(-\tfrac{1}{2}\sum_{t=1}^{T} a_t' D_a^{-1} a_t\right). 
``` -Given initial values for $(a_{10}, a_{20})$ — equivalently for $(\varepsilon_0, -\eta_0)$ — and given a value of $\lambda$, equation {eq}`eq26` or {eq}`eq27` can be +Given initial values for $(a_{10}, a_{20})$ -- equivalently for $(\varepsilon_0, +\eta_0)$ -- and given a value of $\lambda$, equation {eq}`eq26` or {eq}`eq27` can be used to solve for $a_t$, $t = 1, \ldots, T$. (We take $a_{10} = a_{20} = 0$.) @@ -807,8 +807,8 @@ That this must be so can be seen by inspecting representation On the basis of the *four* parameters $\lambda$, $\sigma_{11}$, $\sigma_{12}$, and -$\sigma_{22}$ that are identified by {eq}`eq27` — i.e., that characterize the -likelihood function {eq}`eq32` — we can think of attempting to estimate the *five* +$\sigma_{22}$ that are identified by {eq}`eq27` -- i.e., that characterize the +likelihood function {eq}`eq32` -- we can think of attempting to estimate the *five* parameters of the model: $\alpha$, $\lambda$, $\sigma_\varepsilon^2$, $\sigma_\eta^2$, and $\sigma_{\varepsilon\eta}$. @@ -1124,7 +1124,7 @@ def compute_innovations(x, μ, λ): a_{1t} = Δx_t + λ a_{1,t-1} a_{2t} = μ_t - x_t + a_{1t} - Only λ is required — α does not enter the innovation extraction. + Only λ is required -- α does not enter the innovation extraction. Returns arrays a1 and a2 of length T. 
""" @@ -1378,12 +1378,12 @@ $\sigma_{\varepsilon\eta} = 0$: | Country | $\hat\lambda$ | $\hat\alpha$ | $\hat\sigma_{11}$ | $\hat\sigma_{12}$ | $\hat\sigma_{22}$ | |---------|:---:|:---:|:---:|:---:|:---:| -| Germany (Oct '20–Jul '23) | .677 (.053) | −5.97 (4.62) | .0625 | .0158 | .0091 | -| Austria (Feb '21–Aug '22) | .754 (.059) | −0.31 (1.57) | .0385 | .0148 | .0085 | -| Greece (Feb '43–Aug '44) | .459 (.088) | −4.09 (2.97) | .0675 | .0245 | .0279 | -| Hungary I (Aug '22–Feb '24) | .418 (.067) | −1.84 (0.40) | .0362 | .0089 | .0060 | -| Russia (Feb '22–Jan '24) | .626 (.073) | −9.75 (10.74)| .0524 | .0138 | .0205 | -| Poland (May '22–Nov '23) | .536 (.072) | −2.53 (0.86) | .0566 | .0149 | .0089 | +| Germany (Oct '20-Jul '23) | .677 (.053) | -5.97 (4.62) | .0625 | .0158 | .0091 | +| Austria (Feb '21-Aug '22) | .754 (.059) | -0.31 (1.57) | .0385 | .0148 | .0085 | +| Greece (Feb '43-Aug '44) | .459 (.088) | -4.09 (2.97) | .0675 | .0245 | .0279 | +| Hungary I (Aug '22-Feb '24) | .418 (.067) | -1.84 (0.40) | .0362 | .0089 | .0060 | +| Russia (Feb '22-Jan '24) | .626 (.073) | -9.75 (10.74)| .0524 | .0138 | .0205 | +| Poland (May '22-Nov '23) | .536 (.072) | -2.53 (0.86) | .0566 | .0149 | .0089 | Standard errors in parentheses. @@ -1448,7 +1448,7 @@ axes[1].errorbar(range(len(countries)), α_ml, yerr=[2*s for s in α_se], axes[1].axhline(0, color='k', lw=0.7, ls='--') axes[1].set_xticks(range(len(countries))) axes[1].set_xticklabels(countries, rotation=30) -axes[1].set_ylabel(r'$\hat\alpha$ (±2 s.e.)') +axes[1].set_ylabel(r'$\hat\alpha$ ($\pm$2 s.e.)') plt.tight_layout() plt.show() @@ -1509,7 +1509,7 @@ The main results of this paper are: simultaneously. 2. A bivariate Wold representation with a triangular structure shows that - inflation Granger-causes money creation, but not vice versa — consistent with + inflation Granger-causes money creation, but not vice versa -- consistent with empirical findings that feedback runs from inflation to money creation. 3. 
The structural parameter $\alpha$ is *not identifiable* from the likelihood @@ -1523,7 +1523,7 @@ The main results of this paper are: 4. The large standard errors mean that confidence intervals of two standard errors on each side of the point estimates include values of $\alpha$ that would imply - money creators were maximizing seignorage revenue — potentially explaining the + money creators were maximizing seignorage revenue -- potentially explaining the paradox noted by Cagan. 5. Likelihood-ratio overfitting tests do not decisively reject the one-parameter @@ -1556,9 +1556,9 @@ def bivariate_ma1_moments(α, λ, σ_ε2=1.0, σ_η2=0.5, σ_εη=0.0): Returns: - cxx : dict with keys 0, 1 — autocovariances of Δx - cμμ : dict with keys 0, 1 — autocovariances of Δμ - cxμ : dict with keys -1, 0, 1 — cross-covariances E[Δx_t Δμ_{t-τ}] + cxx : dict with keys 0, 1 -- autocovariances of Δx + cμμ : dict with keys 0, 1 -- autocovariances of Δμ + cxμ : dict with keys -1, 0, 1 -- cross-covariances E[Δx_t Δμ_{t-τ}] """ denom = λ + α * (1.0 - λ) if np.isclose(denom, 0.0): @@ -1728,8 +1728,8 @@ for T in [100, 500]: Δx_s = np.diff(x_s) λ_h, _ = univariate_ma1_mle(Δx_s) λ_hats.append(λ_h) - print(f"T={T:4d}: mean λ̂ = {np.mean(λ_hats):.4f}, " - f"std(λ̂) = {np.std(λ_hats):.4f}") + print(f"T={T:4d}: mean λ_hat = {np.mean(λ_hats):.4f}, " + f"std(λ_hat) = {np.std(λ_hats):.4f}") ``` The standard deviation shrinks roughly as $1/\sqrt{T}$, consistent with diff --git a/lectures/doubts_or_variability.md b/lectures/doubts_or_variability.md new file mode 100644 index 00000000..6f2c5624 --- /dev/null +++ b/lectures/doubts_or_variability.md @@ -0,0 +1,3190 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.17.1 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(doubts_or_variability)= +```{raw} jupyter +
+ + QuantEcon + +
+``` + +# Doubts or Variability? + +```{contents} Contents +:depth: 2 +``` + +> No one has found risk aversion parameters of 50 or 100 in the diversification of +> individual portfolios, in the level of insurance deductibles, in the wage premiums +> associated with occupations with high earnings risk, or in the revenues raised by +> state-operated lotteries. It +> would be good to have the equity premium resolved, but I think we need to look beyond high +> estimates of risk aversion to do it. -- Robert E. Lucas Jr., {cite}`Lucas_2003` + +## Overview + +This lecture describes machinery that empirical macro-finance economists have used to evaluate the fits of structural statistical models that link asset prices to aggregate consumption. + +The Lucas asset pricing model {cite}`Lucas1978` functions as a benchmark that motivates much of this work. + +```{note} +New Keynesians call the consumption Euler equation for a one-period risk-free bond in the Lucas {cite}`Lucas1978` model the **IS curve**. + +The distinguished **old Keynesian** disapproved of that name because the object it described was so remote from the investment function that was an important component of the IS curve of John R. Hicks {cite}`hicks1937mr` that Tobin used. + +See {cite}`tobin1992old`. +``` + + +In two classic papers, Lars Peter Hansen and Kenneth Singleton used the method of maximum likelihood +{cite}`hansen1983stochastic` and a generalized method of moments {cite}`hansen1982generalized` to investigate how well Lucas's model fit some post WWII data. + +The Hansen-Singleton papers systematically organized evidence about directions in which Lucas's model misfit the data that macroeconomists subsequently called + +- an **equity premium** puzzle +- a **risk-free rate** puzzle + +```{note} +{cite:t}`MehraPrescott1985` is widely credited for naming the **equity premium** puzzle. + +{cite:t}`Weil_1989` is widely credited for naming the **risk-free rate** puzzle. 
+ +``` + +These *puzzles* are just ways of summarizing particular dimensions along which a particular asset pricing model -- such as Lucas's -- fails empirically. + +They are thus special cases of specification failures detected by statistical diagnostics constructed earlier by {cite:t}`hansen1983stochastic` and {cite:t}`hansen1982generalized`. + +Macro-finance models that purport to resolve such puzzles all do so by changing features of the economic environment assumed by Lucas {cite}`Lucas1978`. + +Many important papers have proceeded by altering the *preferences* that Lucas had imputed to a representative agent. + +Hansen-Jagannathan bounds are a key tool for evaluating how well such re-specifications do in +correcting those misfits of Lucas's 1978 model. + + +This lecture begins with a description of the {cite:t}`Hansen_Jagannathan_1991` machinery. + +After doing that, we proceed to describe a line of research that altered Lucas's preference specification in ways that we can think of as being designed with the Hansen-Jagannathan bounds in mind. + + +We'll organize much of this lecture around parts of the paper by Thomas Tallarini {cite}`Tall2000`. + +His paper is particularly enlightening for macro-finance researchers because it showed that a recursive preference specification could fit both the equity premium and the risk-free rate, thus *resolving* both of the puzzles mentioned above. + +But like any good paper in applied economics, in answering some questions (i.e., resolving some puzzles), Tallarini's paper naturally posed new ones. + +Thus, Tallarini's puzzles-resolving required setting the risk-aversion coefficient $\gamma$ to around 50 for a random-walk consumption model and around 75 for a trend-stationary model, exactly the range that provoked the skepticism in the above quote from {cite:t}`Lucas_2003`. + +This brings us to the next parts of this lecture. 
+ +Lucas's skeptical response to Tallarini's explanation of the two puzzles led +{cite:t}`BHS_2009` to ask whether those large $\gamma$ values really measure aversion to atemporal risk, or whether they instead measure the agent's doubts about the underlying probability model. + +Their answer, and the theme of the remaining parts of this lecture, is that much of what looks like "risk aversion" can be reinterpreted as **model uncertainty**. + +The same recursion that defines Tallarini's risk-sensitive agent is observationally equivalent to another recursion that expresses an agent's concern that the probability model governing consumption growth may be wrong. + +Under this reading, a parameter value that indicates extreme risk aversion in one interpretation of the recursion indicates concerns about *misspecification* in another interpretation of the same recursion. + +{cite:t}`BHS_2009` show that modest amounts of model uncertainty can substitute for large amounts of risk aversion in terms of choices and effects on asset prices. + + +This reinterpretation changes the welfare question that asset prices answer. + +Do large risk premia measure the benefits from reducing well-understood aggregate fluctuations, or do they measure benefits from reducing doubts about the model describing consumption growth? + + +To proceed, we begin by describing {cite:t}`Hansen_Jagannathan_1991` bounds, then specify the statistical environment, lay out four related preference specifications and the connections among them, and finally revisit Tallarini's calibration through the lens of detection-error probabilities. + +Along the way, we draw on ideas and techniques from + +- {doc}`Asset Pricing: Finite State Models `, where we introduce stochastic discount factors, and +- {doc}`Likelihood Ratio Processes `, where we develop the likelihood-ratio machinery that reappears here as the worst-case distortion $\hat g$. 
+ + +In addition to what's in Anaconda, this lecture will need the following libraries: + +```{code-cell} ipython3 +:tags: [hide-output] + +!pip install pandas-datareader +``` + +We use the following imports: + +```{code-cell} ipython3 +import datetime as dt +import numpy as np +import pandas as pd +import matplotlib.pyplot as plt +from pandas_datareader import data as web +from scipy.stats import norm +from scipy.optimize import brentq +``` + +We also set up calibration inputs and compute the covariance matrix of equity and risk-free returns from reported moments. + +```{code-cell} ipython3 +β = 0.995 +T = 235 + +# Table 2 parameters +rw = dict(μ=0.00495, σ_ε=0.0050) +ts = dict(μ=0.00418, σ_ε=0.0050, ρ=0.980, ζ=-4.48) + +# Table 1 moments, converted from percent to decimals +r_e_mean, r_e_std = 0.0227, 0.0768 +r_f_mean, r_f_std = 0.0032, 0.0061 +r_excess_std = 0.0767 + +R_mean = np.array([1.0 + r_e_mean, 1.0 + r_f_mean]) +cov_erf = (r_e_std**2 + r_f_std**2 - r_excess_std**2) / 2.0 +Σ_R = np.array( + [ + [r_e_std**2, cov_erf], + [cov_erf, r_f_std**2], + ] +) +Σ_R_inv = np.linalg.inv(Σ_R) +``` + +## Asset pricing 101 + +### Pricing kernel and the risk-free rate + +Let's briefly review a few key concepts from {doc}`Asset Pricing: Finite State Models `. + +A random variable $m_{t+1}$ is called a **stochastic discount factor** if, for a one-period payoff $y_{t+1}$ with time-$t$ price $p_t$, it satisfies + +```{math} +:label: bhs_pricing_eq +p_t = E_t(m_{t+1} y_{t+1}), +``` + +where $E_t$ denotes the mathematical expectation conditioned on date-$t$ information. + +For time-separable CRRA preferences with discount factor $\beta$ and coefficient of relative risk aversion $\gamma$, the marginal rate of substitution gives + +```{math} +:label: bhs_crra_sdf +m_{t+1} = \beta \left(\frac{C_{t+1}}{C_t}\right)^{-\gamma}, +``` + +where $C_t$ is consumption at time $t$. 
+ +Setting $y_{t+1} = 1$ (a risk-free bond) in {eq}`bhs_pricing_eq` yields the reciprocal of the gross one-period risk-free rate: + +```{math} +:label: bhs_riskfree +\frac{1}{R_t^f} = E_t[m_{t+1}] = E_t \left[\beta\left(\frac{C_{t+1}}{C_t}\right)^{-\gamma}\right]. +``` + +### Hansen--Jagannathan bounds + +```{note} +The derivation here uses the Cauchy-Schwarz inequality, which yields the bound +directly from the pricing equation for excess returns. + +{doc}`hansen_jagannathan_1991` derives the same +bound by projecting $m$ onto the space of traded payoffs, which additionally +yields the duality with the mean-variance frontier and the tighter +positivity-restricted bound. +``` + +Let $R_{t+1}^e$ denote the gross return on a risky asset (e.g., the market portfolio) and $R_{t+1}^f$ the gross return on a one-period risk-free bond. + +The **excess return** is + +$$ +\xi_{t+1} = R_{t+1}^e - R_{t+1}^f. +$$ + +An excess return is the payoff on a zero-cost portfolio that is long one dollar of the risky asset and short one dollar of the risk-free bond. + +Because the portfolio costs nothing to enter, its price is $p_t = 0$, so {eq}`bhs_pricing_eq` implies + +$$ +0 = E_t[m_{t+1} \xi_{t+1}]. +$$ + +We can decompose the expectation of a product into a covariance plus a product of expectations: + +$$ +E_t[m_{t+1} \xi_{t+1}] += +\operatorname{cov}_t(m_{t+1},\xi_{t+1}) + E_t[m_{t+1}] E_t[\xi_{t+1}], +$$ + +where $\operatorname{cov}_t$ denotes the conditional covariance and $\sigma_t$ will denote the conditional standard deviation. + +Setting the left-hand side to zero and solving for the expected excess return gives + +$$ +E_t[\xi_{t+1}] = -\frac{\operatorname{cov}_t(m_{t+1}, \xi_{t+1})}{E_t[m_{t+1}]}. +$$ + +Taking absolute values and applying the **Cauchy--Schwarz inequality** $|\operatorname{cov}(X,Y)| \leq \sigma(X) \sigma(Y)$ yields + +```{math} +:label: bhs_hj_bound +\frac{|E_t[\xi_{t+1}]|}{\sigma_t(\xi_{t+1})} +\leq +\frac{\sigma_t(m_{t+1})}{E_t[m_{t+1}]}. 
+``` + +The left-hand side of {eq}`bhs_hj_bound` is the **Sharpe ratio**: the expected excess return per unit of return volatility. + +The right-hand side, $\sigma_t(m)/E_t(m)$, is the **market price of risk**: the maximum Sharpe ratio attainable in the market. + +In words, no asset's Sharpe ratio can exceed the market price of risk. + +#### Unconditional version + +The bound {eq}`bhs_hj_bound` is stated in conditional terms. + +There is an unconditional counterpart that involves a vector of $n$ gross returns $R_{t+1}$ (e.g., equity and risk-free) with unconditional mean $E(R)$ and covariance matrix $\Sigma_R$: + +```{math} +:label: bhs_hj_unconditional +\sigma(m) +\geq +\sqrt{b^\top \Sigma_R^{-1} b}, +\qquad +b = \mathbf{1} - E(m) E(R). +``` + +{ref}`Exercise 1 ` walks through a derivation of this unconditional bound. + +Here $\mathbf{1}$ denotes an $n \times 1$ vector of ones. + +The function below computes the right-hand side of {eq}`bhs_hj_unconditional` for any given value of $E(m)$. + +```{code-cell} ipython3 +def hj_std_bound(E_m): + b = np.ones(2) - E_m * R_mean + var_lb = b @ Σ_R_inv @ b + return np.sqrt(np.maximum(var_lb, 0.0)) +``` + +### Two puzzles + +Reconciling formula {eq}`bhs_crra_sdf` with the market price of risk extracted from data on asset returns (like those in Table 1 below) requires a value of $\gamma$ so high that it provokes skepticism. + +This is the **equity premium puzzle**. + +But high values of $\gamma$ bring another difficulty. + +High values of $\gamma$ that deliver enough volatility $\sigma(m)$ also push $E(m)$, the reciprocal of the gross risk-free rate, too far down, away from the Hansen--Jagannathan bound. + +This is the **risk-free rate puzzle** of {cite:t}`Weil_1989`. + +{cite:t}`Tall2000` showed that recursive preferences with IES $= 1$ can clear the HJ bar while avoiding the risk-free rate puzzle. + +The figure below reproduces Tallarini's key diagnostic. 
 + +Because it motivates much of what follows, we show Tallarini's figure before developing the underlying theory. + + +Closed-form expressions for the Epstein--Zin SDF moments used in the plot are derived in {ref}`Exercise 2 `. + + +The code below implements them alongside the corresponding CRRA moments. + +```{code-cell} ipython3 +def moments_type1_rw(γ): + μ, σ = rw["μ"], rw["σ_ε"] + E_m = β * np.exp(-μ + 0.5 * σ**2 * (2.0 * γ - 1.0)) + var_log_m = (σ * γ) ** 2 + mpr = np.sqrt(np.exp(var_log_m) - 1.0) + return E_m, mpr + + +def moments_type1_ts(γ): + μ, σ, ρ = ts["μ"], ts["σ_ε"], ts["ρ"] + mean_term = 1.0 - (2.0 * (1.0 - β) * (1.0 - γ)) / (1.0 - β * ρ) \ + + (1.0 - ρ) / (1.0 + ρ) + E_m = β * np.exp(-μ + 0.5 * σ**2 * mean_term) + var_term = (((1.0 - β) * (1.0 - γ)) / (1.0 - β * ρ) - 1.0) ** 2 \ + + (1.0 - ρ) / (1.0 + ρ) + var_log_m = σ**2 * var_term + mpr = np.sqrt(np.exp(var_log_m) - 1.0) + return E_m, mpr + + +def moments_crra_rw(γ): + μ, σ = rw["μ"], rw["σ_ε"] + var_log_m = (γ * σ) ** 2 + mean_log_m = np.log(β) - γ * μ + E_m = np.exp(mean_log_m + 0.5 * var_log_m) + mpr = np.sqrt(np.exp(var_log_m) - 1.0) + return E_m, mpr +``` + +For each value of $\gamma \in \{1, 6, 11, \ldots, 51\}$, we plot the implied $(E(m),\sigma(m))$ pair for three combinations of specifications of preferences and consumption growth processes. + +These are time-separable CRRA (crosses), Epstein--Zin preferences with random-walk consumption (circles), and Epstein--Zin preferences with trend-stationary consumption (pluses). 
+ +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: SDF moments and Hansen-Jagannathan bound + name: fig-bhs-1 +--- +γ_grid = np.arange(1, 55, 5) + +Em_rw = np.array([moments_type1_rw(γ)[0] for γ in γ_grid]) +σ_m_rw = np.array( + [moments_type1_rw(γ)[0] * moments_type1_rw(γ)[1] for γ in γ_grid]) + +Em_ts = np.array([moments_type1_ts(γ)[0] for γ in γ_grid]) +σ_m_ts = np.array( + [moments_type1_ts(γ)[0] * moments_type1_ts(γ)[1] for γ in γ_grid]) + +Em_crra = np.array([moments_crra_rw(γ)[0] for γ in γ_grid]) +σ_m_crra = np.array( + [moments_crra_rw(γ)[0] * moments_crra_rw(γ)[1] for γ in γ_grid]) + +Em_grid = np.linspace(0.8, 1.01, 1000) +HJ_std = np.array([hj_std_bound(x) for x in Em_grid]) + +fig, ax = plt.subplots(figsize=(7, 5)) +ax.plot(Em_grid, HJ_std, lw=2, color="black", + label="Hansen-Jagannathan bound") +ax.plot(Em_rw, σ_m_rw, "o", lw=2, + label="Epstein-Zin, random walk") +ax.plot(Em_ts, σ_m_ts, "+", lw=2, + label="Epstein-Zin, trend stationary") +ax.plot(Em_crra, σ_m_crra, "x", lw=2, + label="time-separable CRRA") + +ax.set_xlabel(r"$E(m)$") +ax.set_ylabel(r"$\sigma(m)$") +ax.legend(frameon=False) +ax.set_xlim(0.8, 1.01) +ax.set_ylim(0.0, 0.42) + +plt.tight_layout() +plt.show() +``` + +The crosses tell the story of the risk-free-rate puzzle ({cite:t}`Weil_1989`). + +As $\gamma$ rises, $\sigma(m)/E(m)$ grows but $E(m)$ drifts well below the range consistent with the observed risk-free rate. + +The circles and pluses show Tallarini's way out. + +Recursive utility with IES $= 1$ pushes volatility upward while keeping $E(m)$ roughly pinned near $1/(1+r^f)$. + +For the random-walk model, the bound is reached at around $\gamma = 50$. + +For the trend-stationary model, it is reached at around $\gamma = 75$. + +The quantitative achievement is impressive, but Lucas's challenge still stands. + +Where is the microeconomic evidence for $\gamma = 50$? + +{cite:t}`BHS_2009` argue that these large $\gamma$ values are not really about risk aversion. 
+ +Instead, they reflect the agent's doubts about the probability model itself. + +## The choice setting + +To understand their reinterpretation, we first need to describe their statistical models of consumption growth. + +### Shocks and consumption plans + +We work with a general class of consumption plans. + +Let $x_t$ be an $n \times 1$ state vector and $\varepsilon_{t+1}$ an $m \times 1$ shock. + +A consumption plan belongs to the set $\mathcal{C}(A, B, H; x_0)$ if it admits the recursive representation + +```{math} +:label: bhs_state_space +x_{t+1} = A x_t + B \varepsilon_{t+1}, +\qquad +c_t = H x_t, +``` + +where the eigenvalues of $A$ are bounded in modulus by $1/\sqrt{\beta}$. + +The time-$t$ consumption can therefore be written as + +```{math} +c_t = H \left(B\varepsilon_t + AB\varepsilon_{t-1} + \cdots + A^{t-1}B\varepsilon_1\right) + HA^t x_0. +``` + +The equivalence theorems and Bellman equations that follow hold for arbitrary plans in $\mathcal{C}(A,B,H;x_0)$. + +We focus on the random-walk and trend-stationary models as two special cases. + +### Consumption dynamics + +Let $c_t = \log C_t$ be log consumption. + +The *geometric-random-walk* specification is + +```{math} +c_{t+1} = c_t + \mu + \sigma_\varepsilon \varepsilon_{t+1}, \qquad \varepsilon_{t+1} \sim \mathcal{N}(0, 1). +``` + +Iterating forward yields + +```{math} +c_t = c_0 + t\mu + \sigma_\varepsilon(\varepsilon_t + \varepsilon_{t-1} + \cdots + \varepsilon_1), +\qquad +t \geq 1. +``` + +The *geometric-trend-stationary* specification can be written as a deterministic trend plus a stationary AR(1) component: + +```{math} +c_t = \zeta + \mu t + z_t, +\qquad +z_{t+1} = \rho z_t + \sigma_\varepsilon \varepsilon_{t+1}, +\qquad +\varepsilon_{t+1} \sim \mathcal{N}(0, 1). 
+``` + +With $z_0 = c_0 - \zeta$, this implies the representation + +```{math} +c_t += +\rho^t c_0 + \mu t + (1-\rho^t)\zeta ++ +\sigma_\varepsilon(\varepsilon_t + \rho \varepsilon_{t-1} + \cdots + \rho^{t-1}\varepsilon_1), +\qquad +t \geq 1. +``` + +Equivalently, defining the detrended series $\tilde c_t := c_t - \mu t$, + +```{math} +\tilde c_{t+1} - \zeta = \rho(\tilde c_t - \zeta) + \sigma_\varepsilon \varepsilon_{t+1}. +``` + +The estimated parameters are $(\mu, \sigma_\varepsilon)$ for the random walk and $(\mu, \sigma_\varepsilon, \rho, \zeta)$ for the trend-stationary case. + +We record these parameters and moments from the paper's tables for later reference. + +```{code-cell} ipython3 +print("Table 2 parameters") +print(f"random walk: μ={rw['μ']:.5f}, σ_ε={rw['σ_ε']:.5f}") +print( + f"trend stationary: μ={ts['μ']:.5f}, σ_ε={ts['σ_ε']:.5f}, " + f"ρ={ts['ρ']:.3f}, ζ={ts['ζ']:.2f}" +) +print() +print("Table 1 moments") +print(f"E[r_e]={r_e_mean:.4f}, std[r_e]={r_e_std:.4f}") +print(f"E[r_f]={r_f_mean:.4f}, std[r_f]={r_f_std:.4f}") +print(f"std[r_e-r_f]={r_excess_std:.4f}") +``` + +(pref_equiv)= +## Preferences, distortions, and detection + + +### Overview of agents I, II, III, and IV + +We compare four preference specifications over consumption plans $C^\infty \in \mathcal{C}$. +```{note} +For origins of the names **multiplier** and **constraint** preferences, see {cite:t}`HansenSargent2001`. + +The risk-sensitive preference specification used here comes from {cite:t}`hansen1995discounted`, which adjusts specifications used earlier by +{cite:t}`Jacobson_73`, {cite:t}`Whittle_1981`, and {cite:t}`Whittle_1990` to accommodate discounting in a way that preserves time-invariant optimal decision rules. 
+``` + +*Type I agent (Kreps--Porteus--Epstein--Zin--Tallarini)* with +- a discount factor $\beta \in (0,1)$; +- an intertemporal elasticity of substitution fixed at $1$; +- a risk-aversion parameter $\gamma \geq 1$; and +- an approximating conditional density $\pi(\cdot)$ for shocks and its implied joint distribution $\Pi_\infty(\cdot \mid x_0)$. + +*Type II agent (multiplier preferences)* with +- $\beta \in (0,1)$; +- IES $=1$; +- unit risk aversion; +- an approximating model $\Pi_\infty(\cdot \mid x_0)$; and +- a penalty parameter $\theta > 0$ that discourages probability distortions using relative entropy. + +*Type III agent (constraint preferences)* with +- $\beta \in (0,1)$; +- IES $=1$; +- unit risk aversion; +- an approximating model $\Pi_\infty(\cdot \mid x_0)$; and +- a bound $\eta$ on discounted relative entropy. + +*Type IV agent (pessimistic ex post Bayesian)* with +- $\beta \in (0,1)$; +- IES $=1$; +- unit risk aversion; and +- a single pessimistic joint distribution $\hat\Pi_\infty(\cdot \mid x_0, \theta)$ induced by the type II worst-case distortion. + + +Two sets of equivalence results tie these agents together. + +Types I and II turn out to be observationally equivalent in a strong sense, having identical preferences over $\mathcal{C}$. + +Types III and IV are equivalent in a weaker but still useful sense, delivering the same worst-case pricing implications as a type II agent for a given endowment process. + +We now formalize each agent type and describe relationships among them. + +For each type, we derive a Bellman equation that pins down the agent's value function and stochastic discount factor. + +The stochastic discount factor for all four types takes the form + +$$ +m_{t+1} = \beta \frac{\partial U_{t+1}/\partial c_{t+1}}{\partial U_t/\partial c_t} \hat g_{t+1}, +$$ + +where $\hat g_{t+1}$ is a likelihood-ratio distortion that we will define in each case. 
+ + +Along the way, we introduce the likelihood-ratio distortion that enters the stochastic discount factor and describe detection-error probabilities that will serve as our new calibration tool. + +### Type I: Kreps--Porteus--Epstein--Zin--Tallarini preferences + +The Epstein--Zin--Weil specification combines current consumption with a certainty equivalent of future utility through a CES aggregator: + +```{math} +:label: bhs_ez_general +V_t = \left[(1-\beta) C_t^{\rho} + \beta \mathcal{R}_t(V_{t+1})^{\rho}\right]^{1/\rho}, +\qquad +\rho := 1 - \frac{1}{\psi}, +``` + +where $\psi > 0$ is the intertemporal elasticity of substitution and the certainty equivalent uses the risk-aversion parameter $\gamma \geq 1$: + +```{math} +:label: bhs_certainty_equiv +\mathcal{R}_t(V_{t+1}) += +\left(E_t\left[V_{t+1}^{1-\gamma}\right]\right)^{\frac{1}{1-\gamma}}. +``` + +```{note} +For readers interested in a general class of aggregators and certainty equivalents, see Section +7.3 of {cite:t}`Sargent_Stachurski_2025`. +``` + +Let $\psi = 1$, so $\rho \to 0$. + +In this limit the CES aggregator reduces to + +$$ +V_t = C_t^{1-\beta} \cdot \mathcal{R}_t(V_{t+1})^{\beta}. +$$ + +Taking logs and expanding the certainty equivalent {eq}`bhs_certainty_equiv` gives the *type I recursion*: + +```{math} +:label: bhs_type1_recursion +\log V_t += +(1-\beta)c_t ++ +\frac{\beta}{1-\gamma} +\log E_t\left[(V_{t+1})^{1-\gamma}\right]. +``` + +A useful change of variables is to define the transformed continuation value + +```{math} +:label: bhs_Ut_def +U_t \equiv \frac{\log V_t}{1-\beta} +``` + +and the robustness parameter + +```{math} +:label: bhs_theta_def +\theta = \frac{-1}{(1-\beta)(1-\gamma)}. +``` + +Substituting into {eq}`bhs_type1_recursion` yields the *risk-sensitive recursion* ({ref}`Exercise 3 ` asks you to verify this step) + +```{math} +:label: bhs_risk_sensitive +U_t = c_t - \beta\theta \log E_t\left[\exp\left(\frac{-U_{t+1}}{\theta}\right)\right]. 
+``` + +When $\gamma = 1$ (equivalently $\theta = +\infty$), the $\log E \exp$ term reduces to $E_t U_{t+1}$ and the recursion becomes standard discounted expected log utility, $U_t = c_t + \beta E_t U_{t+1}$. + +For consumption plans in $\mathcal{C}(A, B, H; x_0)$, the recursion {eq}`bhs_risk_sensitive` implies the Bellman equation + +```{math} +:label: bhs_bellman_type1 +U(x) = c - \beta\theta \log \int \exp\left[\frac{-U(Ax + B\varepsilon)}{\theta}\right] \pi(\varepsilon)d\varepsilon. +``` + +#### Deriving the stochastic discount factor + +The stochastic discount factor is the intertemporal marginal rate of substitution, the ratio of marginal utilities at dates $t+1$ and $t$. + +Because $c_t$ enters {eq}`bhs_risk_sensitive` linearly, $\partial U_t / \partial c_t = 1$. + +Converting from log consumption to the consumption good gives $\partial U_t / \partial C_t = 1/C_t$. + +A perturbation to $c_{t+1}$ in a particular state feeds into $U_t$ through the $\log E_t \exp$ term. + +Differentiating {eq}`bhs_risk_sensitive`: + +$$ +\frac{\partial U_t}{\partial c_{t+1}} += +-\beta\theta +\frac{\exp(-U_{t+1}/\theta) (-1/\theta)}{E_t[\exp(-U_{t+1}/\theta)]} +\underbrace{\frac{\partial U_{t+1}}{\partial c_{t+1}}}_{=1} += +\beta \frac{\exp(-U_{t+1}/\theta)}{E_t[\exp(-U_{t+1}/\theta)]}. +$$ + +Converting to consumption levels gives +$\partial U_t / \partial C_{t+1} = \beta \frac{\exp(-U_{t+1}/\theta)}{E_t[\exp(-U_{t+1}/\theta)]} \frac{1}{C_{t+1}}$. + +The ratio of these marginal utilities gives the SDF: + +```{math} +:label: bhs_sdf_Ut +m_{t+1} += +\frac{\partial U_t / \partial C_{t+1}}{\partial U_t / \partial C_t} += +\beta \frac{C_t}{C_{t+1}} +\frac{\exp(-U_{t+1}/\theta)}{E_t[\exp(-U_{t+1}/\theta)]}. +``` + + +The second factor is the likelihood-ratio distortion $\hat g_{t+1}$: an exponential tilt that overweights states where the continuation value $U_{t+1}$ is low. + + +### Type II: multiplier preferences + +We now turn to the type II (multiplier) agent. 
+ +Before writing down the preferences, we need the machinery of martingale likelihood ratios that formalizes what it means to distort a probability model. + +These tools build on {doc}`Likelihood Ratio Processes `, which develops properties of likelihood ratios in detail, and {doc}`Divergence Measures `, which covers relative entropy. + + +#### Martingale likelihood ratios + +Consider a nonnegative martingale $G_t$ with $E(G_t \mid x_0) = 1$. + +Its one-step increments + +```{math} +g_{t+1} = \frac{G_{t+1}}{G_t}, +\qquad +E_t[g_{t+1}] = 1, +\quad +g_{t+1} \geq 0, +\qquad +G_0 = 1, +``` + +define distorted conditional expectations: $\tilde E_t[b_{t+1}] = E_t[g_{t+1}b_{t+1}]$. + +The conditional relative entropy of the distortion is $E_t[g_{t+1}\log g_{t+1}]$, and the discounted entropy over the entire path is $\beta E\bigl[\sum_{t=0}^{\infty} \beta^t G_tE_t(g_{t+1}\log g_{t+1})\big|x_0\bigr]$. + + +A type II agent's *multiplier* preference ordering over consumption plans $C^\infty \in \mathcal{C}(A,B,H;x_0)$ is defined by + +```{math} +:label: bhs_type2_objective +\min_{\{g_{t+1}\}} +\sum_{t=0}^{\infty} E\left\{\beta^t G_t +\left[c_t + \beta\theta E_t\left(g_{t+1}\log g_{t+1}\right)\right] +\Big| x_0\right\}, +``` + +where $G_{t+1} = g_{t+1}G_t$, $E_t[g_{t+1}] = 1$, $g_{t+1} \geq 0$, and $G_0 = 1$. + +A larger $\theta$ makes probability distortions more expensive, discouraging departures from the approximating model. + +The value function satisfies the Bellman equation + +```{math} +:label: bhs_bellman_type2 +W(x) += +c + \min_{g(\varepsilon) \geq 0} +\beta \int \bigl[g(\varepsilon) W(Ax + B\varepsilon) ++ \theta g(\varepsilon)\log g(\varepsilon)\bigr] \pi(\varepsilon) d\varepsilon +``` + +subject to $\int g(\varepsilon) \pi(\varepsilon) d\varepsilon = 1$. 
+ +Inside the integral, $g(\varepsilon) W(Ax + B\varepsilon)$ is the continuation value under the distorted model $g\pi$, while $\theta g(\varepsilon)\log g(\varepsilon)$ is the entropy penalty that makes large departures from the approximating model $\pi$ costly. + +The minimizer is ({ref}`Exercise 4 ` derives this and verifies the equivalence $W \equiv U$) + +```{math} +:label: bhs_ghat +\hat g_{t+1} += +\frac{\exp \bigl(-W(Ax_t + B\varepsilon_{t+1})/\theta\bigr)}{E_t \left[\exp \bigl(-W(Ax_t + B\varepsilon_{t+1})/\theta\bigr)\right]}. +``` + +Notice that $g(\varepsilon)$ multiplies both the continuation value $W$ and the entropy penalty. + +This is the key structural feature that makes $\hat g$ a likelihood ratio. + + +Substituting {eq}`bhs_ghat` back into {eq}`bhs_bellman_type2` gives + +$$W(x) = c - \beta\theta \log \int \exp \left[\frac{-W(Ax + B\varepsilon)}{\theta}\right]\pi(\varepsilon) d\varepsilon,$$ + +which is identical to {eq}`bhs_bellman_type1`. + +Therefore $W(x) \equiv U(x)$, establishing that *types I and II are observationally equivalent* over elements of $\mathcal{C}(A,B,H;x_0)$. + +The mapping between parameters is + +```{math} +\theta = \left[(1-\beta)(\gamma - 1)\right]^{-1}. +``` + +```{code-cell} ipython3 +def θ_from_γ(γ, β=β): + if γ <= 1: + return np.inf + return 1.0 / ((1.0 - β) * (γ - 1.0)) + + +def γ_from_θ(θ, β=β): + if np.isinf(θ): + return 1.0 + return 1.0 + 1.0 / ((1.0 - β) * θ) +``` + +### Type III: constraint preferences + +Type III (constraint) preferences swap the entropy penalty for a hard bound. 
+ +Rather than penalizing distortions through $\theta$, the agent minimizes expected discounted log consumption under the worst-case model subject to a cap $\eta$ on discounted relative entropy: + +```{math} +J(x_0) += +\min_{\{g_{t+1}\}} +\sum_{t=0}^{\infty} E \left[\beta^t G_t c_t \Big| x_0\right] +``` + +subject to $G_{t+1} = g_{t+1}G_t$, $E_t[g_{t+1}] = 1$, $g_{t+1} \geq 0$, $G_0 = 1$, and + +```{math} +\beta E \left[\sum_{t=0}^{\infty} \beta^t G_t E_t\left(g_{t+1}\log g_{t+1}\right)\Big|x_0\right] \leq \eta. +``` + +The Lagrangian for the type III problem is + +$$ +\mathcal{L} += +\sum_{t=0}^{\infty} E\left[\beta^t G_t c_t \Big| x_0\right] ++ +\theta \left[ +\beta E \left(\sum_{t=0}^{\infty} \beta^t G_t E_t(g_{t+1}\log g_{t+1})\Big| x_0 \right) - \eta +\right], +$$ + +where $\theta \geq 0$ is the multiplier on the entropy constraint. + +Collecting terms inside the expectation gives + +$$ +\mathcal{L} += +\sum_{t=0}^{\infty} E \left \{\beta^t G_t +\left[c_t + \beta \theta E_t(g_{t+1}\log g_{t+1})\right] +\Big| x_0\right\} - \theta\eta, +$$ + +which, apart from the constant $-\theta\eta$, has the same structure as the type II objective {eq}`bhs_type2_objective`. + +The first-order condition for $g_{t+1}$ is therefore identical, and the optimal distortion is the same $\hat g_{t+1}$ as in {eq}`bhs_ghat`, evaluated at the $\theta$ that makes the entropy constraint bind. + +The SDF is again $m_{t+1} = \beta(C_t/C_{t+1})\hat g_{t+1}$. + +So for the particular endowment process and the $\theta$ that enforces the entropy bound, a type III agent and a type II agent assign the same shadow prices to uncertain claims. + +### Type IV: ex post Bayesian + +The type IV agent is the simplest of the four: an ordinary expected-utility agent with log preferences who happens to hold a pessimistic probability model $\hat\Pi_\infty$: + +```{math} +\hat E_0 \sum_{t=0}^{\infty} \beta^t c_t. +``` + +$\hat E_0$ denotes expectation under the pessimistic model $\hat\Pi_\infty$. 
+ +Here $\hat\Pi_\infty(\cdot \mid x_0, \theta)$ is the joint distribution generated by the type II agent's worst-case distortion. + +Since the agent has log utility under $\hat\Pi_\infty$, the Euler equation for any gross return $R_{t+1}$ is + +$$ +1 = \hat E_t \left[\beta \frac{C_t}{C_{t+1}} R_{t+1}\right]. +$$ + +To express this in terms of the approximating model $\Pi_\infty$, apply a change of measure using the one-step likelihood ratio $\hat g_{t+1} = d\hat\Pi / d\Pi$: + +$$ +1 = E_t\left[\hat g_{t+1} \cdot \beta \frac{C_t}{C_{t+1}} R_{t+1}\right] += E_t\left[m_{t+1} R_{t+1}\right], +$$ + +so the effective SDF under the approximating model is $m_{t+1} = \beta(C_t/C_{t+1})\hat g_{t+1}$. + +For the particular $A, B, H$ and $\theta$ used to construct $\hat\Pi_\infty$, the type IV value function equals $J(x)$ from type III. + +### Stochastic discount factor + +Pulling together the results for all four agent types, the stochastic discount factor can be written compactly as + +```{math} +:label: bhs_sdf +m_{t+1} += +\beta \frac{C_t}{C_{t+1}} \hat g_{t+1}. +``` + +The factor $\hat g_{t+1}$ is a likelihood ratio between the approximating and worst-case one-step models. + +With log utility, $C_t/C_{t+1} = \exp(-(c_{t+1}-c_t))$ is the usual intertemporal marginal rate of substitution. + +Robustness multiplies it by $\hat g_{t+1}$, so uncertainty aversion enters pricing entirely through the distortion. + +For the constraint-preference agent, the worst-case distortion coincides with the multiplier agent's at the $\theta$ that makes the entropy constraint bind. + +For the ex post Bayesian, it is simply a change of measure from the approximating model to the pessimistic one. 
+ +### Value function decomposition + +Substituting the minimizing $\hat g$ back into the Bellman equation {eq}`bhs_bellman_type2` yields a revealing decomposition of the type II value function: + +```{math} +:label: bhs_W_decomp_bellman +W(x) = c + \beta \int \bigl[\hat g(\varepsilon) W(Ax + B\varepsilon) + \theta \hat g(\varepsilon)\log \hat g(\varepsilon)\bigr] \pi(\varepsilon)d\varepsilon. +``` + +Define two components: + +```{math} +:label: bhs_J_recursion +J(x) = c + \beta \int \hat g(\varepsilon) J(Ax + B\varepsilon) \pi(\varepsilon)d\varepsilon, +``` + +```{math} +:label: bhs_N_recursion +N(x) = \beta \int \hat g(\varepsilon)\bigl[\log \hat g(\varepsilon) + N(Ax + B\varepsilon)\bigr] \pi(\varepsilon)d\varepsilon. +``` + +Then $W(x) = J(x) + \theta N(x)$. + +Here $J(x_t) = \hat E_t \sum_{j=0}^{\infty} \beta^j c_{t+j}$ is expected discounted log consumption under the *worst-case* model. + +$J$ is the value function shared by both the type III and type IV agents. + +For the type III agent, once the worst-case model is pinned down by the entropy constraint, the resulting value is simply expected discounted consumption under that model. + +The type IV agent adopts the same model as a fixed belief, so she evaluates the same expectation. + +The term $N(x)$ is discounted continuation entropy, measuring the total information cost of the probability distortion from date $t$ onward. + +This decomposition plays a central role in the welfare calculations of {ref}`the welfare section ` below, where it explains why type III uncertainty compensation is twice that of type II. + +### Gaussian mean-shift distortions + +Everything so far holds for general distortions $\hat g$. + +We now specialize to the Gaussian case that underlies our two consumption models. + +Under both models, the shock is $\varepsilon_{t+1} \sim \mathcal{N}(0,1)$. 
+ +As we verify in the next subsection, the value function $W$ is linear in the state, so the exponent in the worst-case distortion {eq}`bhs_ghat` is linear in $\varepsilon_{t+1}$. + +Exponentially tilting a Gaussian by a linear function produces another Gaussian with the same variance but a shifted mean. + +The worst-case model therefore keeps the variance at one but shifts the mean of $\varepsilon_{t+1}$ to some $w < 0$. + +The resulting likelihood ratio is ({ref}`Exercise 5 ` verifies its properties) + +```{math} +:label: bhs_ghat_gaussian +\hat g_{t+1} += +\exp\left(w \varepsilon_{t+1} - \frac{1}{2}w^2\right), +\qquad +E_t[\hat g_{t+1}] = 1. +``` + +Hence $\log \hat g_{t+1}$ is normal with mean $-w^2/2$ and variance $w^2$, and + +```{math} +\operatorname{std}(\hat g_{t+1}) = \sqrt{e^{w^2}-1}. +``` + +The mean shift $w$ is determined by how strongly each shock $\varepsilon_{t+1}$ affects continuation value. + +From {eq}`bhs_ghat`, the worst-case distortion puts $\hat g \propto \exp(-W(x_{t+1})/\theta)$. + +If $W(x_{t+1})$ loads on $\varepsilon_{t+1}$ with coefficient $\lambda$, then the Gaussian mean shift is $w = -\lambda/\theta$. + +By guessing linear value functions and matching coefficients in the Bellman equation ({ref}`Exercise 6 ` works out both cases), we obtain the worst-case mean shifts + +```{math} +:label: bhs_w_formulas +w_{rw}(\theta) = -\frac{\sigma_\varepsilon}{(1-\beta)\theta}, +\qquad +w_{ts}(\theta) = -\frac{\sigma_\varepsilon}{(1-\rho\beta)\theta}. +``` + +The denominator $(1-\beta)$ in the random-walk case becomes $(1-\beta\rho)$ in the trend-stationary case. + +Because the AR(1) component is persistent, each shock has a larger cumulative effect on continuation utility, so the worst-case distortion is more aggressive. 
+ +```{code-cell} ipython3 +def w_from_θ(θ, model): + if np.isinf(θ): + return 0.0 + if model == "rw": + return -rw["σ_ε"] / ((1.0 - β) * θ) + if model == "ts": + return -ts["σ_ε"] / ((1.0 - β * ts["ρ"]) * θ) + raise ValueError("model must be 'rw' or 'ts'") +``` + +### Discounted entropy + +When the approximating and worst-case conditional densities are $\mathcal{N}(0,1)$ and $\mathcal{N}(w(\theta),1)$, the likelihood ratio is $\hat g(\varepsilon) = \exp(w(\theta)\varepsilon - \frac{1}{2}w(\theta)^2)$, so $\log \hat g(\varepsilon) = w(\theta)\varepsilon - \frac{1}{2}w(\theta)^2$. + +Under the worst-case measure $\varepsilon \sim \mathcal{N}(w(\theta),1)$, so $E_{\hat\pi}[\varepsilon] = w(\theta)$, giving conditional relative entropy + +```{math} +:label: bhs_conditional_entropy +E_t[\hat g_{t+1}\log \hat g_{t+1}] = w(\theta) \cdot w(\theta) - \frac{1}{2}w(\theta)^2 = \frac{1}{2}w(\theta)^2. +``` + +Because the distortion is iid, the conditional entropy $E_t[\hat g_{t+1}\log \hat g_{t+1}] = \frac{1}{2}w(\theta)^2$ from {eq}`bhs_conditional_entropy` is constant and $N(x)$ does not depend on $x$. + +The recursion {eq}`bhs_N_recursion` then reduces to $N(x) = \beta(\frac{1}{2}w(\theta)^2 + N(x))$, where we have used $\int \hat g(\varepsilon)\pi(\varepsilon)d\varepsilon = 1$ (since $\hat g$ is a likelihood ratio). + +Solving for $N(x)$, + +$$ +N(x)(1-\beta) = \frac{\beta}{2}w(\theta)^2, +$$ + +gives discounted entropy + +```{math} +:label: bhs_eta_formula +\eta = N(x) = \frac{\beta}{2(1-\beta)} w(\theta)^2. +``` + +```{code-cell} ipython3 +def η_from_θ(θ, model): + w = w_from_θ(θ, model) + return β * w**2 / (2.0 * (1.0 - β)) +``` + +This gives a clean mapping between $\theta$ and $\eta$ that aligns multiplier and constraint preferences along an exogenous endowment process. 
+ +As we will see in the {ref}`detection-error section ` below, it is more natural to hold $\eta$ (or equivalently the detection-error probability $p$) fixed rather than $\theta$ when comparing across consumption models. + +### Value functions for random-walk consumption + +We now solve the recursions {eq}`bhs_W_decomp_bellman`, {eq}`bhs_J_recursion`, and {eq}`bhs_N_recursion` in closed form for the random-walk model, where $W$ is the type II (multiplier) value function, $J$ is the type III/IV value function, and $N$ is discounted continuation entropy. + +Substituting $w_{rw}(\theta) = -\sigma_\varepsilon / [(1-\beta)\theta]$ from {eq}`bhs_w_formulas` into {eq}`bhs_eta_formula` gives + +$$ +N(x) = \frac{\beta}{2(1-\beta)} w_{rw}(\theta)^2 + = \frac{\beta}{2(1-\beta)} \left(\frac{-\sigma_\varepsilon}{(1-\beta)\theta}\right)^2 + = \frac{\beta}{2(1-\beta)} \cdot \frac{\sigma_\varepsilon^2}{(1-\beta)^2\theta^2} +$$ + +so that + +```{math} +:label: bhs_N_rw +N(x) = \frac{\beta\sigma_\varepsilon^2}{2(1-\beta)^3\theta^2}. +``` + +For $W$, we guess $W(x_t) = \frac{1}{1-\beta}[c_t + d]$ for some constant $d$ and verify it in the risk-sensitive Bellman equation {eq}`bhs_bellman_type1`. + +Under the random walk, $W(x_{t+1}) = \frac{1}{1-\beta}[c_t + \mu + \sigma_\varepsilon\varepsilon_{t+1} + d]$, so $-W(x_{t+1})/\theta$ is affine in the standard normal $\varepsilon_{t+1}$. + +Using the fact that $\log E[e^Z] = \mu_Z + \frac{1}{2}\sigma_Z^2$ for a normal random variable $Z$, the Bellman equation {eq}`bhs_bellman_type1` reduces to a constant-matching condition that pins down $d$ ({ref}`Exercise 7 ` works through the algebra): + +```{math} +:label: bhs_W_rw +W(x_t) = \frac{1}{1-\beta}\left[c_t + \frac{\beta}{1-\beta}\left(\mu - \frac{\sigma_\varepsilon^2}{2(1-\beta)\theta}\right)\right]. 
+```
+
+Using $W = J + \theta N$, the type III/IV value function is
+
+```{math}
+:label: bhs_J_rw
+J(x_t) = W(x_t) - \theta N(x_t) = \frac{1}{1-\beta}\left[c_t + \frac{\beta}{1-\beta}\left(\mu - \frac{\sigma_\varepsilon^2}{(1-\beta)\theta}\right)\right].
+```
+
+Notice that the coefficient on $\sigma_\varepsilon^2/[(1-\beta)\theta]$ doubles from $\tfrac{1}{2}$ in $W$ to $1$ in $J$.
+
+The reason is that $W$ includes the entropy "rebate" $\theta N$, which partially offsets the pessimistic tilt, while $J$ evaluates consumption purely under the worst-case model with no such offset.
+
+(detection_error_section)=
+## Detection-error probabilities
+
+So far we have expressed SDF moments, value functions, and worst-case distortions as functions of $\gamma$ (or equivalently $\theta$).
+
+But if $\gamma$ should not be calibrated by introspection about atemporal gambles, what replaces it?
+
+The answer proposed by {cite:t}`BHS_2009` is a statistical test: how easily could an econometrician distinguish the approximating model from its worst-case alternative?
+
+### Likelihood-ratio testing and detection errors
+
+Let $L_T$ be the log likelihood ratio of the worst-case model to the approximating model based on a sample of length $T$.
+
+A likelihood-ratio test selects the worst-case model when $L_T > 0$ and the approximating model when $L_T < 0$, so the probabilities of *mistakenly* rejecting the true model are
+
+```{math}
+p_A = \Pr_A(L_T > 0),
+\qquad
+p_B = \Pr_B(L_T < 0),
+```
+
+where $\Pr_A$ and $\Pr_B$ denote probabilities under the approximating and worst-case models.
+
+Then $p(\theta^{-1}) = \frac{1}{2}(p_A + p_B)$ is the average probability of choosing the wrong model.
+
+Fix a sample size $T$ (here 235 quarters, matching the postwar US data used in the paper).
+
+For a given $\theta$, compute the worst-case model and imagine that a Bayesian runs a likelihood-ratio test to distinguish it from the approximating model.
+
+What fraction of the time would she pick the wrong one?
+
+That fraction is the **detection-error probability** $p(\theta^{-1})$.
+ +When $p$ is close to 0.5 the two models are nearly indistinguishable, so the consumer's fear is hard to rule out. + +When $p$ is small the worst-case model is easy to reject and the robustness concern carries less force. + +### Market price of model uncertainty + +The **market price of model uncertainty** (MPU) is the conditional standard deviation of the distortion: + +```{math} +:label: bhs_mpu_formula +\text{MPU} += +\operatorname{std}(\hat g_{t+1}) += +\sqrt{e^{w(\theta)^2}-1} +\approx |w(\theta)|. +``` + +In the Gaussian mean-shift setting, $L_T$ is normal with mean $\pm \tfrac{1}{2}w^2T$ and variance $w^2T$, so the detection-error probability has the closed form ({ref}`Exercise 8 ` derives this) + +```{math} +:label: bhs_detection_formula +p(\theta^{-1}) += +\frac{1}{2}\left(p_A + p_B\right), +``` + +```{math} +:label: bhs_detection_closed +p(\theta^{-1}) = \Phi \left(-\frac{|w(\theta)|\sqrt{T}}{2}\right). +``` + +```{code-cell} ipython3 +def detection_probability(θ, model): + w = abs(w_from_θ(θ, model)) + return norm.cdf(-0.5 * w * np.sqrt(T)) + + +def θ_from_detection_probability(p, model): + if p >= 0.5: + return np.inf + w_abs = -2.0 * norm.ppf(p) / np.sqrt(T) + if model == "rw": + return rw["σ_ε"] / ((1.0 - β) * w_abs) + if model == "ts": + return ts["σ_ε"] / ((1.0 - β * ts["ρ"]) * w_abs) + raise ValueError("model must be 'rw' or 'ts'") +``` + +### Interpreting the calibration objects + +Let us trace the chain of mappings that connects preference parameters to statistical distinguishability. + +The parameter $\theta$ governs how expensive it is for the minimizing player to distort the approximating model. + +A small $\theta$ means cheap distortions and therefore stronger robustness concerns. + +The associated $\gamma = 1 + \left[(1-\beta)\theta\right]^{-1}$ can be large even when we do not want to interpret behavior as extreme atemporal risk aversion. 
+ +The distortion magnitude $|w(\theta)|$ directly measures how pessimistically the agent tilts one-step probabilities. + +The detection-error probability $p(\theta^{-1})$ translates that tilt into a statistical statement about finite-sample distinguishability. + +High $p$ means the two models are hard to tell apart, while low $p$ means the worst case is easier to reject. + +This chain bridges econometric identification and preference calibration. + +Finally, recall from {eq}`bhs_eta_formula` that discounted entropy is $\eta = \frac{\beta}{2(1-\beta)}w(\theta)^2$, so when the distortion is a Gaussian mean shift, discounted entropy is proportional to the squared market price of model uncertainty. + +### Detection probabilities across the two models + +The left panel below plots $p(\theta^{-1})$ against $\theta^{-1}$ for both consumption specifications. + +Because the baseline dynamics differ, the same numerical $\theta$ implies very different detection probabilities across the two models. + +The right panel resolves this by plotting detection probabilities against discounted relative entropy $\eta$, which normalizes the statistical distance. + +Once indexed by $\eta$, the two curves fall on top of each other. 
+ +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Detection probabilities across two models + name: fig-bhs-2 +--- +θ_inv_grid = np.linspace(0.0, 1.8, 400) +θ_grid = np.full_like(θ_inv_grid, np.inf) +mask_θ = θ_inv_grid > 0.0 +θ_grid[mask_θ] = 1.0 / θ_inv_grid[mask_θ] + +p_rw = np.array([detection_probability(θ, "rw") for θ in θ_grid]) +p_ts = np.array([detection_probability(θ, "ts") for θ in θ_grid]) + +η_rw = np.array([η_from_θ(θ, "rw") for θ in θ_grid]) +η_ts = np.array([η_from_θ(θ, "ts") for θ in θ_grid]) + +fig, axes = plt.subplots(1, 2, figsize=(12, 4)) + +axes[0].plot(θ_inv_grid, 100.0 * p_rw, lw=2, label="random walk") +axes[0].plot(θ_inv_grid, 100.0 * p_ts, lw=2, label="trend stationary") +axes[0].set_xlabel(r"$\theta^{-1}$") +axes[0].set_ylabel("detection error probability (percent)") +axes[0].legend(frameon=False) + +axes[1].plot(η_rw, 100.0 * p_rw, lw=2, label="random walk") +axes[1].plot(η_ts, 100.0 * p_ts, lw=2, ls="--", label="trend stationary") +axes[1].set_xlabel(r"discounted entropy $\eta$") +axes[1].set_ylabel("detection error probability (percent)") +axes[1].set_xlim(0.0, 10) +axes[1].legend(frameon=False) + +plt.tight_layout() +plt.show() +``` + +Detection-error probabilities (or equivalently, discounted entropy) are therefore the right yardstick for cross-model comparisons. + +If we hold $\theta$ fixed when switching from a random walk to a trend-stationary specification, we implicitly change how much misspecification the consumer fears. + +Holding $\eta$ or $p$ fixed instead keeps the statistical difficulty of detecting misspecification constant. + +The explicit mapping that equates discounted entropy across models is ({ref}`Exercise 9 ` derives it): + +```{math} +:label: bhs_theta_cross_model +\theta_{\text{TS}} += +\left(\frac{\sigma_\varepsilon^{\text{TS}}}{\sigma_\varepsilon^{\text{RW}}}\right) +\frac{1-\beta}{1-\rho\beta} \theta_{\text{RW}}. 
+``` + +At our calibration $\sigma_\varepsilon^{\text{TS}} = \sigma_\varepsilon^{\text{RW}}$, this simplifies to $\theta_{\text{TS}} = \frac{1-\beta}{1-\rho\beta}\theta_{\text{RW}}$. + +Because $\rho = 0.98$ and $\beta = 0.995$, the ratio $(1-\beta)/(1-\rho\beta)$ is much less than one, so holding entropy fixed requires a substantially smaller $\theta$ (stronger robustness) for the trend-stationary model than for the random walk. + +## Unify the two models using detection-error probabilities + +With this machinery in hand, we can redraw Tallarini's figure using detection-error probabilities as the common index. + +For each $p(\theta^{-1}) = 0.50, 0.45, \ldots, 0.01$, we invert to find the model-specific $\theta$, convert to $\gamma$, and plot the implied $(E(m), \sigma(m))$ pair. + +```{code-cell} ipython3 +p_points = np.array( + [0.50, 0.45, 0.40, 0.35, 0.30, 0.25, 0.20, 0.15, 0.10, 0.05, 0.01]) + +θ_rw_points = np.array( + [θ_from_detection_probability(p, "rw") for p in p_points]) +θ_ts_points = np.array( + [θ_from_detection_probability(p, "ts") for p in p_points]) + +γ_rw_points = np.array([γ_from_θ(θ) for θ in θ_rw_points]) +γ_ts_points = np.array([γ_from_θ(θ) for θ in θ_ts_points]) + +Em_rw_p = np.array( + [moments_type1_rw(γ)[0] for γ in γ_rw_points]) +σ_m_rw_p = np.array( + [moments_type1_rw(γ)[0] * moments_type1_rw(γ)[1] for γ in γ_rw_points]) +Em_ts_p = np.array( + [moments_type1_ts(γ)[0] for γ in γ_ts_points]) +σ_m_ts_p = np.array( + [moments_type1_ts(γ)[0] * moments_type1_ts(γ)[1] for γ in γ_ts_points]) + +print("p γ_rw γ_ts") +for p, g1, g2 in zip(p_points, γ_rw_points, γ_ts_points): + print(f"{p:>4.2f} {g1:>9.2f} {g2:>9.2f}") +``` + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Pricing loci from common detectability + name: fig-bhs-3 +--- + +# Empirical Sharpe ratio -- the minimum of the HJ bound curve +sharpe = (r_e_mean - r_f_mean) / r_excess_std + +def sharpe_gap(p, model): + """Market price of risk minus Sharpe ratio, as a function of 
p.""" + if p >= 0.5: + return -sharpe + θ = θ_from_detection_probability(p, model) + γ = γ_from_θ(θ) + _, mpr = moments_type1_rw(γ) if model == "rw" else moments_type1_ts(γ) + return mpr - sharpe + +p_hj_rw = brentq(sharpe_gap, 1e-4, 0.49, args=("rw",)) +p_hj_ts = brentq(sharpe_gap, 1e-4, 0.49, args=("ts",)) + +fig, ax = plt.subplots(figsize=(7, 5)) +ax.plot(Em_rw_p, σ_m_rw_p, "o", lw=2, + label="random walk") +ax.plot(Em_ts_p, σ_m_ts_p, "+", lw=2, markersize=12, + label="trend stationary") +ax.plot(Em_grid, HJ_std, lw=2, + color="black", label="Hansen-Jagannathan bound") + +# Mark p where each model's market price of risk reaches the Sharpe ratio +for p_hj, model, color, name, marker in [ + (p_hj_rw, "rw", "C0", "RW", "o"), + (p_hj_ts, "ts", "C1", "TS", "+"), +]: + θ_hj = θ_from_detection_probability(p_hj, model) + γ_hj = γ_from_θ(θ_hj) + Em_hj, mpr_hj = (moments_type1_rw(γ_hj) if model == "rw" + else moments_type1_ts(γ_hj)) + σ_m_hj = Em_hj * mpr_hj + ax.axhline(σ_m_hj, ls="--", lw=1, color=color, + label=f"{name} reaches bound at $p = {p_hj:.3f}$") + if model == "ts": + ax.plot(Em_hj, σ_m_hj, marker, lw=2, markersize=12, color=color) + else: + ax.plot(Em_hj, σ_m_hj, marker, lw=2, color=color) + +ax.set_xlabel(r"$E(m)$") +ax.set_ylabel(r"$\sigma(m)$") +ax.legend(frameon=False) +ax.set_xlim(0.96, 1.05) +ax.set_ylim(0.0, 0.34) + +plt.tight_layout() +plt.show() +``` + +The result is striking. + +The random-walk and trend-stationary loci nearly coincide. + +Recall that under Tallarini's $\gamma$-calibration, reaching the Hansen--Jagannathan bound required $\gamma \approx 50$ for the random walk but $\gamma \approx 75$ for the trend-stationary model. + +These are very different numbers for what is supposed to be the "same" preference parameter. + +Under detection-error calibration, both models reach the bound at essentially the same detectability level. + +The apparent model dependence was an artifact of using $\gamma$ as the cross-model yardstick. 
+ +Once we measure robustness concerns in units of statistical detectability, the two consumption specifications tell a single, coherent story. + +A representative consumer with moderate, difficult-to-dismiss fears about model misspecification behaves as though she has very high risk aversion. + +The following figure brings together the two key ideas of this section: a small one-step density shift that is hard to detect (left panel) compounds into a large gap in expected log consumption (right panel). + +At $p = 0.03$ both models share the same innovation mean shift $w$, and the left panel shows that the approximating and worst-case one-step densities nearly coincide. + +The right panel reveals the cumulative consequence: a per-period shift that is virtually undetectable compounds into a large gap in expected log consumption, especially under random-walk dynamics where each shock has a permanent effect. + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Density shift and cumulative consumption + name: fig-bhs-fear +--- +p_star = 0.03 +θ_star = θ_from_detection_probability(p_star, "rw") +w_star = w_from_θ(θ_star, "rw") +σ_ε = rw["σ_ε"] +ρ = ts["ρ"] + +fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 5)) + +ε = np.linspace(-4.5, 4.5, 500) +f0 = norm.pdf(ε, 0, 1) +fw = norm.pdf(ε, w_star, 1) + +ax1.fill_between(ε, f0, alpha=0.15, color='k') +ax1.plot(ε, f0, 'k', lw=2, + label=r'Approximating $\mathcal{N}(0, 1)$') +ax1.fill_between(ε, fw, alpha=0.15, color='C3') +ax1.plot(ε, fw, 'C3', lw=2, ls='--', + label=fr'Worst case $\mathcal{{N}}({w_star:.2f},1)$') + +peak = norm.pdf(0, 0, 1) +ax1.annotate('', xy=(w_star, 0.55 * peak), xytext=(0, 0.55 * peak), + arrowprops=dict(arrowstyle='->', color='C3', lw=1.8)) +ax1.text(w_star / 2, 0.59 * peak, f'$w = {w_star:.2f}$', + ha='center', fontsize=11, color='C3') + +ax1.set_xlabel(r'$\varepsilon_{t+1}$') +ax1.set_ylabel('density') +ax1.legend(frameon=False) + +quarters = np.arange(0, 241) +years = quarters / 4 + +gap_rw 
= 100 * σ_ε * w_star * quarters +gap_ts = 100 * σ_ε * w_star * (1 - ρ**quarters) / (1 - ρ) + +ax2.plot(years, gap_rw, 'C0', lw=2, label='random walk') +ax2.plot(years, gap_ts, 'C1', lw=2, label='trend stationary') +ax2.fill_between(years, gap_rw, alpha=0.1, color='C0') +ax2.fill_between(years, gap_ts, alpha=0.1, color='C1') +ax2.axhline(0, color='k', lw=0.5, alpha=0.3) + +# Endpoint labels +ax2.text(61, gap_rw[-1], f'{gap_rw[-1]:.1f}%', + fontsize=10, color='C0', va='center') +ax2.text(61, gap_ts[-1], f'{gap_ts[-1]:.1f}%', + fontsize=10, color='C1', va='center') + +ax2.set_xlabel('years') +ax2.set_ylabel('gap in expected log consumption (%)') +ax2.legend(frameon=False, loc='lower left') +ax2.set_xlim(0, 68) + +plt.tight_layout() +plt.show() +``` + +The next figure poses the "doubts or variability?" question by decomposing the log SDF into two additive components. + +Taking logs of {eq}`bhs_sdf` gives + +$$ +\log m_{t+1} += +\underbrace{\log \beta - \Delta c_{t+1}}_{\text{log-utility intertemporal MRS}} ++ +\underbrace{\log \hat g_{t+1}}_{\text{worst-case distortion}}. +$$ + +Under the random-walk model, $\Delta c_{t+1} = \mu + \sigma_\varepsilon \varepsilon_{t+1}$, and the Gaussian distortion {eq}`bhs_ghat_gaussian` gives $\log \hat g_{t+1} = w \varepsilon_{t+1} - \tfrac{1}{2}w^2$. + +Substituting, we can write + +$$ +\log m_{t+1} += +\bigl(\log\beta - \mu - \tfrac{1}{2}w^2\bigr) +- +(\sigma_\varepsilon - w)\varepsilon_{t+1}, +$$ + +so the slope of $\log m_{t+1}$ in $\varepsilon_{t+1}$ is $\sigma_\varepsilon - w$. + +Since $w < 0$, the distortion steepens the SDF relative to what log utility alone would deliver. + +The figure below reveals how little work log utility does on its own. + +The intertemporal marginal rate of substitution (IMRS) is nearly flat. + +At postwar calibrated volatility ($\sigma_\varepsilon = 0.005$), it contributes almost nothing to the pricing kernel's slope. + +The worst-case distortion accounts for virtually all of the SDF's volatility. 
+ +What looks like extreme risk aversion ($\gamma \approx 34$) is really just log utility combined with moderate fears of model misspecification. + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Robust SDF log-utility decomposition + name: fig-bhs-sdf-decomp +--- +θ_cal = θ_from_detection_probability(0.10, "rw") +γ_cal = γ_from_θ(θ_cal) +w_cal = w_from_θ(θ_cal, "rw") + +μ_c, σ_c = rw["μ"], rw["σ_ε"] +Δc = np.linspace(μ_c - 3.5 * σ_c, μ_c + 3.5 * σ_c, 300) +ε = (Δc - μ_c) / σ_c + +log_imrs = np.log(β) - Δc +log_ghat = w_cal * ε - 0.5 * w_cal**2 +log_sdf = log_imrs + log_ghat + +fig, ax = plt.subplots(figsize=(8, 5)) + +ax.plot(100 * Δc, log_imrs, 'C1', lw=2, + label=r'IMRS: $\log\beta - \Delta c$') +ax.plot(100 * Δc, log_ghat, 'C3', lw=2, ls='--', + label=r'Distortion: $\log\hat{g}$') +ax.plot(100 * Δc, log_sdf, 'k', lw=2, + label=r'SDF: $\log m = \log\mathrm{IMRS} + \log\hat{g}$') +ax.axhline(0, color='k', lw=0.5, alpha=0.3) +ax.set_xlabel(r'consumption growth $\Delta c_{t+1}$ (%)') +ax.set_ylabel('log SDF component') +ax.legend(frameon=False, fontsize=10, loc='upper right') + +plt.show() +``` + +(welfare_experiments)= +## What do risk premia measure? + +{cite:t}`Lucas_2003` asked how much consumption a representative consumer would sacrifice to eliminate aggregate fluctuations. + +His answer rested on the assumption that the consumer knows the true data-generating process. + +The robust reinterpretation opens up a second, quite different thought experiment. + +Instead of eliminating all randomness, suppose we keep the randomness but remove the consumer's fear of model misspecification (set $\theta = \infty$). + +How much would she pay for that relief? + +To answer this, we seek a permanent proportional reduction $c_0 - c_0^k$ in initial log consumption that leaves an agent of type $k$ indifferent between the original risky plan and a deterministic certainty-equivalent path. 
+ +Because utility is log and the consumption process is Gaussian, these compensations can be computed in closed form. + +### The certainty equivalent path + +The point of comparison is the deterministic path with the same mean level of consumption as the stochastic plan: + +```{math} +:label: bhs_ce_path +c_{t+1}^{ce} - c_t^{ce} = \mu + \tfrac{1}{2}\sigma_\varepsilon^2. +``` + +The additional $\tfrac{1}{2}\sigma_\varepsilon^2$ term is a Jensen's inequality correction. + +Since $E[C_t] = E[e^{c_t}] = \exp(c_0 + t\mu + \tfrac{1}{2}t\sigma_\varepsilon^2)$, {eq}`bhs_ce_path` matches the mean *level* of consumption at every date. + +### Compensating variations from the value functions + +We use the closed-form value functions derived earlier: {eq}`bhs_W_rw` for the type I/II value function $W$ and {eq}`bhs_J_rw` for the type III/IV value function $J$. + +For the certainty-equivalent path {eq}`bhs_ce_path`, there is no risk and no model uncertainty ($\theta = \infty$, so $\hat g = 1$), so the value function reduces to discounted expected log utility. + +With $c_t^{ce} = c_0^J + t(\mu + \tfrac{1}{2}\sigma_\varepsilon^2)$, we have + +$$ +U^{ce}(c_0^J) += \sum_{t=0}^{\infty}\beta^t c_t^{ce} += \sum_{t=0}^{\infty}\beta^t \bigl[c_0^J + t(\mu + \tfrac{1}{2}\sigma_\varepsilon^2)\bigr] += \frac{c_0^J}{1-\beta} + \frac{\beta(\mu + \tfrac{1}{2}\sigma_\varepsilon^2)}{(1-\beta)^2}, +$$ + +where we used $\sum_{t \geq 0}\beta^t = \frac{1}{1-\beta}$ and $\sum_{t \geq 0}t\beta^t = \frac{\beta}{(1-\beta)^2}$. + +Factoring gives + +$$ +U^{ce}(c_0^J) = \frac{1}{1-\beta}\left[c_0^J + \frac{\beta}{1-\beta}\left(\mu + \tfrac{1}{2}\sigma_\varepsilon^2\right)\right]. 
+$$ + +### Type I (Epstein--Zin) compensation + +Setting $U^{ce}(c_0^I) = W(x_0)$ from {eq}`bhs_W_rw`: + +$$ +\frac{1}{1-\beta}\left[c_0^I + \frac{\beta}{1-\beta}\left(\mu + \tfrac{1}{2}\sigma_\varepsilon^2\right)\right] += +\frac{1}{1-\beta}\left[c_0 + \frac{\beta}{1-\beta}\left(\mu - \frac{\sigma_\varepsilon^2}{2(1-\beta)\theta}\right)\right]. +$$ + +Multiplying both sides by $(1-\beta)$ and cancelling the common $\frac{\beta\mu}{1-\beta}$ terms gives + +$$ +c_0^I + \frac{\beta\sigma_\varepsilon^2}{2(1-\beta)} += +c_0 - \frac{\beta\sigma_\varepsilon^2}{2(1-\beta)^2\theta}. +$$ + +Solving for $c_0 - c_0^I$: + +```{math} +:label: bhs_comp_type1 +c_0 - c_0^I += +\frac{\beta\sigma_\varepsilon^2}{2(1-\beta)}\left(1 + \frac{1}{(1-\beta)\theta}\right) += +\frac{\beta\sigma_\varepsilon^2\gamma}{2(1-\beta)}, +``` + +where the last step uses $\gamma = 1 + [(1-\beta)\theta]^{-1}$. + +### Type II (multiplier) decomposition + +Because $W \equiv U$, we have $c_0^{II} = c_0^I$ and the total compensation is the same. + +However, the interpretation differs because we can now decompose it into *risk* and *model uncertainty* components. + +A type II agent with $\theta = \infty$ (no model uncertainty) has log preferences and requires + +```{math} +:label: bhs_type2_rw_decomp +\Delta c_0^{risk} += +\frac{\beta \sigma_\varepsilon^2}{2(1-\beta)}, +\qquad +\Delta c_0^{uncertainty} += +\frac{\beta \sigma_\varepsilon^2}{2(1-\beta)^2\theta}. +``` + +The risk term $\Delta c_0^{risk}$ is Lucas's cost of business cycles. + +At postwar consumption volatility ($\sigma_\varepsilon \approx 0.005$), it is negligibly small. + +The uncertainty term $\Delta c_0^{uncertainty}$ captures the additional compensation a type II agent demands for facing model misspecification. + +With $\theta$ in the denominator, this term can be first-order even when the detection-error probability is only moderate. 
+ +### Type III (constraint) compensation + +For a type III agent, we set $U^{ce}(c_0^{III}) = J(x_0)$ using the value function $J$ from {eq}`bhs_J_rw`: + +$$ +\frac{1}{1-\beta}\left[c_0^{III} + \frac{\beta}{1-\beta}\left(\mu + \tfrac{1}{2}\sigma_\varepsilon^2\right)\right] += +\frac{1}{1-\beta}\left[c_0 + \frac{\beta}{1-\beta}\left(\mu - \frac{\sigma_\varepsilon^2}{(1-\beta)\theta}\right)\right]. +$$ + +Following the same algebra as for type I but with the doubled uncertainty correction in $J$: + +$$ +c_0 - c_0^{III} += +\frac{\beta\sigma_\varepsilon^2}{2(1-\beta)} + \frac{\beta\sigma_\varepsilon^2}{(1-\beta)^2\theta}. +$$ + +Using $\frac{1}{(1-\beta)\theta} = \gamma - 1$, this simplifies to + +```{math} +:label: bhs_type3_rw_decomp +c_0 - c_0^{III} += +\frac{\beta\sigma_\varepsilon^2}{2(1-\beta)}(2\gamma - 1). +``` + +The risk component is the same $\frac{\beta\sigma_\varepsilon^2}{2(1-\beta)}$ as before. + +The uncertainty component alone is + +$$ +c_0^{III}(r) - c_0^{III} += +\frac{\beta\sigma_\varepsilon^2}{(1-\beta)^2\theta}, +$$ + +which is *twice* the type II uncertainty compensation {eq}`bhs_type2_rw_decomp`. + +The factor of two traces back to the difference between $W$ and $J$ noted after {eq}`bhs_J_rw`. + +The entropy rebate $\theta N$ in $W = J + \theta N$ partially offsets the pessimistic tilt for the type II agent, but not for the type III agent who evaluates consumption purely under the worst-case model. + +### Type IV (ex post Bayesian) compensation + +A type IV agent believes the pessimistic model, so the perceived drift is $\tilde\mu = \mu - \sigma_\varepsilon^2/[(1-\beta)\theta]$. + +The compensation for moving to the certainty-equivalent path is the same as {eq}`bhs_type3_rw_decomp`, because this agent ranks plans using the same value function $J$. 
+ +### Comparison with a risky but free-of-model-uncertainty path + +The certainty equivalents above compare a risky plan to a deterministic path, thereby eliminating both risk and uncertainty at once. + +We now describe an alternative measure that isolates compensation for model uncertainty alone by keeping risk intact. + +The idea is to compare two situations with identical risky consumption for all dates $t \geq 1$, concentrating all compensation for model uncertainty in a single adjustment to date-zero consumption. + +Specifically, we seek $c_0^{II}(u)$ that makes a type II agent indifferent between: + +1. Facing the stochastic plan under $\theta < \infty$ (fear of model misspecification), consuming $c_0$ at date zero. +2. Facing the *same* stochastic plan under $\theta = \infty$ (no fear of misspecification), but consuming only $c_0^{II}(u) < c_0$ at date zero. + +In both cases, continuation consumptions $c_t$ for $t \geq 1$ are generated by the random walk starting from the *same* $c_0$. + +For the type II agent under $\theta < \infty$, the total value is $W(c_0)$ from {eq}`bhs_W_rw`. + +For the agent liberated from model uncertainty ($\theta = \infty$), the value is + +$$ +c_0^{II}(u) + \beta E\left[V^{\log}(c_1)\right], +$$ + +where $V^{\log}(c_t) = \frac{1}{1-\beta} \left[c_t + \frac{\beta\mu}{1-\beta}\right]$ is the log-utility value function and $c_1 = c_0 + \mu + \sigma_\varepsilon \varepsilon_1$. + +Since $c_1$ is built from $c_0$ (not $c_0^{II}(u)$), the continuation is + +$$ +\beta E\left[V^{\log}(c_1)\right] += \frac{\beta}{1-\beta} E\left[c_1 + \frac{\beta\mu}{1-\beta}\right] += \frac{\beta}{1-\beta}\left[c_0 + \mu + \frac{\beta\mu}{1-\beta}\right] += \frac{\beta}{1-\beta}\left[c_0 + \frac{\mu}{1-\beta}\right], +$$ + +where we used $E[c_1] = c_0 + \mu$ (the noise term has zero mean). + +Expanding gives + +$$ +\beta E\left[V^{\log}(c_1)\right] += \frac{\beta c_0}{1-\beta} + \frac{\beta\mu}{(1-\beta)^2}. 
+$$ + +Setting $W(c_0)$ equal to the liberation value and simplifying: + +$$ +\frac{c_0}{1-\beta} + \frac{\beta\mu}{(1-\beta)^2} - \frac{\beta\sigma_\varepsilon^2}{2(1-\beta)^3\theta} += +c_0^{II}(u) + \frac{\beta c_0}{1-\beta} + \frac{\beta\mu}{(1-\beta)^2}. +$$ + +Because $\frac{c_0}{1-\beta} - \frac{\beta c_0}{1-\beta} = c_0$, solving for the compensation gives + +```{math} +:label: bhs_comp_type2u +c_0 - c_0^{II}(u) = \frac{\beta\sigma_\varepsilon^2}{2(1-\beta)^3\theta} = \frac{\beta\sigma_\varepsilon^2(\gamma - 1)}{2(1-\beta)^2}. +``` + +This is $\frac{1}{1-\beta}$ times the uncertainty compensation $\Delta c_0^{\text{uncertainty}}$ from {eq}`bhs_type2_rw_decomp`. + +The extra factor of $\frac{1}{1-\beta}$ arises because all compensation is packed into a single period. + +Adjusting $c_0$ alone must offset the cumulative loss in continuation value that the uncertainty penalty imposes in every future period. + +An analogous calculation for a **type III** agent, using $J(c_0)$ from {eq}`bhs_J_rw`, gives + +```{math} +:label: bhs_comp_type3u +c_0 - c_0^{III}(u) = \frac{\beta\sigma_\varepsilon^2}{(1-\beta)^3\theta} = \frac{\beta\sigma_\varepsilon^2(\gamma - 1)}{(1-\beta)^2}, +``` + +which is $\frac{1}{1-\beta}$ times the type III uncertainty compensation and *twice* the type II compensation {eq}`bhs_comp_type2u`, again reflecting the absence of the entropy rebate in $J$. + +### Summary of welfare compensations (random walk) + +The following table collects all compensating variations for the random walk model. + +| Agent | Compensation | Formula | Measures | +|:------|:-------------|:--------|:---------| +| I, II | $c_0 - c_0^{II}$ | $\frac{\beta\sigma_\varepsilon^2\gamma}{2(1-\beta)}$ | risk + uncertainty (vs. deterministic) | +| II | $c_0 - c_0^{II}(r)$ | $\frac{\beta\sigma_\varepsilon^2}{2(1-\beta)}$ | risk only (vs. deterministic) | +| II | $c_0^{II}(r) - c_0^{II}$ | $\frac{\beta\sigma_\varepsilon^2}{2(1-\beta)^2\theta}$ | uncertainty only (vs. 
deterministic) | +| II | $c_0 - c_0^{II}(u)$ | $\frac{\beta\sigma_\varepsilon^2}{2(1-\beta)^3\theta}$ | uncertainty only (vs. risky path) | +| III | $c_0 - c_0^{III}$ | $\frac{\beta\sigma_\varepsilon^2(2\gamma-1)}{2(1-\beta)}$ | risk + uncertainty (vs. deterministic) | +| III | $c_0^{III}(r) - c_0^{III}$ | $\frac{\beta\sigma_\varepsilon^2}{(1-\beta)^2\theta}$ | uncertainty only (vs. deterministic) | +| III | $c_0 - c_0^{III}(u)$ | $\frac{\beta\sigma_\varepsilon^2}{(1-\beta)^3\theta}$ | uncertainty only (vs. risky path) | + +The "versus deterministic" rows use the certainty-equivalent path {eq}`bhs_ce_path` as a benchmark. + +The "vs. risky path" rows use the risky-but-uncertainty-free comparison of {eq}`bhs_comp_type2u`--{eq}`bhs_comp_type3u`. + +### Trend-stationary formulas + +For the trend-stationary model, the denominators $(1-\beta)$ in the uncertainty terms are replaced by $(1-\beta\rho)$, and the risk terms involve $(1-\beta\rho^2)$: + +```{math} +:label: bhs_ts_compensations +\Delta c_0^{risk,ts} = \frac{\beta\sigma_\varepsilon^2}{2(1-\beta\rho^2)}, +\qquad +\Delta c_0^{unc,ts,II} = \frac{\beta\sigma_\varepsilon^2}{2(1-\beta\rho)^2\theta}, +\qquad +\Delta c_0^{unc,ts,III} = \frac{\beta\sigma_\varepsilon^2}{(1-\beta\rho)^2\theta}. +``` + +The qualitative message carries over: the risk component is negligible, and the model-uncertainty component dominates. + +## Visualizing the welfare decomposition + +We set $\beta = 0.995$ and calibrate $\theta$ so that $p(\theta^{-1}) = 0.10$, a conservative detection-error level. 
+ +```{code-cell} ipython3 +p_star = 0.10 +θ_star = θ_from_detection_probability(p_star, "rw") +γ_star = γ_from_θ(θ_star) +w_star = w_from_θ(θ_star, "rw") + +# Type II compensations, random walk model +comp_risk_only = β * rw["σ_ε"]**2 / (2.0 * (1.0 - β)) +comp_risk_unc = comp_risk_only + β * rw["σ_ε"]**2 / (2.0 * (1.0 - β)**2 * θ_star) + +# Two useful decompositions in levels +risk_only_pct = 100.0 * (np.exp(comp_risk_only) - 1.0) +risk_unc_pct = 100.0 * (np.exp(comp_risk_unc) - 1.0) +uncertainty_only_pct = 100.0 * (np.exp(comp_risk_unc - comp_risk_only) - 1.0) + +print(f"p*={p_star:.2f}, θ*={θ_star:.4f}, γ*={γ_star:.2f}, w*={w_star:.4f}") +print(f"risk only compensation (log units): {comp_risk_only:.6f}") +print(f"risk + uncertainty compensation (log units): {comp_risk_unc:.6f}") +print(f"risk only compensation (percent): {risk_only_pct:.3f}%") +print(f"risk + uncertainty compensation (percent): {risk_unc_pct:.3f}%") +print(f"uncertainty component alone (percent): {uncertainty_only_pct:.3f}%") + +h = 250 +t = np.arange(h + 1) + +# Baseline approximating model fan +mean_base = rw["μ"] * t +std_base = rw["σ_ε"] * np.sqrt(t) + +# Certainty equivalent line from Eq. 
(47), shifted by compensating variations +certainty_slope = rw["μ"] + 0.5 * rw["σ_ε"]**2 +ce_risk = -comp_risk_only + certainty_slope * t +ce_risk_unc = -comp_risk_unc + certainty_slope * t + +# Alternative models from the ambiguity set in panel B +mean_low = (rw["μ"] + rw["σ_ε"] * w_star) * t +mean_high = (rw["μ"] - rw["σ_ε"] * w_star) * t +``` + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Certainty equivalents under robustness + name: fig-bhs-4 +--- +fig, axes = plt.subplots(1, 2, figsize=(12, 4)) + +# Panel A +ax = axes[0] +ax.fill_between(t, mean_base - std_base, mean_base + std_base, + alpha=0.25, color="tab:blue") +ax.plot(t, ce_risk_unc, lw=2, ls="--", color="black", + label="certainty equivalent: risk + uncertainty") +ax.plot(t, ce_risk, lw=2, color="tab:orange", + label="certainty equivalent: risk only") +ax.plot(t, mean_base, lw=2, + color="tab:blue", label="approximating-model mean") +ax.set_xlabel("quarters") +ax.set_ylabel("log consumption") +ax.legend(frameon=False, fontsize=8, loc="upper left") + +# Panel B +ax = axes[1] +ax.fill_between(t, mean_base - std_base, mean_base + std_base, + alpha=0.20, color="tab:blue") +ax.fill_between(t, mean_low - std_base, mean_low + std_base, + alpha=0.20, color="tab:red") +ax.fill_between(t, mean_high - std_base, mean_high + std_base, + alpha=0.20, color="tab:green") +ax.plot(t, ce_risk_unc, lw=2, ls="--", color="black", + label="certainty equivalent: risk + uncertainty") +ax.plot(t, mean_base, lw=2, color="tab:blue", label="approximating-model mean") +ax.plot(t, mean_low, lw=2, color="tab:red", label="worst-case-leaning mean") +ax.plot(t, mean_high, lw=2, color="tab:green", label="best-case-leaning mean") +ax.set_xlabel("quarters") +ax.set_ylabel("log consumption") +ax.legend(frameon=False, fontsize=8, loc="upper left") + +plt.tight_layout() +plt.show() +``` + +The left panel illustrates the elimination of model uncertainty and risk for a type II agent. 
+ +The shaded fan shows a one-standard-deviation band for the $j$-step-ahead conditional distribution of $c_t$ under the calibrated random-walk model. + +The dashed line $c^{II}$ shows the certainty-equivalent path whose date-zero consumption is reduced by $c_0 - c_0^{II}$, making the type II agent indifferent between this deterministic trajectory and the stochastic plan. + +It compensates for bearing both risk and model ambiguity. + +The solid line $c^r$ shows the certainty equivalent for a type II agent without model uncertainty ($\theta = \infty$), initialized at $c_0 - c_0^{II}(r)$. + +At postwar calibrated values this gap is small, so $c^r$ sits just below the center of the fan. + +Consistent with {cite:t}`Lucas_2003`, the welfare gains from eliminating well-understood risk are very small. + +The large welfare gains found by {cite:t}`Tall2000` can be reinterpreted as arising not from reducing risk, but from reducing model uncertainty. + +The right panel shows the set of nearby models that the robust consumer guards against. + +Each shaded fan depicts a one-standard-deviation band for a different model in the ambiguity set. + +The models are statistically close to the baseline, with detection-error probability $p = 0.10$, but imply very different long-run consumption levels. + +The consumer's caution against such alternatives accounts for the large certainty-equivalent gap in the left panel. + +## Welfare gains from removing model uncertainty + +A type III (constraint-preference) agent evaluates the worst model inside an entropy ball of radius $\eta$. + +As $\eta$ grows the set of plausible misspecifications expands, and with it the welfare cost of confronting model uncertainty. + +Since $\eta$ itself is not easy to interpret, we instead index these costs by the associated detection-error probability $p(\eta)$. + +The figure below plots the compensation for removing model uncertainty, measured as a proportion of consumption, against $p(\eta)$. 
+ +```{code-cell} ipython3 +η_grid = np.linspace(0.0, 5.0, 300) + +# Use w and η relation, then convert to θ model by model +w_abs_grid = np.sqrt(2.0 * (1.0 - β) * η_grid / β) + +θ_rw_from_η = np.full_like(w_abs_grid, np.inf) +θ_ts_from_η = np.full_like(w_abs_grid, np.inf) +mask_w = w_abs_grid > 0.0 +θ_rw_from_η[mask_w] = rw["σ_ε"] / ((1.0 - β) * w_abs_grid[mask_w]) +θ_ts_from_η[mask_w] = ts["σ_ε"] / ((1.0 - β * ts["ρ"]) * w_abs_grid[mask_w]) + +# Type III uncertainty terms from Table 3 +gain_rw = np.where( + np.isinf(θ_rw_from_η), + 0.0, + β * rw["σ_ε"]**2 / ((1.0 - β)**2 * θ_rw_from_η), +) +gain_ts = np.where( + np.isinf(θ_ts_from_η), + 0.0, + β * ts["σ_ε"]**2 / ((1.0 - β * ts["ρ"])**2 * θ_ts_from_η), +) + +# Convert log compensation to percent of initial consumption in levels +gain_rw_pct = 100.0 * (np.exp(gain_rw) - 1.0) +gain_ts_pct = 100.0 * (np.exp(gain_ts) - 1.0) + +# Detection error probabilities implied by η +p_eta_pct = 100.0 * norm.cdf(-0.5 * w_abs_grid * np.sqrt(T)) +order = np.argsort(p_eta_pct) +p_plot = p_eta_pct[order] +gain_rw_plot = gain_rw_pct[order] +gain_ts_plot = gain_ts_pct[order] +``` + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Type III uncertainty compensation curve + name: fig-bhs-5 +--- +fig, ax = plt.subplots(figsize=(7, 4)) +ax.plot(p_plot, gain_rw_plot, lw=2, label="RW type III") +ax.plot(p_plot, gain_ts_plot, lw=2, label="TS type III") +ax.set_xlabel(r"detection error probability $p(\eta)$ (percent)") +ax.set_ylabel("proportion of consumption (percent)") +ax.legend(frameon=False) + +plt.tight_layout() +plt.show() +``` + +The random-walk model implies somewhat larger costs than the trend-stationary model at the same detection-error probability, but both curves dwarf the classic Lucas cost of business cycles. + +To put the magnitudes in perspective, Lucas estimated that eliminating all aggregate consumption risk is worth roughly 0.05% of consumption. 
+ +At detection-error probabilities of 10--20%, the model-uncertainty compensation alone runs to several percent, orders of magnitude larger. + +Under the robust reading, the large risk premia that Tallarini matched with high $\gamma$ are really compensations for bearing model uncertainty, and the implied welfare gains from resolving that uncertainty are correspondingly large. + +The following contour plot shows how type II (multiplier) compensation varies over two dimensions: the detection-error probability $p$ and the consumption volatility $\sigma_\varepsilon$. + +The cross marks the calibrated point ($p = 0.10$, $\sigma_\varepsilon = 0.5\%$). + +At the calibrated volatility, moving left (lower $p$, stronger robustness concerns) increases compensation dramatically, while the classic risk-only cost (the $p = 50\%$ edge) remains negligible. + +A comparison of the two panels reveals that the random-walk model generates much larger welfare costs than the trend-stationary model at the same ($p$, $\sigma_\varepsilon$), because permanent shocks compound the worst-case drift indefinitely. 
+ +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Type II compensation contours + name: fig-bhs-contour +--- +p_grid = np.linspace(0.02, 0.49, 300) +σ_grid = np.linspace(0.001, 0.015, 300) +P, Σ = np.meshgrid(p_grid, σ_grid) + +W_abs = -2 * norm.ppf(P) / np.sqrt(T) + +# RW: total type II = β*σ^2*γ / [2(1-β)] +Γ_rw = 1 + W_abs / Σ +comp_rw = 100 * (np.exp(β * Σ**2 * Γ_rw / (2 * (1 - β))) - 1) + +# TS: risk + uncertainty +ρ_val = ts["ρ"] +risk_ts = β * Σ**2 / (2 * (1 - β * ρ_val**2)) +unc_ts = β * Σ * W_abs / (2 * (1 - β * ρ_val)) +comp_ts = 100 * (np.exp(risk_ts + unc_ts) - 1) + +levels = [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50] + +fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(13, 5.5), sharey=True) + +for ax, comp in [(ax1, comp_rw), (ax2, comp_ts)]: + cf = ax.contourf(100 * P, 100 * Σ, comp, levels=levels, + cmap='Blues', extend='both') + cs = ax.contour(100 * P, 100 * Σ, comp, levels=levels, + colors='k', linewidths=0.5) + ax.clabel(cs, fmt='%g%%', fontsize=8) + ax.plot(10, 0.5, 'x', lw=2, markersize=14, color='w', + mec='k', mew=1, zorder=5) + ax.set_xlabel(r'detection-error probability $p$ (%)') + +ax1.set_ylabel(r'consumption volatility $\sigma_\varepsilon$ (%)') + +plt.tight_layout() +plt.show() +``` + +## Learning doesn't eliminate misspecification fears + +A reasonable question arises: if the consumer has 235 quarters of data, can't she learn enough to dismiss the worst-case model? + +The answer is no. + +This is because the drift is a low-frequency feature that is very hard to pin down. + +Estimating the mean of a random walk to the precision needed to reject small but economically meaningful shifts requires far more data than estimating volatility precisely does. + +The following figure makes this point concrete. + +We measure consumption as real personal consumption expenditures on nondurable goods and services, deflated by its implicit chain price deflator and expressed in per-capita terms using the civilian noninstitutional population aged 16+. 
+ +The construction uses four FRED series: + +| FRED series | Description | +| --- | --- | +| `PCND` | Nominal PCE: nondurable goods (billions of \$, SAAR, quarterly) | +| `PCESV` | Nominal PCE: services (billions of \$, SAAR, quarterly) | +| `DPCERD3Q086SBEA` | PCE implicit price deflator (index 2017 $= 100$, quarterly) | +| `CNP16OV` | Civilian noninstitutional population, 16+ (thousands, monthly) | + +We use nominal rather than chained-dollar components because chained-dollar series are not additive. + +Chain-weighted indices update their base-period expenditure weights every period, so components deflated with different price changes do not sum to the separately chained aggregate. + +Adding nominal series and deflating the sum with a single price index avoids this problem. + +The processing pipeline is: + +1. Add nominal nondurables and services: $C_t^{nom} = C_t^{nd} + C_t^{sv}$. +2. Deflate by the PCE price index: $C_t^{real} = C_t^{nom} / (P_t / 100)$. +3. Convert to per-capita: divide by the quarterly average of the monthly population series. +4. Compute log consumption: $c_t = \log C_t^{real,pc}$. + +When we plot *levels* of log consumption, we align the time index to 1948Q1--2006Q4, which yields $T+1 = 236$ quarterly observations. 
+ +```{code-cell} ipython3 +start_date = dt.datetime(1947, 1, 1) +end_date = dt.datetime(2007, 1, 1) + + +def _read_fred_series(series_id, start_date, end_date): + series = web.DataReader(series_id, "fred", start_date, end_date)[series_id] + series = pd.to_numeric(series, errors="coerce").dropna().sort_index() + if series.empty: + raise ValueError(f"FRED series '{series_id}' returned no data in sample window") + return series + + +# Fetch nominal PCE components, deflator, and population from FRED +nom_nd = _read_fred_series("PCND", start_date, end_date) # quarterly, 1947- +nom_sv = _read_fred_series("PCESV", start_date, end_date) # quarterly, 1947- +defl = _read_fred_series("DPCERD3Q086SBEA", start_date, end_date) # quarterly, 1947- +pop_m = _read_fred_series("CNP16OV", start_date, end_date) # monthly, 1948- + +# Step 1: add nominal nondurables + services +nom_total = nom_nd + nom_sv + +# Step 2: deflate by PCE implicit price deflator (index 2017=100) +real_total = nom_total / (defl / 100.0) + +# Step 3: convert to per-capita (population is monthly, so average to quarterly) +pop_q = pop_m.resample("QS").mean() +real_pc = (real_total / pop_q).dropna() + +# Restrict to sample period 1948Q1-2006Q4 +real_pc = real_pc.loc["1948-01-01":"2006-12-31"].dropna() + +if real_pc.empty: + raise RuntimeError( + "FRED returned no usable observations after alignment/filtering") + +# Step 4: log consumption +log_c_data = np.log(real_pc.to_numpy(dtype=float).reshape(-1)) +years_data = ( + real_pc.index.year + + (real_pc.index.month - 1) / 12.0).to_numpy(dtype=float) + +print(f"Fetched {len(log_c_data)} quarterly observations from FRED") +print(f"Sample: {years_data[0]:.1f} - {years_data[-1] + 0.25:.1f}") +print(f"Observations: {len(log_c_data)}") +``` + +We can verify Table 2 by computing sample moments of log consumption growth from our FRED data: + +```{code-cell} ipython3 +# Growth rates: 1948Q2 to 2006Q4 (T = 235 quarters) +diff_c = np.diff(log_c_data) + +μ_hat = diff_c.mean() 
+σ_hat = diff_c.std(ddof=1) + +print("Sample estimates from FRED data vs Table 2:") +print(f" μ = {μ_hat:.5f} (Table 2 RW: {rw['μ']:.5f})") +print(f" σ_ε = {σ_hat:.4f} (Table 2: {rw['σ_ε']:.4f})") +print(f" T = {len(diff_c)} quarters") +``` + +```{code-cell} ipython3 +p_fig6 = 0.20 + +rw_fig6 = dict(μ=μ_hat, σ_ε=σ_hat) +w_fig6 = 2.0 * norm.ppf(p_fig6) / np.sqrt(T) + +c = log_c_data +years = years_data + +t6 = np.arange(T + 1) +μ_approx = rw_fig6["μ"] +μ_worst = rw_fig6["μ"] + rw_fig6["σ_ε"] * w_fig6 + +a_approx = (c - μ_approx * t6).mean() +a_worst = (c - μ_worst * t6).mean() +line_approx = a_approx + μ_approx * t6 +line_worst = a_worst + μ_worst * t6 + +p_right = np.linspace(0.01, 0.50, 500) +w_right = 2.0 * norm.ppf(p_right) / np.sqrt(T) +μ_worst_right = rw_fig6["μ"] + rw_fig6["σ_ε"] * w_right + +μ_se = rw_fig6["σ_ε"] / np.sqrt(T) +upper_band = rw_fig6["μ"] + 2.0 * μ_se +lower_band = rw_fig6["μ"] - 2.0 * μ_se +``` + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Drift distortion and sampling uncertainty + name: fig-bhs-6 +--- +fig, axes = plt.subplots(1, 2, figsize=(12, 4)) + +ax = axes[0] +ax.plot(years, c, lw=2, color="tab:blue", label="log consumption") +ax.plot(years, line_approx, lw=2, ls="--", + color="black", label="approximating model") +ax.plot( + years, + line_worst, + lw=2, + ls=":", + color="black", + label=rf"wc model $p(\theta^{{-1}})={p_fig6:.1f}$", +) +ax.set_xlabel("year") +ax.set_ylabel("log consumption") +ax.legend(frameon=False, fontsize=8, loc="upper left") + +ax = axes[1] +ax.plot( + 100.0 * p_right, + 1_000.0 * μ_worst_right, + lw=2, + color="tab:red", + label=r"$\mu + \sigma_\varepsilon w(\theta)$", +) +ax.axhline(1_000.0 * rw_fig6["μ"], lw=2, color="black", label=r"$\hat\mu$") +ax.axhline(1_000.0 * upper_band, lw=2, ls="--", + color="gray", label=r"$\hat\mu \pm 2\hat s.e.$") +ax.axhline(1_000.0 * lower_band, lw=2, ls="--", color="gray") +ax.set_xlabel("detection error probability (percent)") +ax.set_ylabel(r"mean consumption 
growth ($\times 10^{-3}$)") +ax.legend(frameon=False, fontsize=8) +ax.set_xlim(0.0, 50.0) + +plt.tight_layout() +plt.show() +``` + +In the left panel, postwar US log consumption is shown alongside two deterministic trend lines: the approximating-model drift $\mu$ and the worst-case drift $\mu + \sigma_\varepsilon w(\theta)$ for $p(\theta^{-1}) = 0.20$. + +The two trends are close enough that, even with six decades of data, it is hard to distinguish them by eye. + +In the right panel, as the detection-error probability rises (the two models become harder to tell apart), the worst-case mean growth rate drifts back toward $\hat\mu$. + +The dashed gray lines mark a two-standard-error band around the maximum-likelihood estimate of $\mu$. + +Even at detection probabilities in the 5--20% range, the worst-case drift remains inside (or very near) this confidence band. + +Drift distortions that are economically large, large enough to generate substantial model-uncertainty premia, are statistically small relative to sampling uncertainty in $\hat\mu$. + +Robustness concerns persist despite long histories precisely because the low-frequency features that matter most for pricing are the hardest to estimate precisely. + +## Concluding remarks + +The title of this lecture poses a question: are large risk premia prices of *variability* (atemporal risk aversion) or prices of *doubts* (model uncertainty)? + +Asset-pricing data alone cannot settle the question, because the two interpretations are observationally equivalent. + +But the choice of interpretation matters for the conclusions we draw. + +Under the risk-aversion reading, high Sharpe ratios imply that consumers would pay a great deal to smooth known aggregate consumption fluctuations. + +Under the robustness reading, those same Sharpe ratios tell us that consumers would pay a great deal to resolve uncertainty about which probability model actually governs consumption growth. 
+ +Three features of the analysis support the robustness reading: + +1. Detection-error probabilities provide a more stable calibration language than $\gamma$. + + - The two consumption models that required very different $\gamma$ values to match the data yield nearly identical pricing implications when indexed by detectability. + +2. The welfare gains implied by asset prices decompose overwhelmingly into a model-uncertainty component, with the pure risk component remaining small, consistent with Lucas's original finding. + +3. The drift distortions that drive pricing are small enough to hide inside standard-error bands, so finite-sample learning cannot eliminate the consumer's fears. + +Whether one ultimately prefers the risk or the uncertainty interpretation, the framework clarifies that the question is not about the size of risk premia but about the economic object those premia measure. + +## Exercises + +The following exercises ask you to fill in several derivation steps. + +```{exercise} +:label: dov_ex1 + +Let $R_{t+1}$ be an $n \times 1$ vector of gross returns with unconditional mean $E(R)$ and covariance matrix $\Sigma_R$. + +Let $m_{t+1}$ be a stochastic discount factor satisfying $\mathbf{1} = E[m_{t+1} R_{t+1}]$. + +1. Use the covariance decomposition $E[mR] = E[m] E[R] + \operatorname{cov}(m,R)$ to show that $\operatorname{cov}(m,R) = \mathbf{1} - E[m] E[R] =: b$. +2. For a portfolio with weight vector $\alpha$ and return $R^p = \alpha^\top R$, show that $\operatorname{cov}(m, R^p) = \alpha^\top b$. +3. Apply the Cauchy--Schwarz inequality to the pair $(m, R^p)$ to obtain $|\alpha^\top b| \leq \sigma(m)\sqrt{\alpha^\top \Sigma_R\alpha}$. +4. Maximize the ratio $|\alpha^\top b|/\sqrt{\alpha^\top \Sigma_R \alpha}$ over $\alpha$ and show that the maximum is $\sqrt{b^\top \Sigma_R^{-1} b}$, attained at $\alpha^\star = \Sigma_R^{-1}b$. +5. Conclude that $\sigma(m) \geq \sqrt{b^\top \Sigma_R^{-1} b}$, which is {eq}`bhs_hj_unconditional`. 
+``` + +```{solution-start} dov_ex1 +:class: dropdown +``` + +**Part 1.** From $\mathbf{1} = E[mR] = E[m] E[R] + \operatorname{cov}(m,R)$, rearranging gives $\operatorname{cov}(m,R) = \mathbf{1} - E[m] E[R]= b$. + +**Part 2.** The portfolio return is $R^p = \alpha^\top R$, so + +$$ +\operatorname{cov}(m, R^p) = \operatorname{cov}(m, \alpha^\top R) = \alpha^\top \operatorname{cov}(m, R) = \alpha^\top b. +$$ + +**Part 3.** +Applying the Cauchy--Schwarz inequality to $(m, R^p)$: + +$$ +|\alpha^\top b| = |\operatorname{cov}(m, R^p)| \leq \sigma(m) \sigma(R^p) = \sigma(m) \sqrt{\alpha^\top \Sigma_R \alpha}. +$$ + +**Part 4.** Rearranging Part 3 gives + +$$ +\frac{|\alpha^\top b|}{\sqrt{\alpha^\top \Sigma_R \alpha}} \leq \sigma(m). +$$ + +To maximize the left-hand side over $\alpha$, define the $\Sigma_R$-inner product $\langle u, v \rangle_{\Sigma} = u^\top \Sigma_R v$. + +Inserting $I = \Sigma_R \Sigma_R^{-1}$ gives + +$$ +\alpha^\top b += \alpha^\top (\Sigma_R \Sigma_R^{-1}) b += (\alpha^\top \Sigma_R)(\Sigma_R^{-1} b) += \langle \alpha, \Sigma_R^{-1}b \rangle_{\Sigma}. +$$ + +Cauchy--Schwarz in this inner product gives + +$$ +|\langle \alpha, \Sigma_R^{-1}b \rangle_{\Sigma}| +\leq +\sqrt{\langle \alpha, \alpha \rangle_{\Sigma}}\sqrt{\langle \Sigma_R^{-1}b, \Sigma_R^{-1}b \rangle_{\Sigma}} += +\sqrt{\alpha^\top \Sigma_R \alpha} \sqrt{b^\top \Sigma_R^{-1} b}, +$$ + +with equality when $\alpha \propto \Sigma_R^{-1} b$. + +Substituting $\alpha^\star = \Sigma_R^{-1} b$ verifies + +$$ +\max_\alpha \frac{|\alpha^\top b|}{\sqrt{\alpha^\top \Sigma_R \alpha}} = \sqrt{b^\top \Sigma_R^{-1} b}. +$$ + +**Part 5.** Combining Parts 3 and 4 gives $\sigma(m) \geq \sqrt{b^\top \Sigma_R^{-1} b}$, which is {eq}`bhs_hj_unconditional`. + +```{solution-end} +``` + +```{exercise} +:label: dov_ex2 + +Combine the SDF representation {eq}`bhs_sdf` with the random-walk consumption dynamics and the Gaussian mean-shift distortion to derive closed-form SDF moments. + +1. 
Show that $\log m_{t+1}$ is normally distributed under the approximating model and compute its mean and variance in terms of $(\beta,\mu,\sigma_\varepsilon,w)$. +2. Use lognormal moments to derive expressions for $E[m]$ and $\sigma(m)/E[m]$. +3. Use the parameter mapping $\theta = [(1-\beta)(\gamma-1)]^{-1}$ and the associated $w$ to obtain closed-form expressions for the random-walk model. +4. Explain why $E[m]$ stays roughly constant while $\sigma(m)/E[m]$ grows linearly with $\gamma$. +``` + +```{solution-start} dov_ex2 +:class: dropdown +``` + +Under the random walk, + +$$ +c_{t+1}-c_t=\mu+\sigma_\varepsilon \varepsilon_{t+1} + +$$ +with $\varepsilon_{t+1}\sim\mathcal{N}(0,1)$ under the approximating model. + +Using {eq}`bhs_sdf` and the Gaussian distortion + +$$ +\hat g_{t+1}=\exp \left(w\varepsilon_{t+1}-\tfrac{1}{2}w^2\right), + +$$ +we get + +$$ +m_{t+1} += +\beta \exp \left(-(c_{t+1}-c_t)\right)\hat g_{t+1} += +\beta \exp \left(-\mu-\sigma_\varepsilon\varepsilon_{t+1}\right)\exp \left(w\varepsilon_{t+1}-\frac{1}{2}w^2\right). +$$ + +Therefore + +$$ +\log m_{t+1} += +\log\beta-\mu-\frac{1}{2}w^2 + (w-\sigma_\varepsilon)\varepsilon_{t+1}, + +$$ +which is normal with mean + +$$ +E[\log m]=\log\beta-\mu-\tfrac{1}{2}w^2 + +$$ +and variance + +$$ +\operatorname{Var}(\log m)=(w-\sigma_\varepsilon)^2. +$$ + +For a lognormal random variable, + +$$ +E[m]=\exp(E[\log m]+\tfrac{1}{2}\operatorname{Var}(\log m)) + +$$ +and + +$$ +\sigma(m)/E[m]=\sqrt{e^{\operatorname{Var}(\log m)}-1}. + +$$ +Hence + +$$ +E[m] += +\beta\exp\left( +-\mu-\frac{1}{2}w^2+\frac{1}{2}(w-\sigma_\varepsilon)^2 +\right) += +\beta\exp\left(-\mu+\frac{\sigma_\varepsilon^2}{2}-\sigma_\varepsilon w\right), + +$$ +and + +$$ +\frac{\sigma(m)}{E[m]} += +\sqrt{\exp\left((w-\sigma_\varepsilon)^2\right)-1}. +$$ + +Now use $w_{\text{RW}}(\theta)=-\sigma_\varepsilon/[(1-\beta)\theta]$ from {eq}`bhs_w_formulas` and +$\theta=[(1-\beta)(\gamma-1)]^{-1}$ to get $w=-\sigma_\varepsilon(\gamma-1)$. 
+Then + +$$ +-\sigma_\varepsilon w=\sigma_\varepsilon^2(\gamma-1) + +$$ +and + +$$ +(w-\sigma_\varepsilon)^2 = (-\sigma_\varepsilon\gamma)^2=\sigma_\varepsilon^2\gamma^2. + +$$ +Substituting gives the closed-form expressions for the random-walk model: + +```{math} +:label: bhs_Em_rw +E[m] = \beta \exp\left[-\mu + \frac{\sigma_\varepsilon^2}{2}(2\gamma - 1)\right], +``` + +```{math} +:label: bhs_sigma_rw +\frac{\sigma(m)}{E[m]} = \sqrt{\exp\left(\sigma_\varepsilon^2 \gamma^2\right) - 1}. +``` + +Notice that in {eq}`bhs_Em_rw`, because $\sigma_\varepsilon$ is small ($\approx 0.005$), the term $\frac{\sigma_\varepsilon^2}{2}(2\gamma-1)$ grows slowly with $\gamma$, keeping $E[m]$ roughly constant near $1/(1+r^f)$. + +Meanwhile {eq}`bhs_sigma_rw` shows that $\sigma(m)/E[m] \approx \sigma_\varepsilon \gamma$ grows linearly with $\gamma$. + +This is how Epstein--Zin preferences push volatility toward the HJ bound without distorting the risk-free rate. + +An analogous calculation for the trend-stationary model yields: + +```{math} +:label: bhs_Em_ts +E[m] = \beta \exp\left[-\mu + \frac{\sigma_\varepsilon^2}{2}\left(1 - \frac{2(1-\beta)(1-\gamma)}{1-\beta\rho} + \frac{1-\rho}{1+\rho}\right)\right], +``` + +```{math} +:label: bhs_sigma_ts +\frac{\sigma(m)}{E[m]} = \sqrt{\exp\left[\sigma_\varepsilon^2\left(\left(\frac{(1-\beta)(1-\gamma)}{1-\beta\rho} - 1\right)^{2} + \frac{1-\rho}{1+\rho}\right)\right] - 1}. +``` + +```{solution-end} +``` + +```{exercise} +:label: dov_ex3 + +Starting from the type I recursion {eq}`bhs_type1_recursion` and the definitions of $U_t$ and $\theta$ in {eq}`bhs_Ut_def`--{eq}`bhs_theta_def`, derive the risk-sensitive recursion {eq}`bhs_risk_sensitive`. + +Verify that as $\gamma \to 1$ (equivalently $\theta \to \infty$), the recursion converges to standard discounted expected log utility $U_t = c_t + \beta E_t U_{t+1}$. 
+``` + +```{solution-start} dov_ex3 +:class: dropdown +``` + +Start from the type I recursion {eq}`bhs_type1_recursion` and write + +$$ +(V_{t+1})^{1-\gamma} = \exp\bigl((1-\gamma)\log V_{t+1}\bigr). +$$ + +Using $\log V_t = (1-\beta)U_t$ from {eq}`bhs_Ut_def`, we obtain + +$$ +(1-\beta)U_t += +(1-\beta)c_t ++ +\frac{\beta}{1-\gamma}\log E_t\left[\exp\bigl((1-\gamma)(1-\beta)U_{t+1}\bigr)\right]. +$$ + +Divide by $(1-\beta)$ and use {eq}`bhs_theta_def`, + +$$ +\theta = -\bigl[(1-\beta)(1-\gamma)\bigr]^{-1}. +$$ + +Then $(1-\gamma)(1-\beta)=-1/\theta$ and $\beta/[(1-\beta)(1-\gamma)]=-\beta\theta$, so + +$$ +U_t += +c_t - \beta\theta \log E_t \left[\exp \left(-\frac{U_{t+1}}{\theta}\right)\right], +$$ + +which is {eq}`bhs_risk_sensitive`. + +For $\theta\to\infty$ (equivalently $\gamma\to 1$), use the expansion + +$$ +\exp(-U_{t+1}/\theta)=1-U_{t+1}/\theta+o(1/\theta). +$$ + +Taking expectations, + +$$ +E_t[\exp(-U_{t+1}/\theta)] = 1 - E_t[U_{t+1}]/\theta + o(1/\theta). +$$ + +Applying $\log(1+x) = x + o(x)$ with $x = -E_t[U_{t+1}]/\theta + o(1/\theta)$, + +$$ +\log E_t[\exp(-U_{t+1}/\theta)] += +-E_t[U_{t+1}]/\theta + o(1/\theta), +$$ + +so $-\theta\log E_t[\exp(-U_{t+1}/\theta)] \to E_t[U_{t+1}]$ as +$\theta\to\infty$ and the recursion converges to + +$$ +U_t = c_t + \beta E_t U_{t+1}. +$$ + +```{solution-end} +``` + +```{exercise} +:label: dov_ex4 + +Consider the type II Bellman equation {eq}`bhs_bellman_type2`. + +1. Use a Lagrange multiplier to impose the normalization constraint $\int g(\varepsilon) \pi(\varepsilon) d\varepsilon = 1$. +2. Derive the first-order condition for $g(\varepsilon)$ and show that the minimizer is the exponential tilt in {eq}`bhs_ghat`. +3. Substitute your minimizing $g$ back into {eq}`bhs_bellman_type2` to recover the risk-sensitive Bellman equation {eq}`bhs_bellman_type1`. + +Conclude that $W(x) \equiv U(x)$ for consumption plans in $\mathcal{C}(A,B,H;x_0)$. 
+``` + +```{solution-start} dov_ex4 +:class: dropdown +``` + +Fix $x$ and write $W'(\varepsilon) := W(Ax + B\varepsilon)$ for short. + +Form the Lagrangian + +$$ +\mathcal{L}[g,\lambda] += +\beta \int \Bigl[g(\varepsilon)W'(\varepsilon) + \theta g(\varepsilon)\log g(\varepsilon)\Bigr]\pi(\varepsilon)d\varepsilon ++ +\lambda\left(\int g(\varepsilon)\pi(\varepsilon) d\varepsilon - 1\right). +$$ + +The pointwise first-order condition for $g(\varepsilon)$ is + +$$ +0 += +\frac{\partial \mathcal{L}}{\partial g(\varepsilon)} += +\beta\Bigl[W'(\varepsilon) + \theta(1+\log g(\varepsilon))\Bigr]\pi(\varepsilon) ++ +\lambda\pi(\varepsilon), +$$ + +so (dividing by $\beta\pi(\varepsilon)$) + +$$ +\log g(\varepsilon) += +-\frac{W'(\varepsilon)}{\theta} - 1 - \frac{\lambda}{\beta\theta}. +$$ + +Exponentiating yields $g(\varepsilon)=K\exp(-W'(\varepsilon)/\theta)$ where $K = \exp(-1 - \lambda/(\beta\theta))$ is a constant that does not depend on $\varepsilon$. + +To pin down $K$, impose the normalization $\int g(\varepsilon)\pi(\varepsilon)d\varepsilon=1$: + +$$ +1 = K \int \exp \left(-\frac{W(Ax+B\varepsilon)}{\theta}\right)\pi(\varepsilon) d\varepsilon, +$$ + +so + +$$ +K^{-1} += +\int \exp\left(-\frac{W(Ax+B\varepsilon)}{\theta}\right)\pi(\varepsilon) d\varepsilon. +$$ + +Substituting $K^{-1}$ into the denominator of $g = K\exp(-W'/\theta)$ gives the minimizer: + +$$ +g^*(\varepsilon) += +\frac{\exp\left(-W(Ax+B\varepsilon)/\theta\right)}{ + \int \exp\left(-W(Ax+B\tilde\varepsilon)/\theta\right)\pi(\tilde\varepsilon) d\tilde\varepsilon}. +$$ + +This has exactly the same form as the distortion $\hat g_{t+1} = \exp(-U_{t+1}/\theta)/E_t[\exp(-U_{t+1}/\theta)]$ that appears in the type I SDF {eq}`bhs_sdf_Ut`, with $W$ in place of $U$. + +Once we verify below that $W \equiv U$, the minimizer $g^*$ and the SDF distortion $\hat g$ coincide, which is {eq}`bhs_ghat`. + +To substitute back, define + +$$ +Z(x):=\int \exp(-W(Ax+B\varepsilon)/\theta)\pi(\varepsilon) d\varepsilon. 
+$$ + +Then $\hat g(\varepsilon)=\exp(-W(Ax+B\varepsilon)/\theta)/Z(x)$ and + +$$ +\log\hat g(\varepsilon)=-W(Ax+B\varepsilon)/\theta-\log Z(x). +$$ + +Hence + +$$ +\int \Bigl[\hat g(\varepsilon)W(Ax+B\varepsilon) + \theta \hat g(\varepsilon)\log \hat g(\varepsilon)\Bigr]\pi(\varepsilon) d\varepsilon += +-\theta\log Z(x), +$$ + +because the $W$ terms cancel and $\int \hat g \pi = 1$. + +Plugging this into {eq}`bhs_bellman_type2` gives + +$$ +W(x) += +c-\beta\theta\log Z(x) += +c-\beta\theta \log \int \exp\left(-\frac{W(Ax+B\varepsilon)}{\theta}\right)\pi(\varepsilon) d\varepsilon, +$$ + +which is {eq}`bhs_bellman_type1`. Therefore $W(x)\equiv U(x)$. + +```{solution-end} +``` + +```{exercise} +:label: dov_ex5 + +Let $\varepsilon \sim \mathcal{N}(0,1)$ under the approximating model and define + +$$ +\hat g(\varepsilon) = \exp\left(w\varepsilon - \frac{1}{2}w^2\right) +$$ + +as in the Gaussian mean-shift section. + +1. Show that $E[\hat g(\varepsilon)] = 1$. + +2. Show that for any bounded measurable function $f$, + +$$ +E[\hat g(\varepsilon) f(\varepsilon)] +$$ + +equals the expectation of $f$ under $\mathcal{N}(w,1)$. + +3. Compute the mean and variance of $\log \hat g(\varepsilon)$ and use these to derive + +$$ +\operatorname{std}(\hat g) = \sqrt{e^{w^2}-1}. +$$ + +4. Compute the conditional relative entropy $E[\hat g\log \hat g]$ and verify that it equals $\tfrac{1}{2}w^2$. +``` + +```{solution-start} dov_ex5 +:class: dropdown +``` + +1. Using the moment generating function of a standard normal, + +$$ +E[\hat g(\varepsilon)] += +e^{-w^2/2}E[e^{w\varepsilon}] += +e^{-w^2/2}e^{w^2/2} += +1. +$$ + +2. Let $\varphi(\varepsilon) = (2\pi)^{-1/2}e^{-\varepsilon^2/2}$ be the $\mathcal{N}(0,1)$ density. + +Then + +$$ +\hat g(\varepsilon)\varphi(\varepsilon) += +\frac{1}{\sqrt{2\pi}} +\exp\left(w\varepsilon-\frac{1}{2}w^2-\frac{1}{2}\varepsilon^2\right) += +\frac{1}{\sqrt{2\pi}} +\exp\left(-\frac{1}{2}(\varepsilon-w)^2\right), +$$ + +which is the $\mathcal{N}(w,1)$ density. 
+ +Therefore, for bounded measurable $f$, + +$$ +E[\hat g(\varepsilon)f(\varepsilon)] += +\int f(\varepsilon)\hat g(\varepsilon)\varphi(\varepsilon)d\varepsilon +$$ + +equals the expectation of $f$ under $\mathcal{N}(w,1)$. + +3. Since $\log \hat g(\varepsilon) = w\varepsilon - \tfrac{1}{2}w^2$ and $\varepsilon\sim\mathcal{N}(0,1)$, + +$$ +E[\log \hat g] = -\frac{1}{2}w^2, +\qquad +\operatorname{Var}(\log \hat g)=w^2. + +$$ +Moreover, $\operatorname{Var}(\hat g)=E[\hat g^2]-1$ because $E[\hat g]=1$. + +Now + +$$ +E[\hat g^2] += +E\left[\exp\left(2w\varepsilon - w^2\right)\right] += +e^{-w^2}E[e^{2w\varepsilon}] += +e^{-w^2}e^{(2w)^2/2} += +e^{w^2}, + +$$ +so $\operatorname{std}(\hat g)=\sqrt{e^{w^2}-1}$. + +4. Using part 2 with $f(\varepsilon)=\log \hat g(\varepsilon)=w\varepsilon-\tfrac{1}{2}w^2$, + +$$ +E[\hat g\log \hat g] += +E_{\mathcal{N}(w,1)}\left[w\varepsilon-\frac{1}{2}w^2\right] += +w\cdot E_{\mathcal{N}(w,1)}[\varepsilon]-\frac{1}{2}w^2 += +w^2-\frac{1}{2}w^2 += +\frac{1}{2}w^2. +$$ + +```{solution-end} +``` + +```{exercise} +:label: dov_ex6 + +Derive the worst-case mean shifts {eq}`bhs_w_formulas` for both consumption models. + +From {eq}`bhs_ghat`, $\hat g_{t+1} \propto \exp(-W(x_{t+1})/\theta)$. + +When $W$ is linear in the state, the exponent is linear in $\varepsilon_{t+1}$, and the Gaussian mean shift is $w = -\lambda/\theta$ where $\lambda$ is the coefficient on $\varepsilon_{t+1}$ in $W(x_{t+1})$. + +1. Random-walk model: Guess $W(x_t) = \frac{1}{1-\beta}[c_t + d]$. Using $c_{t+1} = c_t + \mu + \sigma_\varepsilon\varepsilon_{t+1}$, find $\lambda$ and show that $w = -\sigma_\varepsilon/[(1-\beta)\theta]$. + +2. Trend-stationary model: Write $z_t = \tilde c_t - \zeta$ and guess $W(x_t) = \frac{1}{1-\beta}[c_t + \alpha_1 z_t + \alpha_0]$. Show that: + - The coefficient on $\varepsilon_{t+1}$ in $W(x_{t+1})$ is $(1+\alpha_1)\sigma_\varepsilon/(1-\beta)$. 
+ - Matching coefficients on $z_t$ in the Bellman equation gives $\alpha_1 = \beta(\rho-1)/(1-\beta\rho)$. + - Therefore $1+\alpha_1 = (1-\beta)/(1-\beta\rho)$ and $w = -\sigma_\varepsilon/[(1-\beta\rho)\theta]$. +``` + +```{solution-start} dov_ex6 +:class: dropdown +``` + +**Part 1.** +Under the guess $W(x_t) = \frac{1}{1-\beta}[c_t + d]$ and $c_{t+1} = c_t + \mu + \sigma_\varepsilon\varepsilon_{t+1}$, + +$$ +W(x_{t+1}) = \frac{1}{1-\beta}[c_t + \mu + \sigma_\varepsilon\varepsilon_{t+1} + d]. +$$ + +The coefficient on $\varepsilon_{t+1}$ is $\lambda = \sigma_\varepsilon/(1-\beta)$, so $w = -\lambda/\theta = -\sigma_\varepsilon/[(1-\beta)\theta]$. + +**Part 2.** +Under the guess $W(x_t) = \frac{1}{1-\beta}[c_t + \alpha_1 z_t + \alpha_0]$ with $c_{t+1} = c_t + \mu + (\rho-1)z_t + \sigma_\varepsilon\varepsilon_{t+1}$ and $z_{t+1} = \rho z_t + \sigma_\varepsilon\varepsilon_{t+1}$, + +$$ +W(x_{t+1}) = \tfrac{1}{1-\beta}\bigl[c_t + \mu + (\rho-1)z_t + \sigma_\varepsilon\varepsilon_{t+1} + \alpha_1(\rho z_t + \sigma_\varepsilon\varepsilon_{t+1}) + \alpha_0\bigr]. +$$ + +The coefficient on $\varepsilon_{t+1}$ is $(1+\alpha_1)\sigma_\varepsilon/(1-\beta)$. + +To find $\alpha_1$, substitute the guess into the Bellman equation. + +The factors of $\frac{1}{1-\beta}$ cancel on both sides, and matching coefficients on $z_t$ gives + +$$ +\alpha_1 = \beta\bigl[(\rho-1) + \alpha_1\rho\bigr] +\quad\Rightarrow\quad +\alpha_1(1-\beta\rho) = \beta(\rho-1) +\quad\Rightarrow\quad +\alpha_1 = \frac{\beta(\rho-1)}{1-\beta\rho}. +$$ + +Therefore + +$$ +1+\alpha_1 = \frac{1-\beta\rho + \beta(\rho-1)}{1-\beta\rho} = \frac{1-\beta}{1-\beta\rho}, +$$ + +and the coefficient on $\varepsilon_{t+1}$ becomes $(1+\alpha_1)\sigma_\varepsilon/(1-\beta) = \sigma_\varepsilon/(1-\beta\rho)$, giving $w = -\sigma_\varepsilon/[(1-\beta\rho)\theta]$. 
+ +```{solution-end} +``` + +```{exercise} +:label: dov_ex7 + +Verify the closed-form value function {eq}`bhs_W_rw` for the random-walk model by substituting a guess of the form $W(x_t) = \frac{1}{1-\beta}[c_t + d]$ into the risk-sensitive Bellman equation {eq}`bhs_bellman_type1`. + +1. Under the random walk $c_{t+1} = c_t + \mu + \sigma_\varepsilon \varepsilon_{t+1}$, show that $W(Ax_t + B\varepsilon) = \frac{1}{1-\beta}[c_t + \mu + \sigma_\varepsilon\varepsilon_{t+1} + d]$. +2. Substitute into the $\log E\exp$ term, using the fact that for $Z \sim \mathcal{N}(\mu_Z, \sigma_Z^2)$ we have $\log E[\exp(Z)] = \mu_Z + \frac{1}{2}\sigma_Z^2$. +3. Solve for $d$ and confirm that it matches {eq}`bhs_W_rw`. +``` + +```{solution-start} dov_ex7 +:class: dropdown +``` + +**Part 1.** Under the random walk, $c_{t+1} = c_t + \mu + \sigma_\varepsilon\varepsilon_{t+1}$. Substituting the guess $W(x) = \frac{1}{1-\beta}[Hx + d]$ with $Hx_t = c_t$: + +$$ +W(Ax_t + B\varepsilon_{t+1}) = \frac{1}{1-\beta}\bigl[c_t + \mu + \sigma_\varepsilon\varepsilon_{t+1} + d\bigr]. +$$ + +**Part 2.** The Bellman equation {eq}`bhs_bellman_type1` requires computing + +$$ +-\beta\theta\log E_t\left[\exp\left(\frac{-W(Ax_t + B\varepsilon_{t+1})}{\theta}\right)\right]. +$$ + +Substituting the guess: + +$$ +\frac{-W(Ax_t + B\varepsilon_{t+1})}{\theta} += +\frac{-1}{(1-\beta)\theta}\bigl[c_t + \mu + d + \sigma_\varepsilon\varepsilon_{t+1}\bigr]. +$$ + +This is an affine function of the standard normal $\varepsilon_{t+1}$, so the argument of the $\log E\exp$ is normal with + +$$ +\mu_Z = \frac{-(c_t + \mu + d)}{(1-\beta)\theta}, +\qquad +\sigma_Z^2 = \frac{\sigma_\varepsilon^2}{(1-\beta)^2\theta^2}. +$$ + +Using $\log E[e^Z] = \mu_Z + \frac{1}{2}\sigma_Z^2$: + +$$ +-\beta\theta\left[\frac{-(c_t + \mu + d)}{(1-\beta)\theta} + \frac{\sigma_\varepsilon^2}{2(1-\beta)^2\theta^2}\right] += +\frac{\beta}{1-\beta}\left[c_t + \mu + d - \frac{\sigma_\varepsilon^2}{2(1-\beta)\theta}\right]. 
+$$
+
+**Part 3.** The Bellman equation becomes
+
+$$
+\frac{1}{1-\beta}[c_t + d]
+=
+c_t + \frac{\beta}{1-\beta}\left[c_t + \mu + d - \frac{\sigma_\varepsilon^2}{2(1-\beta)\theta}\right].
+$$
+
+Expanding the right-hand side:
+
+$$
+c_t + \frac{\beta c_t}{1-\beta} + \frac{\beta(\mu + d)}{1-\beta} - \frac{\beta\sigma_\varepsilon^2}{2(1-\beta)^2\theta}
+=
+\frac{c_t}{1-\beta} + \frac{\beta(\mu + d)}{1-\beta} - \frac{\beta\sigma_\varepsilon^2}{2(1-\beta)^2\theta}.
+$$
+
+Equating both sides and cancelling $\frac{c_t}{1-\beta}$:
+
+$$
+\frac{d}{1-\beta} = \frac{\beta(\mu + d)}{1-\beta} - \frac{\beta\sigma_\varepsilon^2}{2(1-\beta)^2\theta}.
+$$
+
+Solving: $d - \beta d = \beta\mu - \frac{\beta\sigma_\varepsilon^2}{2(1-\beta)\theta}$, so
+
+$$
+d = \frac{\beta}{1-\beta}\left(\mu - \frac{\sigma_\varepsilon^2}{2(1-\beta)\theta}\right),
+$$
+
+which matches {eq}`bhs_W_rw`.
+
+```{solution-end}
+```
+
+```{exercise}
+:label: dov_ex8
+
+In the Gaussian mean-shift setting of {ref}`Exercise 5 <dov_ex5>`, let $L_T$ be the log likelihood ratio between the worst-case and approximating models based on $T$ observations.
+
+1. Show that $L_T$ is normal under each model.
+2. Compute its mean and variance under the approximating and worst-case models.
+3. Using the definition of detection-error probability in {eq}`bhs_detection_formula`, derive the closed-form expression {eq}`bhs_detection_closed`.
+```
+
+```{solution-start} dov_ex8
+:class: dropdown
+```
+
+Let the approximating model be $\varepsilon_i \sim \mathcal{N}(0,1)$ and the worst-case model be $\varepsilon_i \sim \mathcal{N}(w,1)$, iid for $i=1,\ldots,T$.
+ +Take the log likelihood ratio in the direction that matches the definitions in the text: + +$$ +L_T += +\log \frac{\prod_{i=1}^T \varphi(\varepsilon_i)}{\prod_{i=1}^T \varphi(\varepsilon_i-w)} += +\sum_{i=1}^T \ell(\varepsilon_i), + +$$ +where $\varphi$ is the $\mathcal{N}(0,1)$ density and + +$$ +\ell(\varepsilon) += +\log \varphi(\varepsilon) - \log \varphi(\varepsilon-w) += +-\frac{1}{2}\Bigl[\varepsilon^2-(\varepsilon-w)^2\Bigr] += +-w\varepsilon + \frac{1}{2}w^2. +$$ + +Therefore + +$$ +L_T = -w\sum_{i=1}^T \varepsilon_i + \tfrac{1}{2}w^2T. +$$ + +Under the approximating model, $\sum_{i=1}^T \varepsilon_i \sim \mathcal{N}(0,T)$, so + +$$ +L_T \sim \mathcal{N}\left(\frac{1}{2}w^2T, w^2T\right). +$$ + +Under the worst-case model, $\sum_{i=1}^T \varepsilon_i \sim \mathcal{N}(wT,T)$, so + +$$ +L_T \sim \mathcal{N}\left(-\frac{1}{2}w^2T, w^2T\right). +$$ + +Now + +$$ +p_A = \Pr_A(L_T<0) += +\Phi\left(\frac{0-\frac{1}{2}w^2T}{|w|\sqrt{T}}\right) += +\Phi\left(-\frac{|w|\sqrt{T}}{2}\right), +$$ + +and + +$$ +p_B = \Pr_B(L_T>0) += +1-\Phi\left(\frac{0-(-\frac{1}{2}w^2T)}{|w|\sqrt{T}}\right) += +1-\Phi\left(\frac{|w|\sqrt{T}}{2}\right) += +\Phi\left(-\frac{|w|\sqrt{T}}{2}\right). +$$ + +Therefore + +$$ +p(\theta^{-1})=\tfrac{1}{2}(p_A+p_B)=\Phi\left(-\tfrac{|w|\sqrt{T}}{2}\right), + +$$ +which is {eq}`bhs_detection_closed`. + +```{solution-end} +``` + +```{exercise} +:label: dov_ex9 + +Using the formulas for $w(\theta)$ in {eq}`bhs_w_formulas` and the definition of discounted entropy + +$$ +\eta = \frac{\beta}{1-\beta}\cdot \frac{w(\theta)^2}{2}, +$$ + +show that holding $\eta$ fixed across the random-walk and trend-stationary consumption specifications implies the mapping {eq}`bhs_theta_cross_model`. + +Specialize your result to the case $\sigma_\varepsilon^{\text{TS}} = \sigma_\varepsilon^{\text{RW}}$ and interpret the role of $\rho$. 
+``` + +```{solution-start} dov_ex9 +:class: dropdown +``` + +Because $\eta$ depends on $\theta$ only through $w(\theta)^2$, holding $\eta$ fixed across models is equivalent to holding $|w(\theta)|$ fixed. + +Using {eq}`bhs_w_formulas`, + +$$ +|w_{\text{RW}}(\theta_{\text{RW}})| += +\frac{\sigma_\varepsilon^{\text{RW}}}{(1-\beta)\theta_{\text{RW}}}, +\qquad +|w_{\text{TS}}(\theta_{\text{TS}})| += +\frac{\sigma_\varepsilon^{\text{TS}}}{(1-\beta\rho)\theta_{\text{TS}}}. +$$ + +Equating these magnitudes and solving for $\theta_{\text{TS}}$ gives + +$$ +\theta_{\text{TS}} += +\left(\frac{\sigma_\varepsilon^{\text{TS}}}{\sigma_\varepsilon^{\text{RW}}}\right) +\frac{1-\beta}{1-\beta\rho}\theta_{\text{RW}}, +$$ + +which is {eq}`bhs_theta_cross_model`. + +If $\sigma_\varepsilon^{\text{TS}}=\sigma_\varepsilon^{\text{RW}}$, then + +$$ +\theta_{\text{TS}}=\frac{1-\beta}{1-\beta\rho}\theta_{\text{RW}}. +$$ + +Since $\rho\in(0,1)$ implies $1-\beta\rho > 1-\beta$, the ratio $(1-\beta)/(1-\beta\rho)$ is less than one. + +To hold entropy fixed, the trend-stationary model therefore requires a smaller $\theta$ (i.e., a cheaper distortion and stronger robustness) than the random-walk model. + +```{solution-end} +``` + +```{exercise} +:label: dov_ex10 + +For type II (multiplier) preferences under random-walk consumption growth, derive the compensating-variation formulas in {eq}`bhs_type2_rw_decomp`. + +In particular, derive + +1. the *risk* term by comparing the stochastic economy to a deterministic consumption path with the same mean level of consumption (Lucas's thought experiment), and +2. the *uncertainty* term by comparing a type II agent with parameter $\theta$ to the expected-utility case $\theta=\infty$, holding the stochastic environment fixed. +``` + +```{solution-start} dov_ex10 +:class: dropdown +``` + +Write the random walk as + +$$ +c_t = c_0 + t\mu + \sigma_\varepsilon\sum_{j=1}^t \varepsilon_j +$$ + +with $\varepsilon_j\stackrel{iid}{\sim}\mathcal{N}(0,1)$. 
+ +**Risk term:** + +The mean level of consumption is + +$$ +E[C_t]=E[e^{c_t}]=\exp(c_0+t\mu+\tfrac{1}{2}t\sigma_\varepsilon^2), +$$ + +so the deterministic path with the same mean levels is + +$$ +\bar c_t = c_0 + t(\mu+\tfrac{1}{2}\sigma_\varepsilon^2). +$$ + +Under expected log utility ($\theta=\infty$), discounted expected utility is + +$$ +\sum_{t\geq 0}\beta^t E[c_t] += +\frac{c_0}{1-\beta} + \frac{\beta\mu}{(1-\beta)^2}, + +$$ + +while for the deterministic mean-level path it is + +$$ +\sum_{t\geq 0}\beta^t \bar c_t += +\frac{c_0}{1-\beta} + \frac{\beta(\mu+\tfrac{1}{2}\sigma_\varepsilon^2)}{(1-\beta)^2}. +$$ + +If we reduce initial consumption by $\Delta c_0^{risk}$ (so $\bar c_t$ shifts down by $\Delta c_0^{risk}$ for all $t$), utility falls by $\Delta c_0^{risk}/(1-\beta)$. + +Equating the two utilities gives + +$$ +\frac{\Delta c_0^{risk}}{1-\beta} += +\frac{\beta(\tfrac{1}{2}\sigma_\varepsilon^2)}{(1-\beta)^2} +\quad\Rightarrow\quad +\Delta c_0^{risk}=\frac{\beta\sigma_\varepsilon^2}{2(1-\beta)}. +$$ + +**Uncertainty term:** + +For type II multiplier preferences, the minimizing distortion is a Gaussian mean shift with parameter $w$ and per-period relative entropy $\tfrac{1}{2}w^2$. + +Under the distorted model, $E[\varepsilon]=w$, so + +$$ +E[c_t]=c_0+t(\mu+\sigma_\varepsilon w). +$$ + +Plugging this into the type II objective (and using $E_t[g\log g]=\tfrac{1}{2}w^2$) gives the discounted objective as a function of $w$: + +$$ +J(w) += +\sum_{t\geq 0}\beta^t\Bigl(c_0+t(\mu+\sigma_\varepsilon w)\Bigr) ++ +\sum_{t\geq 0}\beta^{t+1}\theta\cdot\frac{w^2}{2}. +$$ + +Using $\sum_{t\geq0}\beta^t=1/(1-\beta)$ and $\sum_{t\geq0}t\beta^t=\beta/(1-\beta)^2$, + +$$ +J(w) += +\frac{c_0}{1-\beta} ++ +\frac{\beta(\mu+\sigma_\varepsilon w)}{(1-\beta)^2} ++ +\frac{\beta\theta}{1-\beta}\cdot\frac{w^2}{2}. 
+$$ + +Minimizing over $w$ yields + +$$ +0=\frac{\partial J}{\partial w} += +\frac{\beta\sigma_\varepsilon}{(1-\beta)^2} ++ +\frac{\beta\theta}{1-\beta}w +\quad\Rightarrow\quad +w^*=-\frac{\sigma_\varepsilon}{(1-\beta)\theta}, +$$ + +which matches {eq}`bhs_w_formulas`. + +Substituting $w^*$ back in gives + +$$ +J(w^*) += +\frac{c_0}{1-\beta} ++ +\frac{\beta\mu}{(1-\beta)^2} +-\frac{\beta\sigma_\varepsilon^2}{2(1-\beta)^3\theta}. +$$ + +When $\theta=\infty$ (no model uncertainty), the last term disappears. +Thus the utility gain from removing model uncertainty at fixed $(\mu,\sigma_\varepsilon)$ is + +$$ +\beta\sigma_\varepsilon^2/[2(1-\beta)^3\theta]. +$$ + +To offset this by a permanent upward shift in initial log consumption, we need + +$$ +\Delta c_0^{uncertainty}/(1-\beta)=\beta\sigma_\varepsilon^2/[2(1-\beta)^3\theta], +$$ + +so + +$$ +\Delta c_0^{uncertainty} += +\frac{\beta\sigma_\varepsilon^2}{2(1-\beta)^2\theta}. +$$ + +Together these reproduce {eq}`bhs_type2_rw_decomp`. + +```{solution-end} +``` + +```{exercise} +:label: dov_ex11 + +Derive the trend-stationary risk compensation $\Delta c_0^{risk,ts}$ in {eq}`bhs_ts_compensations`. + +For the trend-stationary model with $\tilde c_{t+1} - \zeta = \rho(\tilde c_t - \zeta) + \sigma_\varepsilon\varepsilon_{t+1}$, where $\tilde c_t = c_t - \mu t$, compute the risk compensation $\Delta c_0^{risk,ts}$ by comparing expected log utility under the stochastic plan to the deterministic certainty-equivalent path, and show that + +$$ +\Delta c_0^{risk,ts} = \frac{\beta\sigma_\varepsilon^2}{2(1-\beta\rho^2)}. +$$ + +*Hint:* You will need $\operatorname{Var}(z_t) = \sigma_\varepsilon^2(1 + \rho^2 + \cdots + \rho^{2(t-1)})$ and the formula $\sum_{t \geq 1}\beta^t \sum_{j=0}^{t-1}\rho^{2j} = \frac{\beta}{(1-\beta)(1-\beta\rho^2)}$. +``` + +```{solution-start} dov_ex11 +:class: dropdown +``` + +Under the trend-stationary model with $z_0 = 0$, $c_t = c_0 + \mu t + z_t$ and $E[c_t] = c_0 + \mu t$ (since $E[z_t] = 0$). 
+ +The deterministic certainty-equivalent path matches $E[C_t] = \exp(c_0 + \mu t + \frac{1}{2}\operatorname{Var}(z_t))$, so its log is $c_0^{ce} + \mu t + \frac{1}{2}\operatorname{Var}(z_t)$. + +Under expected log utility ($\theta = \infty$), the value of the stochastic plan is + +$$ +\sum_{t \geq 0}\beta^t E[c_t] = \frac{c_0}{1-\beta} + \frac{\beta\mu}{(1-\beta)^2}. +$$ + +The value of the certainty-equivalent path (matching mean levels) starting from $c_0 - \Delta c_0^{risk}$ is + +$$ +\sum_{t \geq 0}\beta^t \bigl[c_0 - \Delta c_0^{risk} + \mu t + \tfrac{1}{2}\operatorname{Var}(z_t)\bigr]. +$$ + +Since $\operatorname{Var}(z_t) = \sigma_\varepsilon^2 \sum_{j=0}^{t-1}\rho^{2j}$, the extra term sums to + +$$ +\sum_{t \geq 1}\beta^t \cdot \frac{\sigma_\varepsilon^2}{2}\sum_{j=0}^{t-1}\rho^{2j} += \frac{\sigma_\varepsilon^2}{2}\cdot\frac{\beta}{(1-\beta)(1-\beta\rho^2)}. +$$ + +Equating values and solving: + +$$ +\frac{\Delta c_0^{risk}}{1-\beta} = \frac{\beta\sigma_\varepsilon^2}{2(1-\beta)(1-\beta\rho^2)} +\quad\Rightarrow\quad +\Delta c_0^{risk,ts} = \frac{\beta\sigma_\varepsilon^2}{2(1-\beta\rho^2)}. +$$ + +The uncertainty compensation follows from the value function: $\Delta c_0^{unc,ts,II} = \frac{\beta\sigma_\varepsilon^2}{2(1-\beta\rho)^2\theta}$, with the $(1-\beta)$ factors replaced by $(1-\beta\rho)$ because the worst-case mean shift scales with $1/(1-\beta\rho)$ rather than $1/(1-\beta)$. 
+ +```{solution-end} +``` diff --git a/lectures/dovis_accounting_mf.md b/lectures/dovis_accounting_mf.md new file mode 100644 index 00000000..830594ce --- /dev/null +++ b/lectures/dovis_accounting_mf.md @@ -0,0 +1,2159 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.17.1 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(dovis_accounting_mf)= +```{raw} jupyter + +``` + +# Accounting for Monetary and Fiscal Policy + +```{contents} Contents +:depth: 2 +``` + +```{admonition} GPU +:class: warning + +This lecture was built using a machine with JAX installed and access to a GPU. + +To run this lecture on [Google Colab](https://colab.research.google.com/), click on the "rocket" icon at the top of the page, select "Colab", and set the runtime environment to include a GPU. + +To run this lecture on your own machine, you need to install [Google JAX](https://github.com/google/jax). +``` + +## Overview + +This lecture studies a model of fiscal and monetary policy interactions developed by {cite:t}`DovisAccountingMFrevised`. + +The model provides a framework for revisiting some long-standing questions about **fiscal dominance** versus **monetary dominance** in a framework that allows for **partial commitment** to an inflation target. + + +```{note} +For an early discussion of "partial commitment" in the context of fiscal and monetary policy, see the concluding section of {cite:t}`LucasStokey1982`, the original working paper version of {cite:t}`LucasStokey1983`. + +In Quantecon's view, the referees and editors of the *Journal of Monetary Economics* version made a mistake by insisting that Lucas and Stokey rewrite the concluding section of their paper. +``` + +```{note} +{cite:t}`SargentWallace1981` contrasted "fiscal dominance" and "monetary dominance" as different ways of coordinating +monetary and fiscal policy. 
+ +They thought about them at the beginning of the Reagan administration, when the 1970s surge in US inflation had not yet been tamed by the monetary-fiscal policies presided over by Paul Volcker. + +Sargent and Wallace's title, "Some Unpleasant Monetarist Arithmetic," expressed the idea that in the face of a persistent net-of-interest government deficit, efforts to reduce inflation through tight monetary policy work only temporarily, if at all. + + +That is because they lead to higher government debt and thus greater gross-of-interest government deficits that must be financed in the future. +``` + +In the model, a benevolent government that cannot commit finds it attractive to delegate monetary policy to a central bank charged with an inflation target, yet may later override that mandate when it needs seigniorage revenue. + +Whether the government honors or breaks its promise depends on two state variables: the fiscal situation (how much debt is outstanding and how urgently the government values public spending) and a random institutional cost that stands in for legal, reputational, and political barriers to overriding the central bank. + +As these states evolve, the economy switches endogenously between a **monetary-dominant** regime, in which the inflation target holds, and a **fiscal-dominant** regime, in which it does not. + +Two polar cases are nested as special cases: the Ramsey allocation emerges when the institutional cost is always prohibitively high, and the Markov equilibrium emerges when it is zero. + +The paper distinguishes two ways that a disinflation can occur: + +- **Fundamental disinflation**: a reduction in fiscal needs ($\theta$) leads inflation and debt to + decline together. +- **Institutional disinflation**: an increase in the credibility of the inflation mandate ($\xi$) leads inflation to fall while debt *rises*. 
+ +The contrasting comovement of debt and inflation in these types of disinflations +allows the authors to create a statistical model that lets them classify observed disinflations into episodes that were driven by fiscal fundamentals or by institutional changes. + +The paper applies these ideas to Colombia and Chile, using a +**particle filter** to recover the sequences of fiscal and institutional shocks that are consistent with the observed joint paths of inflation and debt-to-GDP ratios. + +This lecture will: + +1. Set up the model environment in a {cite:t}`SargentWallace1981` economy with a household and a government +2. Describe implementable fiscal and monetary outcomes +3. Characterize two setups: the **Ramsey** outcome (full commitment) and the + **Markov** outcome (no commitment) +4. Formulate a partial-commitment model with endogenous regime switching governed by + a stochastic cost $\xi$ of deviating from the mandate +5. Write Python code to solve the model numerically +6. Simulate the two types of disinflation (fundamental and institutional) +7. Implement an illustrative particle filter on synthetic data +8. Summarize the paper's case studies of Colombia and Chile + +JAX is used to vectorize the key computations and accelerate value function iteration and particle filtering. + +In addition to what's in Anaconda, this lecture will need the following libraries: + +```{code-cell} ipython3 +:tags: [hide-output] + +!pip install jax +``` + +```{code-cell} ipython3 +from typing import NamedTuple +from functools import partial + +import jax +import jax.numpy as jnp +from jax import jit, lax, vmap +import numpy as np +import matplotlib.pyplot as plt + +jax.config.update("jax_enable_x64", True) +``` + +## The economy + +### Environment + +Consider an economy that blends elements of {cite:t}`AMSS_2002` and {cite:t}`Calvo1978` +(see also {cite:t}`LucasStokey1983` and {cite:t}`ChariKehoe1999`). + +Time is discrete, indexed by $t = 0, 1, \ldots$. 
+ +The exogenous state is $s_t \in \mathcal{S}$, following +a Markov process with transition $\Pr(s_{t+1}|s_t)$. + +A representative household has preferences + +$$ +\sum_t \sum_{s^t} \beta^t \Pr(s^t|s_0)\, U\bigl(c(s^t),\, l(s^t),\, m(s^t),\, g(s^t),\, s_t\bigr) +$$ + +where + +$$ +U(c, l, m, g, s) = c - \nu(l) + v(m) + \theta(s)\, u(g). +$$ + +Here + +- $c$ is private consumption +- $l$ is labor supply +- $m$ is real money balances +- $g$ is public consumption +- $\nu(l)$ is a strictly increasing and convex labor disutility function +- $v(m)$ and $u(g)$ are strictly increasing and concave +- $\theta(s)$ is a preference shock to the marginal utility of government spending +- $\beta$ is the household discount factor + +The linear production technology is operated by competitive firms, and the resource constraint is $c(s^t) + g(s^t) \leq l(s^t)$. + +The government finances spending $g$ with linear taxes on labor income, by issuing real uncontingent debt $b$, and by printing money injected into the economy via open market operations. + +The government is benevolent but may have a different discount factor $\hat\beta \leq \beta$. + +### Implementable allocations + +Following the insight in {cite:t}`Aiyagari1989` and {cite:t}`AMSS_2002` (see also QuantEcon lectures {doc}`amss`, {doc}`amss2`, and {doc}`amss3`), we define the **real primary surplus** as + +$$ +\Delta(s^t) \equiv \tau(s^t) l(s^t) - g(s^t). +$$ + +We can then define the static **indirect utility function over surpluses** as + +$$ +U(\Delta, s) = \max_{c,\,l,\,g} \; c - \nu(l) + \theta(s)\, u(g) +$$ + +subject to the resource constraint $c + g \leq l$ and the static implementability constraint +$(1 - \nu'(l))\, l - g \geq \Delta$. + +This function is well-defined for all surplus values below the maximal surplus implied by the static Laffer curve, $\Delta \leq \bar\Delta \equiv \max_l (1 - \nu'(l))\,l$. 
The indirect utility function $U(\Delta, s)$ is *decreasing and concave* in $\Delta$ for all $s$, and the marginal disutility of primary surplus is increasing and is affected by the fundamental shocks.

Let $\phi \equiv M_{t-1}/P_t$ denote real money balances (price of money in terms of goods) and define

$$
H(\phi) \equiv \phi + v'(\phi)\, \phi.
$$

The **money demand** or **portfolio balance** condition becomes

$$
\mu\, \phi = \beta \sum_{s'} \Pr(s'|s)\, H(\phi'),
$$

where $\mu \equiv M_t / M_{t-1}$ is the gross money growth rate.

The **government budget constraint** (in normalized form) is

$$
b_{t-1} + \phi_t = \Delta_t + \beta b_t + \mu_t \phi_t.
$$

The **government's value** is

$$
V_0 = \sum_t \sum_{s^t} \hat\beta^t \Pr(s^t|s_0) \left[ U(\Delta(s^t), s_t) + v(\phi(s^t)) \right].
$$

We now define the model primitives as Python functions.

The labor disutility is $\nu(l) = \chi\, l^{1+\psi}/(1+\psi)$.

Money utility is $v(\phi) = \kappa\phi - \eta_m\phi^2$.

Government spending utility is $u(g) = g^{1-\sigma}/(1-\sigma)$.

The function $H(\phi) = \phi\,(1 + v'(\phi))$ appears in the money demand condition.

```{code-cell} ipython3
def v_money(φ, κ, η_m):
    """Quadratic money utility v(φ) = κφ - η_m φ²."""
    return κ * φ - η_m * φ**2


def v_money_prime(φ, κ, η_m):
    """Marginal money utility v'(φ) = κ - 2 η_m φ."""
    return κ - 2.0 * η_m * φ


def H_func(φ, κ, η_m):
    """Money-demand term H(φ) = φ (1 + v'(φ))."""
    marginal = v_money_prime(φ, κ, η_m)
    return φ * (1.0 + marginal)


def u_gov(g, σ):
    """CRRA utility of government spending, with the log case at σ = 1."""
    is_log = jnp.abs(σ - 1.0) < 1e-10
    crra = g ** (1.0 - σ) / (1.0 - σ)
    return jnp.where(is_log, jnp.log(g), crra)
```

## Policy determination

### The credibility problem

An important innovation of {cite:t}`DovisAccountingMFrevised` is to model policy determination under **partial commitment** in the following sense.

The government promises an inflation target $\pi^*$ (equivalently, a promised value for real balances $\phi'$) for next period.

But a next-period government can choose to **honor** or **abrogate** the mandate.
The cost of abrogating is modeled as a random variable $\xi$ that stands in for the various frictions that make overriding a mandate difficult:

- reputational losses (see {cite:t}`DovisKirpalani2021`)
- coordination failures that lead to inferior equilibria
- institutional constraints and political costs

When $\xi$ is always large enough, the mandate is never broken and the Ramsey outcome obtains.

When $\xi = 0$, the mandate is always broken and a Markov equilibrium results.

The full model nests both extremes.

Unlike the loose commitment framework of {cite:t}`DebortoliNunes2010`, where the probability of re-optimization is exogenous, here the regime switch is *endogenous*: the government decides whether to honor or abrogate based on the realized cost.

### Recursive formulation

The state is $x = (b, \phi, s)$ where $b$ is inherited real debt, $\phi$ is the promised real balances, and $s = (\theta, \xi)$ is the exogenous state.

This recursive formulation builds on {cite:t}`Abreu1988`, {cite:t}`ChariKehoe1990`, and {cite:t}`Chang1998`.

```{note}
For descriptions of these frameworks, see other lectures in this suite of QuantEcon lecture notes, including {doc}`Ramsey plans, time inconsistency, sustainable plans <calvo>`, {doc}`competitive equilibria in the Chang model <chang_ramsey>`, and {doc}`sustainable plans in the Chang model <chang_credible>`.

<!-- NOTE(review): the {doc} targets above were stripped during extraction; the
     targets <calvo>, <chang_ramsey>, <chang_credible> are inferred from the
     QuantEcon advanced lecture series — confirm against the repository's TOC. -->
```

The economy can be in one of two regimes:

- **Monetary dominance** (MD, $\eta = 1$): the government honors the inflation target.
- **Fiscal dominance** (FD, $\eta = 0$): the government ignores the target and chooses $\phi$ to maximize short-run welfare.

The idea of regime switching between monetary and fiscal dominance builds on {cite:t}`Leeper1991`, {cite:t}`Bianchi2013`, and {cite:t}`BianchiIlut2017`.
+ +The present model differs in two important ways: the switches are **endogenous** (they emerge from the government's optimization rather than from an exogenous Markov chain), and the policy chosen within each regime is also endogenous (not governed by fixed monetary and fiscal rules). + +The **regime indicator** is + +$$ +\eta(b', \phi', s') = \begin{cases} +1 & \text{if } V^{md}(b', \phi', s') \geq V^{fd}(b', s') - \xi(s') \\ +0 & \text{otherwise} +\end{cases} +$$ + +The inflation target summarized by a promised $\phi$ is satisfied if and only if + +$$ +\xi \geq \xi^* = V^{fd}(b, s) - V^{md}(b, \phi, s) + = \max_{\phi_{fd}} V^{md}(b, \phi_{fd}, s) - V^{md}(b, \phi, s). +$$ + +Deviating from the target allows the government to attain the maximum utility possible net of the cost $\xi$. + +A cost $\xi$ greater than the cutoff $\xi^*$ is required for the target to be sustained. + +More ambitious inflation targets (closer to the Ramsey value $\phi^*$) are harder to achieve because the cutoff $\xi^*$ is larger. + +The target is also easier to achieve when the marginal utility of government expenditure $\theta$ is lower, because positive surpluses are less valuable. + +{numref}`fig-credibility-targets` below illustrates this logic. + +**Monetary dominance** -- the government solves: + +$$ +V^{md}(b, \phi, s) = \max_{\Delta, b', \mu, \phi'} U(\Delta, \theta) + v(\phi) + + \hat\beta \sum_{s'} \Pr(s'|s)\, V(b', \phi', s') +$$ + +where maximization is subject to the budget constraint $\Delta = b + \phi - \beta b' - \mu\phi$ and the money demand condition $\mu\phi = J(b', \phi', s)$. + +**Fiscal dominance** -- the government's problem adds current $\phi$ to its choice set: + +$$ +V^{fd}(b, s) = \max_{\phi, \Delta, b', \mu, \phi'} U(\Delta, \theta) + v(\phi) + + \hat\beta \sum_{s'} \Pr(s'|s)\, V(b', \phi', s') +$$ + +where the static first-order necessary condition with respect to $\phi$ is $-U'(\Delta, \theta) = v'(\phi^{fd})$. 
+ +The **expected marginal value of real balances** is + +$$ +J(b', \phi', s) = \beta \sum_{s'} \Pr(s'|s) \left[ + \eta(b', \phi', s')\, H(\phi') + + (1 - \eta(b', \phi', s'))\, H\!\left(\phi^{fd}(b', s')\right) +\right]. +$$ + +The default parameter values below are calibrated to an average of Colombia and Chile 1960–2017, following Table 1 of {cite:t}`DovisAccountingMFrevised`. + +In our implementation, the preference parameter $\theta$ is held fixed inside each model instance; we study fundamental disinflation by comparing solutions across different $\theta$ values. + +```{code-cell} ipython3 +# Default parameter values +β, β_hat = 0.95, 0.92 +χ, ψ, σ = 0.015, 1.0, 2.0 +κ, η_m = 0.70, 0.06 +λ_gumbel = 20.0 +φ_star = κ / (2.0 * η_m) +``` + +## Two benchmark outcomes + +Before turning to the full model, it is useful to analyze two polar benchmarks. + +### The Ramsey outcome (full commitment) + +Under full commitment ($\xi$ always large enough so that $\eta = 1$), the government solves + +$$ +V^R(b, \phi, s) = \max_{\Delta, b', \phi'(s')} U(\Delta, s) + v(\phi) + +\hat\beta \sum_{s'} \Pr(s'|s) V^R(b', \phi'(s'), s') +$$ + +subject to $\Delta = b + \phi - \beta b' - \beta \sum_{s'} \Pr(s'|s) H(\phi'(s'))$. + +```{note} +The choice of $\phi'(s')$ here is allowed to be *state-contingent* -- the Ramsey planner can promise different real balances in different future states. + +The partial-commitment model studied below instead restricts the promise to a single $\phi'$ that does not vary with $s'$. + +We present the Ramsey problem in its more general form as a benchmark; the restriction to a non-contingent promise is what makes abrogation tempting and gives rise to the credibility problem. +``` + +Under the Ramsey outcome, there is a trade-off between following the Friedman rule and making real debt state contingent. 
+ +If the volatility of the marginal value of government expenditures is sufficiently small, the benefits of making the real debt state contingent are small relative to the cost of anticipated inflation and it is optimal to set $\phi(s') = \phi^*$ for all $s'$ next period. + +Key properties: + +- For the quadratic specification $v(\phi) = \kappa\phi - \eta_m\phi^2$, the satiation point is $\phi^* = \kappa/(2\eta_m)$. +- Under the conditions of Proposition 3 of the paper, the Ramsey outcome has a fixed inflation level + $1 + \pi^R = \beta H(\phi^*) / \phi^*$ that does not depend on fiscal fundamentals -- the level of debt and $\theta$. +- Surpluses and real debt follow the Euler equation + $-U'(\Delta, s) = \frac{\hat\beta}{\beta} \sum_{s'} \Pr(s'|s) [-U'(\Delta', s')]$. + +### The Markov outcome (no commitment) + +The polar opposite case in {cite:t}`DovisAccountingMFrevised` is one in which the government has no ability to commit to inflation, so that a Markov equilibrium obtains. + +Setting $\xi(s) = 0$ for all $s$ makes the fiscal-dominant regime always optimal. + +Because the promise $\phi'$ is never honored, it drops out of the problem. + +The problem reduces to + +$$ +V^M(b, s) = \max_{\phi, \Delta, b'} U(\Delta, \theta) + v(\phi) + +\hat\beta \sum_{s'} \Pr(s'|s) V^M(b', s') +$$ + +subject to $\Delta = b + \phi - \beta b' - \beta \sum_{s'} \Pr(s'|s) H(\phi^M(b', s'))$. + +Key properties of the Markov outcome: + +- The static optimality condition $-U'(\Delta, \theta) = v'(\phi^{fd})$ equates the marginal benefit of real balances to the marginal cost of the primary surplus, so the model predicts a higher price level (lower real balances) when the marginal cost of the surplus is high. +- Inflation *responds strongly* to fiscal pressures -- it is high on average, volatile, and closely related to fiscal considerations. 
- Debt capacity is *sharply limited* -- as shown in the paper, the term $\frac{\partial J(b', \phi', s)}{\partial b'}/\beta$ is negative, effectively acting as a tax on debt issuance and pushing equilibrium debt below the Ramsey level.
- In the deterministic case with $\beta = \hat\beta$: real debt converges to zero while the Ramsey outcome sustains positive debt levels (Appendix B of the paper).

The full model *interpolates* between these two extremes depending on the cost $\xi_t$.

We compute the indirect utility $U(\Delta, \theta)$ from the static problem

$$
\max_{l,\,g}\; l - g - \nu(l) + \theta\,u(g) \quad \text{s.t.}\quad (1 - \nu'(l))\,l - g \geq \Delta.
$$

With $\nu(l) = \chi\,l^{1+\psi}/(1+\psi)$, the Laffer curve gives tax revenue $T(l) = (1 - \chi\,l^\psi)\,l$.

The first-order conditions are $\theta\,u'(g) = 1 + \lambda$ and $(1 - \nu'(l))(1+\lambda) = \lambda\,\nu''(l)\,l$, where $\lambda$ is the multiplier on the surplus constraint.

We bisect on $\lambda \geq 0$ to find the optimal $(l, g)$ for given $(\Delta, \theta)$.

By the envelope theorem, $U'(\Delta) = -\lambda$.

The following code implements this procedure, returning both $U(\Delta, \theta)$ and $U'(\Delta, \theta) = -\lambda$.

```{code-cell} ipython3
# Large negative value used to mark infeasible allocations
PENALTY = -1e12
```

```{code-cell} ipython3
@jit
def indirect_utility(Δ, θ, χ, ψ, σ):
    """
    Compute U(Δ, θ) and U'(Δ, θ) by bisection on the multiplier λ.

    Solves the static problem max_{l,g} l - g - ν(l) + θ u(g) subject to
    the surplus constraint (1 - ν'(l)) l - g ≥ Δ.  The surplus implied by
    the optimal (l, g) is increasing in λ, so bisection on λ brackets the
    multiplier at which the constraint just binds.  By the envelope
    theorem, U'(Δ) = -λ.

    Returns a pair (U_val, U_prime); both are set to PENALTY when Δ lies
    beyond 99% of the Laffer-curve peak (no feasible allocation).
    """
    # Unconstrained (λ = 0) optima: θ u'(g*) = 1 and ν'(l*) = 1
    g_star = θ ** (1.0 / σ)
    l_star = (1.0 / χ) ** (1.0 / ψ)
    # Peak of the Laffer curve T(l) = (1 - χ l^ψ) l: T'(l_peak) = 0
    l_peak = (1.0 / ((1.0 + ψ) * χ)) ** (1.0 / ψ)
    T_max = (1.0 - χ * l_peak**ψ) * l_peak

    def bisect_cond(bounds):
        # Stop once the λ bracket is narrower than the tolerance
        lo, hi = bounds
        return (hi - lo) > 1e-4

    def bisect_body(bounds):
        # Candidate multiplier and the (g, l) it implies via the FOCs:
        #   g = (θ / (1 + λ))^{1/σ},  l^ψ = (1 + λ) / (χ (1 + λ(1 + ψ)))
        # jnp.maximum clamps guard against division by ~0 / negative bases.
        lo, hi = bounds
        mid = 0.5 * (lo + hi)
        g = (θ / (1.0 + mid)) ** (1.0 / σ)
        denom = jnp.maximum(χ * (1.0 + mid * (1.0 + ψ)), 1e-15)
        l = jnp.maximum((1.0 + mid) / denom, 1e-15) ** (1.0 / ψ)
        surplus = (1.0 - χ * l**ψ) * l - g
        # Surplus too small → raise λ (lo := mid); too large → lower it.
        return (
            jnp.where(surplus <= Δ, mid, lo),
            jnp.where(surplus > Δ, mid, hi),
        )

    # λ in [0, 1000] covers from unconstrained to peak of Laffer curve
    lo, hi = lax.while_loop(bisect_cond, bisect_body, (0.0, 1000.0))
    λ_opt = 0.5 * (lo + hi)
    # Re-evaluate the allocation at the converged multiplier
    g_opt = (θ / (1.0 + λ_opt)) ** (1.0 / σ)
    denom = jnp.maximum(χ * (1.0 + λ_opt * (1.0 + ψ)), 1e-15)
    l_opt = jnp.maximum((1.0 + λ_opt) / denom, 1e-15) ** (1.0 / ψ)

    # Objective l - g - ν(l) + θ u(g) at the constrained allocation
    U_constrained = (
        l_opt
        - g_opt
        - χ * l_opt ** (1.0 + ψ) / (1.0 + ψ)
        + θ * u_gov(g_opt, σ)
    )

    # Objective at the unconstrained (λ = 0) allocation
    U_unconstrained = (
        l_star
        - χ * l_star ** (1.0 + ψ) / (1.0 + ψ)
        - g_star
        + θ * u_gov(g_star, σ)
    )

    # The unconstrained optimum has surplus exactly -g*, so the constraint
    # is slack whenever Δ ≤ -g*; beyond ~the Laffer peak it is infeasible.
    unconstrained = Δ <= -g_star
    infeasible = Δ >= 0.99 * T_max

    U_val = jnp.where(unconstrained, U_unconstrained,
                      jnp.where(infeasible, PENALTY, U_constrained))
    U_prime = jnp.where(unconstrained, 0.0,
                        jnp.where(infeasible, PENALTY, -λ_opt))
    return U_val, U_prime
```

Let's plot $U(\Delta, \theta)$ and $U'(\Delta, \theta)$ for a range of $\Delta$ values and three different $\theta$ values.
+ +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Indirect utility and surplus costs + name: fig-indirect-utility +--- +Δ_grid = jnp.linspace(-5.0, 3.0, 300) +θ_vals = [80.0, 130.0, 200.0] + +iu_vec = vmap(indirect_utility, in_axes=(0, None, None, None, None)) + +fig, axes = plt.subplots(1, 2, figsize=(14, 5)) + +for θ_val in θ_vals: + U_vals, U_primes = iu_vec(Δ_grid, θ_val, χ, ψ, σ) + + mask = U_vals > PENALTY + axes[0].plot(Δ_grid[mask], U_vals[mask], lw=2, + label=f'θ = {θ_val:.0f}') + axes[1].plot(Δ_grid[mask], U_primes[mask], lw=2, + label=f'θ = {θ_val:.0f}') + +axes[0].set_xlabel('primary surplus Δ') +axes[0].set_ylabel('U(Δ, θ)') +axes[0].legend() + +axes[1].set_xlabel('primary surplus Δ') +axes[1].set_ylabel("U'(Δ, θ)") +axes[1].legend() + +plt.tight_layout() +plt.show() +``` + +The left panel confirms that $U(\Delta, \theta)$ is *decreasing and concave* in $\Delta$: higher surpluses are costly because they require more distortionary taxation. + +The right panel shows the marginal cost of surpluses $U'(\Delta, \theta) < 0$, which becomes more negative as $\Delta$ approaches the peak of the Laffer curve. + +Higher $\theta$ shifts the curves because greater social value of government spending makes running a surplus even more costly. + +### Credibility of inflation targets + +We can now illustrate the credibility condition from the recursive formulation. + +For a given level of inherited debt $b$ and optimal continuation choices, the value of entering the period with real balances $\phi$ is approximately + +$$ +V^{md}(\phi;\, b,\, \theta) \;\approx\; U(\phi + D,\, \theta) \;+\; v(\phi), +$$ + +where $D = b - \beta b' - \text{seigniorage}$ collects the non-$\phi$ terms in the surplus $\Delta$. + +This is hump-shaped in $\phi$: low $\phi$ sacrifices money utility while high $\phi$ forces a large, costly surplus. + +Under fiscal dominance the government picks $\phi^{fd}$ to maximize this expression, so $V^{fd} = \max_\phi V^{md}(\phi)$. 
+ +The cutoff cost $\xi^* = V^{fd} - V^{md}(\phi')$ is the temptation to deviate from the promised $\phi'$. + +```{code-cell} ipython3 +--- +tags: [hide-input] +mystnb: + figure: + caption: Credibility of inflation targets + name: fig-credibility-targets +--- +# φ grid from near zero up to just below φ* = κ/(2η_m) +phi_max = φ_star * 0.99 +phi_grid = jnp.linspace(0.05, phi_max, 500) + +# Surplus: Δ = φ + D where D = b - βb' - seigniorage +D = 1.0 +Delta_grid = phi_grid + D + +# Two θ values: baseline (high fiscal pressure) and low θ_L +θ_high = 150.0 +θ_low = 100.0 + +U_high, _ = iu_vec(Delta_grid, θ_high, χ, ψ, σ) +U_low, _ = iu_vec(Delta_grid, θ_low, χ, ψ, σ) +v_phi = v_money(phi_grid, κ, η_m) + +V_blue_raw = np.array(U_high + v_phi) # baseline θ (high) +V_red_raw = np.array(U_low + v_phi) # θ_L (low) +phi_np = np.array(phi_grid) + +mask_b = V_blue_raw > PENALTY +mask_r = V_red_raw > PENALTY + +# Shift curves so peaks are at comparable heights +V_blue = V_blue_raw - V_blue_raw[mask_b].min() +V_red = V_red_raw - V_red_raw[mask_r].min() +V_blue += V_red.max() - V_blue.max() # align peaks + +# Find peaks (φ^fd for each θ) +idx_peak_b = np.argmax(V_blue[mask_b]) +idx_peak_r = np.argmax(V_red[mask_r]) +phi_fd_b = phi_np[mask_b][idx_peak_b] +phi_fd_r = phi_np[mask_r][idx_peak_r] +V_peak_b = V_blue[mask_b][idx_peak_b] +V_peak_r = V_red[mask_r][idx_peak_r] + +# Promise point φ' +phi_prime = 0.5 * (phi_fd_r + phi_max) +phi_star_plot = phi_prime + 0.4 + +# Values at the promise +V_at_b = float(np.interp(phi_prime, phi_np[mask_b], V_blue[mask_b])) +V_at_r = float(np.interp(phi_prime, phi_np[mask_r], V_red[mask_r])) + +fig, ax = plt.subplots(figsize=(8, 6)) +ax.plot(phi_np[mask_b], V_blue[mask_b], lw=2.5) +ax.plot(phi_np[mask_r], V_red[mask_r], lw=2.5) + +# Vertical dashed lines +for xv in [phi_fd_b, phi_fd_r, phi_prime, phi_star_plot]: + ax.axvline(xv, ls='--', color='gray', alpha=0.35, lw=0.7) + +# Horizontal dashed lines at peak levels +ax.hlines(V_peak_b, phi_fd_b, 
phi_prime, + ls='--', color='C0', alpha=0.35, lw=0.7) +ax.hlines(V_peak_r, phi_fd_r, phi_prime, + ls='--', color='C1', alpha=0.35, lw=0.7) + +# Horizontal dashed lines at promise levels +ax.hlines(V_at_b, phi_fd_b, phi_prime, + ls='--', color='C0', alpha=0.25, lw=0.7) +ax.hlines(V_at_r, phi_fd_r, phi_prime, + ls='--', color='C1', alpha=0.25, lw=0.7) + +# ξ* double arrows +x_xi_b = phi_fd_b +ax.annotate('', xy=(x_xi_b, V_peak_b), + xytext=(x_xi_b, V_at_b), + arrowprops=dict(arrowstyle='<->', color='C0', lw=1.5)) +ax.text(x_xi_b - 0.25, 0.5 * (V_peak_b + V_at_b), + r'$\xi^*$', fontsize=14, color='C0', + ha='right', va='center') + +# orange ξ_hat* +x_xi_r = phi_fd_r +ax.annotate('', xy=(x_xi_r, V_peak_r), + xytext=(x_xi_r, V_at_r), + arrowprops=dict(arrowstyle='<->', color='C1', lw=1.5)) +ax.text(x_xi_r + 0.25, 0.5 * (V_peak_r + V_at_r), + r'$\hat{\xi}^*$', fontsize=14, color='C1', + ha='left', va='center') + +# Curve labels +x_lab_r = phi_prime + 0.9 +x_lab_b = phi_np[mask_b][-1] * 0.92 +ax.text(x_lab_r, + float(np.interp(x_lab_r, phi_np[mask_r], V_red[mask_r])), + r'$V_{md}(\phi,\,\theta_L)$', fontsize=13, color='C1', + va='bottom', ha='left') +ax.text(x_lab_b, + float(np.interp(x_lab_b, phi_np[mask_b], V_blue[mask_b])) + 0.25, + r'$V_{md}(\phi)$', fontsize=13, color='C0', + va='top', ha='left') + +# x-axis labels +trans = ax.get_xaxis_transform() +for xv, lab in [(phi_fd_b, r'$\phi_{fd}$'), + (phi_fd_r, r'$\hat\phi_{fd}$'), + (phi_prime, r"$\phi'$"), + (phi_star_plot, r'$\phi^*$')]: + ax.text(xv, -0.06, lab, transform=trans, + fontsize=12, ha='center', va='top', clip_on=False) + +ax.set_ylabel(r'$V$', fontsize=14) +ax.set_xlabel(r'$\phi$', fontsize=14) +ax.tick_params(labelbottom=False, labelleft=False) +ax.spines['top'].set_visible(False) +ax.spines['right'].set_visible(False) +plt.tight_layout() +plt.show() +``` + +The blue curve plots $V^{md}(\phi) = U(\phi + D,\, \theta) + v(\phi)$ for baseline fiscal pressure $\theta$, and the orange curve for a lower value 
$\theta_L$.

Each curve peaks at the corresponding $\phi^{fd}$, the level of real balances chosen under fiscal dominance.

The gap $\xi^*$ between the peak (the fiscal-dominance value $V^{fd}$) and the value at the promised $\phi'$ is the minimum institutional cost needed to sustain the inflation target.

With lower fiscal pressure (orange), the gap shrinks: the target becomes easier to sustain.

## The full model with Gumbel shocks

Following the paper's computational approach, the cost $\xi$ is decomposed as

$$
\xi_t = \xi_{1,t} + \xi^{fd}_t - \xi^{md}_t,
$$

where $\xi_{1,t}$ is a persistent component and $\xi^{fd}_t$, $\xi^{md}_t$ are i.i.d. **Gumbel** shocks with mean zero.

The persistent component $\xi_{1,t}$ follows a Markov chain on $[0, \bar\xi]$ with the following transition probabilities:

$$
\Pr(\xi_1' = 0 \mid \xi_1) = \alpha_l, \qquad
\Pr(\xi_1' = \xi_1 \mid \xi_1) = \alpha, \qquad
\Pr(\xi_1' \sim \text{Uniform}[0, \bar\xi]) = 1 - \alpha_l - \alpha.
$$

The parameter $\alpha$ controls persistence, $\alpha_l$ is the probability of resetting to zero (making deviation costless), and $\bar\xi$ is a large upper bound.

The Gumbel specification delivers a **logit** formula for the probability of monetary dominance:

$$
\bar\eta(b', \phi', s_1) = \frac{1}{1 + \exp\!\left(-\lambda\left[V^{md}(b', \phi', s_1) - V^{fd}(b', s_1) + \xi_1\right]\right)}
$$

and a smooth **log-sum-exp** formula for the expected continuation value:

$$
\Omega(b', \phi', s_1) = \frac{1}{\lambda}
\log\!\left[
  \exp(\lambda\, V^{md}) + \exp\!\left(\lambda\left(V^{fd} - \xi_1\right)\right)
\right].
$$

This makes the value function differentiable and the numerical solution well behaved.

We discretize $\xi_1$ via the paper's Markov chain on $[0, \bar\xi]$, then build grids for total liabilities $B$, debt $b'$, and promised real balances $\phi'$.

```{code-cell} ipython3
def build_ξ_grid(n_ξ, α_l, α, ξ_bar):
    """
    Build the paper's persistent credibility-state process on [0, ξ_bar].

    Each row of the transition matrix mixes a uniform redraw over the grid
    (probability 1 - α_l - α), a reset to the zero state (probability α_l),
    and staying at the current state (probability α); rows are then
    renormalized to guard against floating-point drift.
    """
    grid = np.linspace(0.0, ξ_bar, n_ξ)
    uniform_mass = (1.0 - α_l - α) / n_ξ
    P = np.full((n_ξ, n_ξ), uniform_mass)
    P[:, 0] += α_l                       # reset to ξ₁ = 0
    P += α * np.eye(n_ξ)                 # persistence of the current state
    P /= P.sum(axis=1, keepdims=True)    # renormalize rows to sum to one
    return jnp.asarray(grid), jnp.asarray(P)
```

## Computational algorithm

Because the budget constraint depends on $b$ and $\phi$ only through their sum, the problem can be written in terms of a single endogenous state variable $B = b + \phi$ (total real government liabilities), as described in Appendix C of {cite:t}`DovisAccountingMFrevised`.

We define a **reduced continuation value** $W(B, s_1)$ that strips the current-period money utility $v(\phi)$ out of the recursive problem.

The full value of entering a period with state $(b, \phi, s_1)$ is recovered as $v(\phi) + W(b + \phi, s_1)$ under monetary dominance, and the fiscal-dominance value is $V^{fd}(b', s_1') = \max_{\phi}\left[W(b' + \phi, s_1') + v(\phi)\right]$.

The function $W(B, s_1)$ satisfies

$$
W(B, s_1) = \max_{\Delta, b', \phi'} U(\Delta, \theta) +
\hat\beta \sum_{s_1'} \Pr(s_1'|s_1)\, \Omega(b', \phi', s_1')
$$

where maximization is subject to

$$
\Delta = B - \beta b' - \beta \sum_{s_1'} \Pr(s_1'|s_1)
  \left[\bar\eta\, H(\phi') + (1 - \bar\eta)\, H(\phi^{fd}(b', s_1'))\right],
$$

where $\phi^{fd}(b, s)$ is the solution to the static FOC
$-U'(\Delta, \theta) = v'(\phi^{fd})$ under fiscal dominance.

The algorithm is:

1. *Initialize* with a guess $W_0(B, s_1)$
2. For iteration $n$:
   - Compute $\phi^{fd}$ and $\bar\eta$ from the logit formula and the fiscal-dominance FOC
   - Compute the Bellman update $W_{n+1}$ from the value function equation above
3.
*Iterate* until $\|W_{n+1} - W_n\| < \varepsilon$ + +The implementation fixes $\theta$ inside each model instance, uses multiple $\xi_1$ states, and adopts the quadratic money-utility specification. + +Appendix C of the paper defines fiscal dominance recursively by + +$$ +V^{fd}(b', \xi') = \max_{\phi} \left[ W(b' + \phi, \xi') + v(\phi) \right]. +$$ + +The function `fd_from_continuation` recovers $\phi^{fd}$ by searching over a $\phi$ grid and applying quadratic refinement around the grid maximum. + +Linear interpolation via `jnp.interp` evaluates $W(B, \xi)$ at off-grid points. + +The expectation over $\xi'$ is a matrix multiply against $P_\xi$ via `jnp.einsum`, the search over $(b', \phi')$ is a vectorized `argmax`, and the VFI loop uses `lax.while_loop`. + +All parameters, grids, the transition matrix, and a precomputed table of $U(\Delta)$ values are stored in a `DovisModel` named tuple. + +```{code-cell} ipython3 +class DovisModel(NamedTuple): + β: float + β_hat: float + χ: float + ψ: float + σ: float + κ: float + η_m: float + θ: float + λ: float + φ_star: float + B_grid: jnp.ndarray + b_prime_grid: jnp.ndarray + φ_grid: jnp.ndarray + ξ_grid: jnp.ndarray + P_ξ: jnp.ndarray + Δ_fine: jnp.ndarray + U_fine: jnp.ndarray + H_φ: jnp.ndarray + + +def create_model( + *, + β=0.95, + β_hat=0.92, + χ=0.015, + ψ=1.0, + σ=2.0, + κ=0.70, + η_m=0.06, + θ=130.0, + λ=20.0, + α_l=0.005, + α=0.99, + ξ_bar=0.5, + n_B=40, + n_φ=40, + n_ξ=9, + B_max=20.0, +): + """ + Create the reduced-form model. + + θ is fixed inside each model instance; fundamental disinflation + is studied by comparing solutions across θ values. 
+ """ + + # Satiation point for real balances + φ_star = κ / (2.0 * η_m) + + # Stay below satiation + φ_lo, φ_hi = 0.5, 0.99 * φ_star + + B_grid = jnp.linspace(0.1, B_max, n_B) + + # Max debt consistent with B_max and φ_hi + b_bar = max(B_max - float(φ_hi), 0.1) + b_prime_grid = jnp.linspace(0.1, b_bar, n_B) + φ_grid = jnp.linspace(φ_lo, φ_hi, n_φ) + + ξ_grid, P_ξ = build_ξ_grid(n_ξ, α_l, α, ξ_bar) + + # Wide range covering the full Laffer curve + Δ_fine = jnp.linspace(-50.0, 20.0, 2500) + U_fine, _ = vmap(lambda d: indirect_utility(d, θ, χ, ψ, σ))(Δ_fine) + H_φ = H_func(φ_grid, κ, η_m) + + return DovisModel( + β=β, β_hat=β_hat, χ=χ, ψ=ψ, σ=σ, κ=κ, η_m=η_m, + θ=θ, λ=λ, φ_star=φ_star, + B_grid=B_grid, b_prime_grid=b_prime_grid, φ_grid=φ_grid, + ξ_grid=ξ_grid, P_ξ=P_ξ, Δ_fine=Δ_fine, U_fine=U_fine, + H_φ=H_φ, + ) +``` + +The code below defines the Bellman operator `T(W, model)` from three building blocks. + +`fd_from_continuation` evaluates $W(b'+\phi, \xi') + v(\phi)$ for every $(b', \phi, \xi')$ triple, finds the $\phi$ that maximizes it (with quadratic refinement), and returns $V^{fd}$, $\phi^{fd}$, $H^{fd}$, and the full $V^{md}$ array. + +`compute_continuation` calls `fd_from_continuation`, then computes the logit probability $\bar\eta$, the expected continuation $\Omega$ (via log-sum-exp), and the money-demand term $J$ (via `jnp.einsum` against $P_\xi$). + +`bellman_rhs` uses these to evaluate $\Delta = B - \beta b' - J$ and looks up $U(\Delta)$ for every candidate $(b', \phi')$, returning the full RHS array. + +`T` takes the `max` over choices. + +```{code-cell} ipython3 +def interp_B_values(B_points, B_grid, values): + """ + Linearly interpolate values(B, ξ) over B for each ξ state. 
+ """ + flat = jnp.ravel(B_points) + interp_cols = vmap( + lambda col: jnp.interp(flat, B_grid, col), + in_axes=1, + out_axes=0, + )(values) + return jnp.moveaxis( + interp_cols.reshape(values.shape[1], *B_points.shape), + 0, + -1, + ) + + +def fd_from_continuation(W, B_grid, b_prime_grid, φ_grid, κ, η_m): + """ + Recover V^fd and φ^fd from max_φ [W(b' + φ, ξ) + v(φ)]. + + Uses quadratic refinement around the grid maximum for smoother policies. + """ + B_choices = b_prime_grid[:, None] + φ_grid[None, :] + W_choices = interp_B_values(B_choices, B_grid, W) + V_choices = W_choices + v_money(φ_grid, κ, η_m)[None, :, None] + + best_idx = jnp.argmax(V_choices, axis=1) + idx = best_idx[:, None, :] + n_φ = φ_grid.shape[0] + + idx_lo = jnp.clip(best_idx - 1, 0, n_φ - 1)[:, None, :] + idx_hi = jnp.clip(best_idx + 1, 0, n_φ - 1)[:, None, :] + + v_lo = jnp.take_along_axis(V_choices, idx_lo, axis=1).squeeze(1) + v_0 = jnp.take_along_axis(V_choices, idx, axis=1).squeeze(1) + v_hi = jnp.take_along_axis(V_choices, idx_hi, axis=1).squeeze(1) + + denom = v_lo - 2.0 * v_0 + v_hi + offset = jnp.where( + denom < -1e-20, + jnp.clip(0.5 * (v_lo - v_hi) / denom, -0.5, 0.5), + 0.0, + ) + + dφ = φ_grid[1] - φ_grid[0] + φ_fd_raw = jnp.take_along_axis( + jnp.broadcast_to(φ_grid[None, :, None], V_choices.shape), + idx, + axis=1, + ).squeeze(1) + φ_fd = jnp.clip(φ_fd_raw + offset * dφ, φ_grid[0], φ_grid[-1]) + + V_fd = v_0 - (v_lo - v_hi) ** 2 / jnp.where( + denom < -1e-20, 8.0 * denom, -8.0, + ) + V_fd = jnp.where(denom < -1e-20, V_fd, v_0) + H_fd = H_func(φ_fd, κ, η_m) + + return V_choices, V_fd, φ_fd, H_fd + + +def _continuation_on_grid(W, model, bp_grid, φ_grid, H_φ): + """Compute continuation objects on a (b', φ') grid.""" + V_md, V_fd, φ_fd, H_fd = fd_from_continuation( + W, model.B_grid, bp_grid, φ_grid, + model.κ, model.η_m, + ) + + η_bar = jax.nn.sigmoid( + model.λ * (V_md - V_fd[:, None, :] + model.ξ_grid[None, None, :]) + ) + + H_comb = ( + η_bar * H_φ[None, :, None] + + (1.0 - 
η_bar) * H_fd[:, None, :] + ) + J = model.β * jnp.einsum("abj,kj->abk", H_comb, model.P_ξ) + + Ω = jnp.logaddexp( + model.λ * V_md, + model.λ * (V_fd[:, None, :] - model.ξ_grid[None, None, :]), + ) / model.λ + EV = jnp.einsum("abj,kj->abk", Ω, model.P_ξ) + + return EV, J, V_fd, H_fd, V_md, η_bar, φ_fd + + +def compute_continuation(W, model): + """Compute continuation objects on the model's coarse grid.""" + return _continuation_on_grid( + W, model, model.b_prime_grid, model.φ_grid, model.H_φ + ) + + +def bellman_rhs(W, model): + EV, J, _, _, _, _, _ = compute_continuation(W, model) + + Δ = ( + model.B_grid[None, None, :, None] + - model.β * model.b_prime_grid[:, None, None, None] + - J[:, :, None, :] + ) + + U_all = jnp.interp(Δ.ravel(), model.Δ_fine, model.U_fine).reshape(Δ.shape) + in_range = (Δ > model.Δ_fine[0]) & (Δ < model.Δ_fine[-1]) + U_all = jnp.where(in_range, U_all, PENALTY) + + val = U_all + model.β_hat * EV[:, :, None, :] + n_bp = model.b_prime_grid.shape[0] + n_φ = model.φ_grid.shape[0] + return val.reshape(n_bp * n_φ, model.B_grid.shape[0], model.ξ_grid.shape[0]) + + +def T(W, model): + return jnp.max(bellman_rhs(W, model), axis=0) +``` + +### Solving the model + +`solve_model` runs value function iteration using `lax.while_loop`, applying a **dampened** Bellman operator $W_{n+1} = \omega\, T(W_n) + (1 - \omega)\, W_n$ with $\omega = 0.01$, terminating when the sup-norm update error falls below `tol` or after `max_iter` iterations. + +`extract_policies` then re-evaluates the Bellman RHS on a choice grid that is 3$\times$ denser in both $b'$ and $\phi'$. + +```{code-cell} ipython3 +def solve_model(model, tol=1e-4, max_iter=10_000, damp=0.01, + log_period=10, verbose=True): + """Solve by dampened VFI: W_{n+1} = damp * T(W_n) + (1 - damp) * W_n. + + Returns (W, error_log). 
+ """ + U0, _ = indirect_utility(0.0, model.θ, model.χ, model.ψ, model.σ) + W_init = jnp.full( + (len(model.B_grid), len(model.ξ_grid)), + U0 / (1.0 - model.β_hat), + ) + log_size = max_iter // log_period + 1 + + @jit + def run_vfi(W0): + err_log = jnp.full(log_size, jnp.nan) + + def cond(state): + W, err, i, _ = state + return (err > tol) & (i < max_iter) + + def body(state): + W, _, i, err_log = state + W_new = damp * T(W, model) + (1.0 - damp) * W + err = jnp.max(jnp.abs(W_new - W)) + log_idx = i // log_period + err_log = err_log.at[log_idx].set(err) + return W_new, err, i + 1, err_log + + W, err, n, err_log = lax.while_loop( + cond, body, (W0, jnp.inf, 0, err_log) + ) + return W, err, n, err_log + + W, err, n_iters, err_log = run_vfi(W_init) + W.block_until_ready() + + if verbose: + bellman_error = float(jnp.max(jnp.abs(T(W, model) - W))) + if bellman_error < tol: + print(f"Converged in {int(n_iters)} iterations " + f"(Bellman error: {bellman_error:0.2e})") + else: + print(f"Did not converge (Bellman error: {bellman_error:0.2e})") + + return W, np.asarray(err_log) + + +@partial(jit, static_argnums=(2, 3)) +def extract_policies(W, model, refine_b=3, refine_φ=3): + """ + Extract policy functions on a dense choice grid from the converged W. 
+ """ + n_B = len(model.B_grid) + n_φ_coarse = len(model.φ_grid) + n_ξ = len(model.ξ_grid) + + n_bp = n_B * refine_b + n_φ = n_φ_coarse * refine_φ + bp_grid = jnp.linspace( + model.b_prime_grid[0], model.b_prime_grid[-1], n_bp) + φ_grid = jnp.linspace(model.φ_grid[0], model.φ_grid[-1], n_φ) + + H_φ_dense = H_func(φ_grid, model.κ, model.η_m) + EV, J, V_fd, H_fd, V_md, η_bar, φ_fd = _continuation_on_grid( + W, model, bp_grid, φ_grid, H_φ_dense, + ) + + Δ = ( + model.B_grid[None, None, :, None] + - model.β * bp_grid[:, None, None, None] + - J[:, :, None, :] + ) + + U_all = jnp.interp( + Δ.ravel(), model.Δ_fine, model.U_fine).reshape(Δ.shape) + in_range = (Δ > model.Δ_fine[0]) & (Δ < model.Δ_fine[-1]) + U_all = jnp.where(in_range, U_all, PENALTY) + + val = U_all + model.β_hat * EV[:, :, None, :] + val_flat = val.reshape(n_bp * n_φ, n_B, n_ξ) + best_idx = jnp.argmax(val_flat, axis=0) + + pol_b = bp_grid[best_idx // n_φ] + pol_φ = φ_grid[best_idx % n_φ] + + Δ_flat = Δ.reshape(n_bp * n_φ, n_B, n_ξ) + pol_Δ = jnp.take_along_axis( + Δ_flat, best_idx[None], axis=0).squeeze(0) + + η_current = jnp.einsum("abj,kj->abk", η_bar, model.P_ξ) + η_flat = η_current.reshape(n_bp * n_φ, n_ξ) + pol_η = jnp.take_along_axis(η_flat, best_idx, axis=0) + + J_flat = J.reshape(n_bp * n_φ, n_ξ) + pol_J = jnp.take_along_axis(J_flat, best_idx, axis=0) + + φ_fd_current = jnp.einsum("aj,kj->ak", φ_fd, model.P_ξ) + φ_fd_flat = jnp.repeat( + φ_fd_current[:, None, :], n_φ, axis=1 + ).reshape(n_bp * n_φ, n_ξ) + pol_φ_fd = jnp.take_along_axis(φ_fd_flat, best_idx, axis=0) + + return pol_b, pol_φ, pol_Δ, pol_η, pol_J, pol_φ_fd +``` + +`solve_policy_cache` solves the model at multiple $\theta$ values and stores the resulting value and policy arrays. + +`build_current_fd_cache` computes the fiscal-dominance value and $\phi^{fd}$ as functions of inherited debt. + +`build_sim_cache` collects all arrays into one dictionary; the simulation functions then use `np.interp` to evaluate policies at state values. 
+ +```{code-cell} ipython3 +def solve_policy_cache(θ_nodes, base_model, **solve_kw): + """Solve for several θ values; returns (cache_dict, err_log).""" + out = {k: [] for k in ["W", "b", "φ", "Δ", "η", "J", "φ_fd"]} + θ_nodes = np.asarray(θ_nodes, dtype=float) + first_err_log = None + + # Batch-compute U_fine for all θ values at once + U_fine_all = vmap( + lambda θ_val: vmap( + lambda d: indirect_utility( + d, θ_val, base_model.χ, base_model.ψ, base_model.σ) + )(base_model.Δ_fine)[0] + )(jnp.asarray(θ_nodes)) + + for i, θ_val in enumerate(θ_nodes): + m = base_model._replace( + θ=float(θ_val), + U_fine=U_fine_all[i], + ) + W, err_log = solve_model(m, verbose=False, **solve_kw) + if first_err_log is None: + first_err_log = err_log + pol_b, pol_φ, pol_Δ, pol_η, pol_J, pol_φ_fd = extract_policies(W, m) + + for name, arr in zip( + ["W", "b", "φ", "Δ", "η", "J", "φ_fd"], + [W, pol_b, pol_φ, pol_Δ, pol_η, pol_J, pol_φ_fd], + ): + out[name].append(np.asarray(arr)) + + return { + "θ_nodes": θ_nodes, + "ξ_grid": np.asarray(base_model.ξ_grid), + "B_grid": np.asarray(base_model.B_grid), + "φ_grid": np.asarray(base_model.φ_grid), + **{k: np.stack(v) for k, v in out.items()}, + }, first_err_log + + +def build_current_fd_cache(cache, base_model): + """Compute V^fd and φ^fd as functions of inherited debt.""" + B_g = jnp.array(cache["B_grid"]) + φ_grid = jnp.array(cache["φ_grid"]) + b_bar = jnp.maximum(B_g[-1] - φ_grid[-1], B_g[0]) + b_g = jnp.linspace(B_g[0], b_bar, B_g.shape[0]) + out_V, out_φ = [], [] + + for θi in range(len(cache["θ_nodes"])): + W_θ = jnp.array(cache["W"][θi]) + _, V_fd_cur, φ_fd_cur, _ = fd_from_continuation( + W_θ, B_g, b_g, φ_grid, base_model.κ, base_model.η_m, + ) + out_V.append(np.asarray(V_fd_cur)) + out_φ.append(np.asarray(φ_fd_cur)) + + return { + "b_grid": np.asarray(b_g), + "V_fd": np.stack(out_V), + "φ_fd": np.stack(out_φ), + } + + +def build_sim_cache(cache, current_fd_cache): + """Collect arrays needed for IRF simulation.""" + return { + "B_grid": 
np.asarray(cache["B_grid"]), + "b_grid": np.asarray(current_fd_cache["b_grid"]), + "W": cache["W"], + "b": cache["b"], + "φ": cache["φ"], + "Δ": cache["Δ"], + "V_fd": current_fd_cache["V_fd"], + "φ_fd": current_fd_cache["φ_fd"], + } +``` + +We solve the model for both $\theta$ values ($\theta = 130$ baseline and $\theta_H = 200$ for the fundamental disinflation) in a single pass, which triggers JIT compilation on the first call. + +```{code-cell} ipython3 +model = create_model() +θ_high = 200.0 +θ_all = np.array([model.θ, θ_high]) + +cache, err_log = solve_policy_cache(θ_all, model) +current_fd_cache = build_current_fd_cache(cache, model) +sim_cache = build_sim_cache(cache, current_fd_cache) + +# Extract baseline (θ = 130) results for plotting +W = jnp.array(cache["W"][0]) +pol_b = jnp.array(cache["b"][0]) +pol_φ = jnp.array(cache["φ"][0]) +pol_Δ = jnp.array(cache["Δ"][0]) +B_grid = model.B_grid +ξ_grid = model.ξ_grid +n_ξ = len(ξ_grid) +n_ξ_coarse = n_ξ +``` + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: VFI convergence + name: fig-vfi-convergence +--- +valid = ~np.isnan(err_log) +iters = np.arange(len(err_log))[valid] * 10 +fig, ax = plt.subplots(figsize=(8, 4)) +ax.semilogy(iters, err_log[valid], lw=2) +ax.set_xlabel('iteration') +ax.set_ylabel('sup-norm error') +plt.tight_layout() +plt.show() +``` + +The simulation functions below use `np.interp` to evaluate policies at arbitrary state values, compute regime probabilities from the logit formula, and recover equilibrium allocations from the surplus. 
+ +```{code-cell} ipython3 +def _interp(grid, values, x): + """Linear interpolation with clipping.""" + x = float(np.clip(x, grid[0], grid[-1])) + return float(np.interp(x, grid, values)) + + +def interp_current_fd(sim_cache, θi, ξi, b): + """Interpolate current-state FD value and policy at inherited debt b.""" + b_g = sim_cache["b_grid"] + φ_fd = _interp(b_g, sim_cache["φ_fd"][θi, :, ξi], b) + V_fd = _interp(b_g, sim_cache["V_fd"][θi, :, ξi], b) + return φ_fd, V_fd + + +def current_eta_prob(b, φ_promise, θi, ξi, cache, sim_cache, p): + """Probability that today's inherited target is honored.""" + B_g = sim_cache["B_grid"] + ξ_val = cache["ξ_grid"][ξi] + B_md = float(np.clip(b + φ_promise, B_g[0], B_g[-1])) + V_md = _interp(B_g, sim_cache["W"][θi, :, ξi], B_md) + V_md += float(v_money(φ_promise, p.κ, p.η_m)) + _, V_fd = interp_current_fd(sim_cache, θi, ξi, b) + z = float(np.clip(p.λ * (V_md - V_fd + ξ_val), -500.0, 500.0)) + η_prob = 1.0 / (1.0 + np.exp(-z)) + return η_prob, V_md, V_fd + + +def initialize_fd_state(b0, θi, ξi, cache, sim_cache): + """Choose a promise consistent with a selected FD initial debt state.""" + B_g = sim_cache["B_grid"] + φ_fd0, _ = interp_current_fd(sim_cache, θi, ξi, b0) + B0 = float(np.clip(b0 + φ_fd0, B_g[0], B_g[-1])) + φ_promise0 = _interp(B_g, sim_cache["φ"][θi, :, ξi], B0) + return float(b0), φ_promise0 + + +def static_allocation(Δ, θ, χ, ψ, σ): + """Recover equilibrium labor l and government spending g from surplus Δ.""" + g_star = θ ** (1.0 / σ) + l_star = (1.0 / χ) ** (1.0 / ψ) + l_peak = (1.0 / ((1.0 + ψ) * χ)) ** (1.0 / ψ) + T_max = (1.0 - χ * l_peak**ψ) * l_peak + + if Δ <= -g_star: + return l_star, g_star, 0.0 + + if Δ >= 0.999 * T_max: + return l_peak, 1e-8, 0.0 + + # Bisect on λ in [0, 1000] with convergence + iteration guard + lo, hi = 0.0, 1000.0 + for _ in range(200): + if (hi - lo) <= 1e-10: + break + mid = 0.5 * (lo + hi) + g_val = (θ / (1.0 + mid)) ** (1.0 / σ) + denom = max(χ * (1.0 + mid * (1.0 + ψ)), 1e-15) + 
l_val = max((1.0 + mid) / denom, 1e-15) ** (1.0 / ψ) + T_val = (1.0 - χ * l_val**ψ) * l_val + if T_val - g_val <= Δ: + lo = mid + else: + hi = mid + + lam = 0.5 * (lo + hi) + g_opt = (θ / (1.0 + lam)) ** (1.0 / σ) + denom = max(χ * (1.0 + lam * (1.0 + ψ)), 1e-15) + l_opt = max((1.0 + lam) / denom, 1e-15) ** (1.0 / ψ) + + return l_opt, g_opt, lam +``` + +The next two functions below simulate impulse responses for the fundamental and institutional disinflation experiments, stepping forward in time using the cached policy functions. + +```{code-cell} ipython3 +def simulate_fundamental_irf( + b0, + φ_promise0, + θ_idx_path, + ξ_idx, + cache, + sim_cache, + p, + t_shock, +): + """Simulate a fundamental disinflation (FD throughout).""" + T = len(θ_idx_path) + B_g = cache["B_grid"] + θ_nodes = cache["θ_nodes"] + ξi = int(ξ_idx) + + out = { + k: np.zeros(T) + for k in [ + "b", + "φ", + "φ_prime", + "Δ", + "π", + "η", + "η_prob", + "regime", + "debt_gdp", + "surplus_gdp", + ] + } + + θi_pre = int(θ_idx_path[0]) + η_pre, _, _ = current_eta_prob( + b0, + φ_promise0, + θi_pre, + ξi, + cache, + sim_cache, + p, + ) + φ_pre, _ = interp_current_fd(sim_cache, θi_pre, ξi, b0) + B_pre = float(np.clip(b0 + φ_pre, B_g[0], B_g[-1])) + b_next_pre = _interp(B_g, sim_cache["b"][θi_pre, :, ξi], B_pre) + Δ_pre = _interp(B_g, sim_cache["Δ"][θi_pre, :, ξi], B_pre) + + J_prev = B_pre - Δ_pre - p.β * b_next_pre + π_pre = (J_prev / max(φ_pre, 1e-12) - 1.0) * 100.0 + + l_pre, _, _ = static_allocation( + Δ_pre, + float(θ_nodes[θi_pre]), + p.χ, + p.ψ, + p.σ, + ) + debt_pre = 100.0 * b0 / max(l_pre, 1e-12) + surplus_pre = 100.0 * Δ_pre / max(l_pre, 1e-12) + + out["b"][:t_shock] = b0 + out["φ"][:t_shock] = φ_pre + out["φ_prime"][:t_shock] = φ_promise0 + out["Δ"][:t_shock] = Δ_pre + out["π"][:t_shock] = π_pre + out["η"][:t_shock] = η_pre + out["η_prob"][:t_shock] = η_pre + out["regime"][:t_shock] = 0.0 + out["debt_gdp"][:t_shock] = debt_pre + out["surplus_gdp"][:t_shock] = surplus_pre + + b = float(b0) 
+ φ_promise = float(φ_promise0) + + for t in range(t_shock, T): + θi = int(θ_idx_path[t]) + η_prob, _, _ = current_eta_prob( + b, + φ_promise, + θi, + ξi, + cache, + sim_cache, + p, + ) + + φ_t, _ = interp_current_fd(sim_cache, θi, ξi, b) + B = float(np.clip(b + φ_t, B_g[0], B_g[-1])) + π_t = (J_prev / max(φ_t, 1e-12) - 1.0) * 100.0 + + b_prime = _interp(B_g, sim_cache["b"][θi, :, ξi], B) + φ_prime = _interp(B_g, sim_cache["φ"][θi, :, ξi], B) + Δ_t = _interp(B_g, sim_cache["Δ"][θi, :, ξi], B) + + l_t, _, _ = static_allocation( + Δ_t, + float(θ_nodes[θi]), + p.χ, + p.ψ, + p.σ, + ) + debt_gdp_t = 100.0 * b / max(l_t, 1e-12) + surplus_gdp_t = 100.0 * Δ_t / max(l_t, 1e-12) + + out["b"][t] = b + out["φ"][t] = φ_t + out["φ_prime"][t] = φ_prime + out["Δ"][t] = Δ_t + out["π"][t] = π_t + out["η"][t] = η_prob + out["η_prob"][t] = η_prob + out["regime"][t] = 0.0 + out["debt_gdp"][t] = debt_gdp_t + out["surplus_gdp"][t] = surplus_gdp_t + + J_prev = B - Δ_t - p.β * b_prime + b = float(b_prime) + φ_promise = float(φ_prime) + + return out + + +def simulate_institutional_irf( + b0, + φ_promise0, + θ_idx_path, + ξ_idx_path, + cache, + sim_cache, + p, + t_shock, +): + """Simulate an institutional disinflation (endogenous regime switching).""" + T = len(ξ_idx_path) + B_g = cache["B_grid"] + θ_nodes = cache["θ_nodes"] + + out = { + k: np.zeros(T) + for k in [ + "b", + "φ", + "φ_prime", + "Δ", + "π", + "η", + "η_prob", + "regime", + "debt_gdp", + "surplus_gdp", + ] + } + + θi_pre = int(θ_idx_path[0]) + ξi_pre = int(ξ_idx_path[0]) + η_pre, _, _ = current_eta_prob( + b0, + φ_promise0, + θi_pre, + ξi_pre, + cache, + sim_cache, + p, + ) + φ_pre, _ = interp_current_fd(sim_cache, θi_pre, ξi_pre, b0) + B_pre = float(np.clip(b0 + φ_pre, B_g[0], B_g[-1])) + b_next_pre = _interp(B_g, sim_cache["b"][θi_pre, :, ξi_pre], B_pre) + Δ_pre = _interp(B_g, sim_cache["Δ"][θi_pre, :, ξi_pre], B_pre) + + J_prev = B_pre - Δ_pre - p.β * b_next_pre + π_pre = (J_prev / max(φ_pre, 1e-12) - 1.0) * 100.0 + + 
l_pre, _, _ = static_allocation( + Δ_pre, + float(θ_nodes[θi_pre]), + p.χ, + p.ψ, + p.σ, + ) + debt_pre = 100.0 * b0 / max(l_pre, 1e-12) + surplus_pre = 100.0 * Δ_pre / max(l_pre, 1e-12) + + out["b"][:t_shock] = b0 + out["φ"][:t_shock] = φ_pre + out["φ_prime"][:t_shock] = φ_promise0 + out["Δ"][:t_shock] = Δ_pre + out["π"][:t_shock] = π_pre + out["η"][:t_shock] = η_pre + out["η_prob"][:t_shock] = η_pre + out["regime"][:t_shock] = 0.0 + out["debt_gdp"][:t_shock] = debt_pre + out["surplus_gdp"][:t_shock] = surplus_pre + + b = float(b0) + φ_promise = float(φ_promise0) + + for t in range(t_shock, T): + θi = int(θ_idx_path[t]) + ξi = int(ξ_idx_path[t]) + + η_t, _, _ = current_eta_prob( + b, + φ_promise, + θi, + ξi, + cache, + sim_cache, + p, + ) + regime_t = float(η_t >= 0.5) + φ_fd_t, _ = interp_current_fd(sim_cache, θi, ξi, b) + φ_t = float(φ_promise if regime_t else φ_fd_t) + B = float(np.clip(b + φ_t, B_g[0], B_g[-1])) + π_t = (J_prev / max(φ_t, 1e-12) - 1.0) * 100.0 + + b_prime = _interp(B_g, sim_cache["b"][θi, :, ξi], B) + φ_prime = _interp(B_g, sim_cache["φ"][θi, :, ξi], B) + Δ_t = _interp(B_g, sim_cache["Δ"][θi, :, ξi], B) + + l_t, _, _ = static_allocation( + Δ_t, + float(θ_nodes[θi]), + p.χ, + p.ψ, + p.σ, + ) + debt_gdp_t = 100.0 * b / max(l_t, 1e-12) + surplus_gdp_t = 100.0 * Δ_t / max(l_t, 1e-12) + + out["b"][t] = b + out["φ"][t] = φ_t + out["φ_prime"][t] = φ_prime + out["Δ"][t] = Δ_t + out["π"][t] = π_t + out["η"][t] = η_t + out["η_prob"][t] = η_t + out["regime"][t] = regime_t + out["debt_gdp"][t] = debt_gdp_t + out["surplus_gdp"][t] = surplus_gdp_t + + J_prev = B - Δ_t - p.β * b_prime + b = float(b_prime) + φ_promise = float(φ_prime) + + return out +``` + +## Two types of disinflation + +A central result of the model is that inflation can decline for two distinct reasons, each with different implications for the dynamics of public debt. 
+ +Following the paper's terminology, a reduction in the marginal value of government spending $\theta$ is called a **fundamental disinflation**, while an increase in the (expected) cost of deviating from the promised inflation $\xi$ is called an **institutional disinflation**. + +### Fundamental disinflation ($\theta$ falls, $\xi$ fixed) + +Consider a path in which the realization of $\xi_t$ is low enough so that it is always optimal to be in the fiscal dominant regime. + +Along this path, the value of real money balances (and inflation) is determined by the static condition $-U'(\Delta, \theta) = v'(\phi^{fd})$ and is therefore closely tied to fiscal considerations, as in a Markov equilibrium. + +When $\theta$ falls from $\theta_H$ to $\theta_L$, the reduction in $\theta$ shifts the policy function $\phi^{fd}(b, \theta)$ upward: for any level of real debt, the government finds it optimal to choose a higher value for real balances, reflecting the lower marginal value of relaxing its budget constraint when government spending is less valuable. + +The optimal policy for debt issuance shifts downward because the government now has stronger precautionary saving motives and therefore chooses to reduce its debt issuance. + +As a result, a decline in $\theta$ while keeping $\xi$ at a low level leads to an increase in real money balances (lower inflation) and a decrease in real debt -- a **positive correlation** between inflation and debt. + +### Institutional disinflation ($\xi$ rises, $\theta$ constant) + +Now consider the effects of an increase in the cost of deviating from the promised inflation target. + +When $\xi$ rises from $\xi_L$ to $\xi_H$ (high enough to make it optimal to switch to the monetary dominant regime), the realized $\phi$ now equals the promised value of real balances, which is higher than the statically optimal level $\phi^{fd}$. 
+ +Critically, if the process for $\xi$ is persistent, an increase in current $\xi$ implies an increase in the expected value for $\xi'$, so the government now has lower incentives to reduce the amount of debt it issues because the wedge in the Euler equation is smaller in absolute value. + +As the government shifts to the monetary-dominant regime, the present value of seigniorage revenues falls, and the government must finance the inherited real liabilities with a higher present value of surpluses -- since the government is impatient, these higher surpluses are back-loaded, also resulting in an increase in the level of debt issued. + +Thus inflation and debt move in *opposite* directions -- the signature of institutional disinflation. + +We now illustrate the two types of disinflation by simulating impulse responses using the paper-style solver introduced above. + +Both experiments treat the shock as an MIT shock: the change is permanent and unanticipated, so the agent does not anticipate the shock before it occurs. + +For the **fundamental disinflation**, we solve two separate models -- one with $\theta_H$ and one with $\theta_L$ -- and simulate the pre-shock path under $\theta_H$ at low $\xi$ (fiscal dominance), then switch to the $\theta_L$ model at the shock date. + +For the **institutional disinflation**, we solve a single model and simulate the pre-shock path at low $\xi$ (fiscal dominance), then switch to a higher $\xi$ state at the shock date. + +We solve for both $\theta$ values using `solve_policy_cache`, build lookup arrays with `build_sim_cache`, then simulate using `simulate_fundamental_irf` and `simulate_institutional_irf`. + +```{code-cell} ipython3 +def plot_irf(irf, θ_path, ξ_path, time, title): + """ + Plot the 4x2 IRF figure. 
+ """ + fig, axes = plt.subplots(4, 2, figsize=(12, 14)) + fig.suptitle(title, fontsize=14, y=1.01) + kw = dict(lw=2, color="tab:blue") + vkw = dict(color="k", ls=":", alpha=0.4) + + axes[0, 0].plot(time, θ_path, **kw) + axes[0, 0].set_title(r"$\theta$") + axes[0, 0].axvline(0, **vkw) + + axes[0, 1].plot(time, ξ_path, **kw) + axes[0, 1].set_title(r"$\xi$") + axes[0, 1].axvline(0, **vkw) + + axes[1, 0].plot(time, irf["regime"], **kw, label="regime") + axes[1, 0].plot( + time, + irf["η_prob"], + lw=2, + color="tab:blue", + ls="--", + label="Pr target met", + ) + axes[1, 0].set_ylim(-0.05, 1.05) + axes[1, 0].set_title("Regime (0 = FD) and Pr target met (dashed)") + axes[1, 0].legend() + axes[1, 0].axvline(0, **vkw) + + axes[1, 1].plot(time, irf["debt_gdp"], **kw) + axes[1, 1].set_title("Debt to GDP (%)") + axes[1, 1].axvline(0, **vkw) + + axes[2, 0].plot(time, irf["surplus_gdp"], **kw) + axes[2, 0].set_title("Surplus to GDP (%)") + axes[2, 0].axvline(0, **vkw) + + axes[2, 1].plot(time, irf["π"], **kw) + axes[2, 1].set_title("Inflation Rate (%)") + axes[2, 1].axvline(0, **vkw) + + axes[3, 0].plot(time, irf["φ"], **kw) + axes[3, 0].set_title(r"Current $\phi$") + axes[3, 0].set_xlabel("time") + axes[3, 0].axvline(0, **vkw) + + axes[3, 1].plot(time, irf["φ_prime"], **kw) + axes[3, 1].set_title(r"Promised $\phi'$") + axes[3, 1].set_xlabel("time") + axes[3, 1].axvline(0, **vkw) + + plt.tight_layout() + return fig +``` + +```{code-cell} ipython3 +p = model # alias used by the simulation functions +``` + +```{code-cell} ipython3 +T_irf = 60 +t_shock = 10 +ξ_pre = 0 +ξ_post = n_ξ_coarse // 2 + +# Fundamental disinflation +b0_fund, φ0_fund = initialize_fd_state(20.0, 1, 0, cache, sim_cache) +θ_fund_idx = np.where(np.arange(T_irf) < t_shock, 1, 0).astype(int) +irf_fund = simulate_fundamental_irf( + b0_fund, + φ0_fund, + θ_fund_idx, + 0, + cache, + sim_cache, + p, + t_shock, +) + +# Institutional disinflation +b0_inst, φ0_inst = initialize_fd_state( + 4.0, + 0, + ξ_pre, + cache, 
+ sim_cache, +) +θ_inst_idx = np.zeros(T_irf, dtype=int) +ξ_inst_idx = np.where( + np.arange(T_irf) < t_shock, ξ_pre, ξ_post +).astype(int) +irf_inst = simulate_institutional_irf( + b0_inst, + φ0_inst, + θ_inst_idx, + ξ_inst_idx, + cache, + sim_cache, + p, + t_shock, +) + +time = np.arange(T_irf) - t_shock +``` + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Fundamental disinflation + name: fig-fundamental +--- +θ_fund = np.where(np.arange(T_irf) < t_shock, θ_high, p.θ) +fig = plot_irf(irf_fund, θ_fund, np.zeros(T_irf), time, + 'Fundamental disinflation') +plt.show() +``` + +**Fundamental disinflation** ({numref}`fig-fundamental`): a permanent drop in $\theta$ from $\theta_H$ to $\theta_L$ reduces fiscal pressure. + +Following the shock, both debt and inflation decline together -- the signature of fundamental disinflation. + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Institutional disinflation + name: fig-institutional +--- +θ_inst = np.full(T_irf, p.θ) +ξ_inst = np.asarray(cache["ξ_grid"])[ξ_inst_idx] +fig = plot_irf(irf_inst, θ_inst, ξ_inst, time, + 'Institutional disinflation') +plt.show() +``` + +**Institutional disinflation** ({numref}`fig-institutional`): a permanent rise in $\xi$ pushes the economy from fiscal dominance toward monetary dominance. + +Following the shock, inflation drops while debt rises -- the signature of institutional disinflation. + +## The particle filter + +The empirical strategy centers on a **nonlinear state-space system** estimated with a bootstrap particle filter. 
+ +To keep the lecture computationally light, we illustrate the filtering algorithm on a reduced-form nonlinear state-space system: + +$$ +y_t = f(S_t) + \varepsilon_t^y, \qquad +S_{t+1} = k(S_t, \varepsilon_{t+1}) +$$ + +where + +- $y_t = (\pi_t, 100 b_t)$ are observables (inflation and debt in percent of GDP) +- $S_t = (b_t, \phi_t, \theta_t, \xi_t)$ is the state vector +- $\varepsilon_t^y \sim \mathcal{N}(0, \Sigma)$ are measurement errors + +Because the state transition and observation equations are *nonlinear*, the Kalman filter is not applicable. + +A **bootstrap particle filter** (sequential Monte Carlo) approximates the filtering distribution $p(S_t | y_{1:t})$ with a set of weighted particles. + +We mirror that approach here, but with simplified transition and observation equations. + +### Algorithm + +1. *Initialize*: Draw $N$ particles $\{S_0^{(i)}\}_{i=1}^N$ from the prior +2. *For* $t = 1, \ldots, T$: + + *Propagate*: For each particle $i$, draw $\varepsilon_{t}^{(i)}$ and compute + $S_t^{(i)} = k(S_{t-1}^{(i)}, \varepsilon_t^{(i)})$ + + *Weight*: Compute the likelihood of observed $y_t$ given $S_t^{(i)}$: + $w_t^{(i)} = p(y_t | S_t^{(i)}) \propto + \exp\!\left(-\frac{1}{2}(y_t - f(S_t^{(i)}))^\top \Sigma^{-1} + (y_t - f(S_t^{(i)}))\right)$ + + *Normalize* weights: $\tilde w_t^{(i)} = w_t^{(i)} / \sum_j w_t^{(j)}$ + + *Resample*: Draw $N$ particles from $\{S_t^{(i)}\}$ with probabilities + $\{\tilde w_t^{(i)}\}$ + +3. *Output*: The filtered state estimate is the weighted average of particles + +The JAX implementation below vectorizes over particles with `vmap` and loops over time with `lax.scan`. + +Propagation and weighting are fully parallel across particles. + +Resampling uses `jnp.searchsorted` on the cumulative weight array. 
+ +```{code-cell} ipython3 +@partial(jit, static_argnums=(2,)) +def particle_filter(y_data, key, N_particles, + b_init, φ_init, θ_bar, ξ_init, + ρ_θ, σ_θ, α_l, α_ξ, ξ_bar, + β, κ, η_m, λ, σ_π, σ_b): + """Bootstrap particle filter returning filtered paths and log-likelihood.""" + + φ_star = κ / (2.0 * η_m) + + # Particles: [b, φ, θ, ξ_1, φ_old] + key, *ks = jax.random.split(key, 5) + φ_init_particles = φ_init + 0.2 * jax.random.normal(ks[1], (N_particles,)) + particles = jnp.column_stack([ + b_init + 0.02 * jax.random.normal(ks[0], (N_particles,)), + φ_init_particles, + θ_bar + σ_θ * jax.random.normal(ks[2], (N_particles,)), + jnp.clip(ξ_init + 0.1 * jax.random.normal(ks[3], (N_particles,)), + 0.0, ξ_bar), + φ_init_particles + ]) + + def propagate_one(particle, pk): + b, φ, θ, ξ1, _ = particle + k1, k2, k3 = jax.random.split(pk, 3) + + θ_new = jnp.maximum( + θ_bar + ρ_θ * (θ - θ_bar) + σ_θ * jax.random.normal(k1), 1.0) + + # ξ_1 Markov chain: reset to 0 with prob α_l, + # stay with prob α_ξ, uniform draw otherwise. 
+ u = jax.random.uniform(k2) + ξ_uniform = jax.random.uniform(k3) * ξ_bar + ξ_new = jnp.where(u < α_l, 0.0, + jnp.where(u < α_l + α_ξ, ξ1, ξ_uniform)) + + # Regime probability: calibrate V_gap so low ξ to FD, high ξ to MD + V_gap = -0.15 + η = jax.nn.sigmoid(10.0 * λ * (V_gap + ξ1)) + + # φ dynamics from the static FOC + φ_fd = jnp.clip(6.0 - 0.025 * θ, 1.0, φ_star * 0.95) + φ_md = φ_star * 0.92 + φ_target = η * φ_md + (1 - η) * φ_fd + φ_new = jnp.clip(0.5 * φ + 0.5 * φ_target, 0.5, φ_star * 0.99) + + # Debt dynamics: FD to low debt, MD to higher debt + b_fd_ss = jnp.clip(0.18 - 0.0005 * (θ - θ_bar), 0.10, 0.25) + b_md_ss = jnp.clip(b_fd_ss + 0.25, 0.25, 0.50) + b_target = η * b_md_ss + (1 - η) * b_fd_ss + b_new = jnp.maximum(0.01, 0.90 * b + 0.10 * b_target) + + return jnp.array([b_new, φ_new, θ_new, ξ_new, φ]) + + def observe_one(particle): + """Map state to observables: π = β*H(φ)/φ - 1, debt/GDP.""" + b, φ, θ, ξ1, φ_old = particle + H_val = H_func(φ, κ, η_m) + inflation = (β * H_val / jnp.maximum(φ, 1e-8) - 1.0) * 100.0 + debt_to_gdp = b * 100.0 + return jnp.array([inflation, debt_to_gdp]) + + σ_vec = jnp.array([σ_π, σ_b]) + + def pf_step(carry, inputs): + particles, log_lik = carry + y_t, step_key = inputs + k_prop, k_resamp = jax.random.split(step_key) + + # Propagate each particle forward one period + prop_keys = jax.random.split(k_prop, N_particles) + particles = vmap(propagate_one)(particles, prop_keys) + + # Map particles to observables and compute log-likelihood weights + y_preds = vmap(observe_one)(particles) + resid = y_t[None, :] - y_preds + log_w = (-0.5 * jnp.sum((resid / σ_vec)**2, axis=1) + - jnp.sum(jnp.log(σ_vec)) - jnp.log(2 * jnp.pi)) + + # Normalize weights (log-sum-exp trick for numerical stability) + max_lw = jnp.max(log_w) + w_unnorm = jnp.exp(log_w - max_lw) + sum_w = jnp.sum(w_unnorm) + weights = w_unnorm / sum_w + + # Accumulate log-likelihood + log_lik += max_lw + jnp.log(sum_w) - jnp.log(N_particles) + + # Weighted average gives the 
filtered state estimate + filtered = jnp.sum(weights[:, None] * particles, axis=0) + + # Resampling + cumsum = jnp.cumsum(weights) + u = jax.random.uniform(k_resamp) / N_particles + targets = u + jnp.arange(N_particles) / N_particles + indices = jnp.clip(jnp.searchsorted(cumsum, targets), + 0, N_particles - 1) + particles = particles[indices] + + return (particles, log_lik), filtered + + step_keys = jax.random.split(key, y_data.shape[0]) + (_, total_ll), filtered_all = lax.scan( + pf_step, (particles, 0.0), (y_data, step_keys)) + + return (filtered_all[:, 2], filtered_all[:, 3], + filtered_all[:, 0], filtered_all[:, 1], total_ll) +``` + +We demonstrate the particle filter on synthetic data that mimics an institutional disinflation: inflation declines from roughly 30% to 5% while debt rises from 20% to 45% of GDP. + +```{code-cell} ipython3 +rng = np.random.default_rng(0) +T_sim = 60 +t_reform = 25 + +inflation_data = np.concatenate([ + 25 + 5 * rng.standard_normal(t_reform), + np.linspace(25, 5, 10) + 2 * rng.standard_normal(10), + 5 + 2 * rng.standard_normal(T_sim - t_reform - 10) +]) +debt_data = np.concatenate([ + 20 + 2 * rng.standard_normal(t_reform), + np.linspace(20, 40, 10) + 3 * rng.standard_normal(10), + 40 + 3 * rng.standard_normal(T_sim - t_reform - 10) +]) + +y_data = jnp.column_stack([inflation_data, debt_data]) + +# Particle filter parameters (θ is stochastic in the PF, +# unlike the fixed-θ model above) +θ_bar_pf = 130.0 +ρ_θ = 0.8 +σ_θ = float(np.sqrt(15.0 * (1.0 - ρ_θ**2))) +α_l_pf = 0.005 +α_ξ_pf = 0.99 +ξ_bar_pf = 0.5 + +pf_key = jax.random.PRNGKey(123) + +θ_filt, ξ_filt, b_filt, φ_filt, ll = particle_filter( + y_data, pf_key, 5000, + b_init=0.22, φ_init=2.5, + θ_bar=θ_bar_pf, ξ_init=0.1, + ρ_θ=ρ_θ, σ_θ=σ_θ, + α_l=α_l_pf, α_ξ=α_ξ_pf, ξ_bar=ξ_bar_pf, + β=β, κ=κ, η_m=η_m, λ=λ_gumbel, + σ_π=2.0, σ_b=2.0 +) +``` + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Particle filter on synthetic data + name: fig-particle-filter +--- +years = 
1960 + np.arange(T_sim) + +fig, axes = plt.subplots(2, 2, figsize=(14, 10)) + +axes[0, 0].plot(years, θ_filt, lw=2) +axes[0, 0].set_ylabel('θ') +axes[0, 0].axvline(1960 + t_reform, color='gray', ls='--', alpha=0.5, + label='reform date') +axes[0, 0].legend() + +axes[0, 1].plot(years, ξ_filt, lw=2) +axes[0, 1].set_ylabel('ξ_1') +axes[0, 1].axvline(1960 + t_reform, color='gray', ls='--', alpha=0.5) + +axes[1, 0].plot(years, inflation_data, lw=2, label='data') +H_filt = H_func(φ_filt, κ, η_m) +y_model_π = (β * H_filt / jnp.maximum(φ_filt, 1e-8) - 1.0) * 100 +axes[1, 0].plot(years, y_model_π, '--', lw=2, label='model') +axes[1, 0].set_ylabel('inflation (%)') +axes[1, 0].set_xlabel('year') +axes[1, 0].legend() + +axes[1, 1].plot(years, debt_data, lw=2, label='data') +axes[1, 1].plot(years, b_filt * 100, '--', lw=2, label='model') +axes[1, 1].set_ylabel('debt/GDP (%)') +axes[1, 1].set_xlabel('year') +axes[1, 1].legend() + +plt.tight_layout() +plt.show() +``` + +## Case studies + +{cite:t}`DovisAccountingMFrevised` apply the model to two prominent disinflation episodes in Latin America. + +The full estimation is not reproduced here, but the main empirical findings are summarized below. + +### Colombia (1980–2017) + +In 1991 Colombia instituted a new constitution that granted substantial independence to its central bank, Banco de la República, explicitly mandating price stability as its primary objective and significantly insulating monetary policy from political influence ({cite}`PerezReynaOsorio2017`). + +In 2001 Colombia adopted an explicit inflation targeting regime with a long-term inflation goal of 3%. + +Prior to the 1991 reform, the central bank lacked autonomy, often making monetary policy susceptible to government pressures, and as a result Colombia suffered from persistent high inflation despite the relatively low level of debt. 
 + +The particle filter identifies an increase in the cost of deviating from the inflation target ($\xi$) starting in **1997**, not 1992 -- the first year after the reform. + +One possible explanation is that it took several years before the public came to view the reformed central bank as genuinely independent rather than a symbolic change. + +The model accounts for the reduction in inflation in the 1990s with an increase in the cost of deviating from the inflation target in 1997, resulting in a persistent shift to a monetary-dominant regime from 1997 onward. + +The observed increase in the debt-to-GDP ratio from 1994 to 2002 is driven by the switch to a monetary-dominant regime that allows for greater debt issuances and by higher-than-average realizations of $\theta_t$. + +A counterfactual with $\xi_t = 0$ throughout shows that, without a credible constitutional reform, debt would have similarly increased driven by the high realizations of $\theta_t$, but inflation would have remained constant or even risen during the latter half of the decade. + +This result underscores the crucial role credible institutional reforms played in simultaneously achieving higher debt levels and declining inflation in Colombia during this period. + +### Chile (1990–2017) + +Beginning in the late 1980s, Chile enacted a variety of fiscal and monetary reforms ({cite}`CaputoSaravia2018`). + +It tightened public finances and, for roughly three decades, consistently posted budget surpluses. + +On the monetary front, a 1989 constitutional law granted the Central Bank of Chile full autonomy and the country moved to an explicit inflation targeting regime soon after. + +In contrast to Colombia, both inflation and the debt-to-GDP ratio declined over this period. 
+ +During the first half of the 1990s, the drop in inflation can be replicated either by a fall in fiscal needs or by a rise in the penalty for deviating from the inflation target -- each channel on its own is sufficient to match the joint movements in inflation and debt. + +The distinction between them becomes critical in the second half of the decade: inflation keeps falling while debt-to-GDP merely flattens out. + +Replicating this pattern requires credibility shocks -- an isolated increase in fiscal needs would stabilize the debt ratio but, counterfactually, would drive inflation back up. + +The contrasting experiences of Colombia and Chile illuminate the two disinflation channels implied by the model: in Colombia the data can only be reconciled with a credibility gain (positive $\xi_t$ shocks), whereas in Chile the early-1990s disinflation could be matched either way, yet the continued decline in inflation once debt-to-GDP leveled off required additional credibility gains. + +## Key Mechanisms: A Summary + +The model revolves around three interconnected mechanisms. + +*1. Endogenous regime switching.* + +Whether the government honors or abrogates its inflation mandate depends on the state $(b, \phi, \theta, \xi)$. + +The regime emerges from optimization -- the government weighs the benefit of fiscal flexibility against a stochastic institutional cost -- rather than from an exogenous rule. + +Inflation moves because the government actively chooses to change its monetary policy, in the spirit of {cite:t}`SargentWallace1981`, rather than because agents coordinate on a different equilibrium. + +*2. Incentive effects on debt and inflation targets.* + +Under imperfect commitment, the current government strategically limits borrowing and chooses a less ambitious inflation target to reduce future governments' temptation to abrogate. 
+ +These incentive effects create a *downward wedge* in debt issuance relative to the Ramsey Euler equation and an *upward bias* in the inflation target relative to the Friedman rule. + +Both distortions vanish as $\xi \to \infty$ (Ramsey) and are maximal at $\xi = 0$ (Markov). + +The incentive to limit indebtedness becomes stronger as the probability of switching to the fiscal dominant regime increases. + +See {cite:t}`Ljungqvist2012`, chapter 23, for a broader discussion of the credibility problem. + +*3. Two disinflation sources with distinct debt dynamics.* + +Fundamental disinflations generate a positive correlation between inflation and the level of government debt, whereas institutional disinflations produce a negative correlation between the two. + +This contrasting behavior allows the authors to use the dynamics of debt and inflation to identify the contribution of institutional changes to inflation dynamics. + +| | $\Delta\pi$ | $\Delta b$ | Mechanism | +|---|:---:|:---:|---| +| Fundamental ($\theta \downarrow$) | $\downarrow$ | $\downarrow$ | Lower spending needs $\to$ less borrowing, less inflation | +| Institutional ($\xi \uparrow$) | $\downarrow$ | $\uparrow$ | Credible mandate $\to$ lower inflation, relaxed incentive wedge $\to$ more borrowing | + +A key takeaway is that credible monetary institutions are a prerequisite for sustaining high public debt at low inflation. + +## Exercises + +```{exercise-start} +:label: dovis_ex1 +``` + +For the middle $\xi_1$ state on the coarse grid, compute + +$$ +\phi^{fd}(b') = \arg\max_{\phi} \left[ W(b' + \phi, \xi_1) + v(\phi) \right] +$$ + +using the solved value function. + +Plot $\phi^{fd}(b')$ and the associated fiscal-dominance value $V^{fd}(b', \xi_1)$. + +```{exercise-end} +``` + +```{solution-start} dovis_ex1 +:class: dropdown +``` + +The following code plots the fiscal-dominance policy $\phi^{fd}(b')$ and the associated value $V^{fd}(b', \xi_1)$ from continuation values. 
+ +```{code-cell} ipython3 +# Use the solved value function W from the baseline model +ξ_mid = n_ξ // 2 +b_g = model.b_prime_grid + +# Compute φ^fd and V^fd using fd_from_continuation +V_md_ex, V_fd_vals, φ_fd_vals, _ = fd_from_continuation( + W, model.B_grid, b_g, model.φ_grid, model.κ, model.η_m +) + +# Extract the middle ξ state +φ_fd_mid = np.asarray(φ_fd_vals[:, ξ_mid]) +V_fd_mid = np.asarray(V_fd_vals[:, ξ_mid]) +b_np = np.asarray(b_g) +feasible_fd = V_fd_mid > PENALTY + +fig, axes = plt.subplots(1, 2, figsize=(12, 4)) +axes[0].plot(b_np[feasible_fd], φ_fd_mid[feasible_fd], 'b-', lw=2) +axes[0].set_xlabel("next-period debt $b'$") +axes[0].set_ylabel(r"$\phi^{fd}(b', \xi_1)$") + +axes[1].plot(b_np[feasible_fd], V_fd_mid[feasible_fd], 'r-', lw=2) +axes[1].set_xlabel("next-period debt $b'$") +axes[1].set_ylabel(r"$V^{fd}(b', \xi_1)$") + +plt.tight_layout() +plt.show() +``` + +```{solution-end} +``` + +```{exercise-start} +:label: dovis_ex2 +``` + +Run the particle filter on the synthetic data with different numbers of particles ($N = 500, 2000, 10000$). + +Plot the recovered $\xi$ path for each and assess convergence. + +```{exercise-end} +``` + +```{solution-start} dovis_ex2 +:class: dropdown +``` + +The following code runs the particle filter with different numbers of particles and plots the recovered paths to assess convergence. 
+ +```{code-cell} ipython3 +fig, axes = plt.subplots(1, 2, figsize=(14, 5)) + +for i, (N_part, color) in enumerate(zip( + [500, 2000, 10000], ['tab:orange', 'tab:blue', 'tab:green'])): + pf_k = jax.random.PRNGKey(100 + i) + θ_f, ξ_f, b_f, φ_f, ll = particle_filter( + y_data, pf_k, N_part, + b_init=0.20, φ_init=3.0, + θ_bar=θ_bar_pf, ξ_init=0.1, + ρ_θ=ρ_θ, σ_θ=σ_θ, + α_l=α_l_pf, α_ξ=α_ξ_pf, ξ_bar=ξ_bar_pf, + β=β, κ=κ, η_m=η_m, λ=λ_gumbel, + σ_π=2.5, σ_b=3.0 + ) + + axes[0].plot(years, ξ_f, color=color, lw=2, + label=f'N={N_part} (LL={ll:.1f})') + axes[1].plot(years, θ_f, color=color, lw=2, + label=f'N={N_part}') + +axes[0].set_ylabel('ξ_1') +axes[0].set_xlabel('year') +axes[0].legend() +axes[0].axvline(1960 + t_reform, color='gray', ls='--', alpha=0.5) + +axes[1].set_ylabel('θ') +axes[1].set_xlabel('year') +axes[1].legend() +axes[1].axvline(1960 + t_reform, color='gray', ls='--', alpha=0.5) + +plt.tight_layout() +plt.show() +``` + +```{solution-end} +``` diff --git a/lectures/gorman_heterogeneous_households.md b/lectures/gorman_heterogeneous_households.md index 33df7705..431026d7 100644 --- a/lectures/gorman_heterogeneous_households.md +++ b/lectures/gorman_heterogeneous_households.md @@ -724,9 +724,7 @@ $$ ```{code-cell} ipython3 def doublej2(A1, B1, A2, B2, tol=1e-15, max_iter=10_000): - r""" - Compute V = Σ_{t=0}^∞ A1^t B1 B2' (A2')^t via a doubling algorithm. - """ + """Compute V = sum_{t=0}^inf A1^t B1 B2' (A2')^t via a doubling algorithm.""" A1 = np.asarray(A1, dtype=float) A2 = np.asarray(A2, dtype=float) B1 = np.asarray(B1, dtype=float) @@ -1687,7 +1685,7 @@ We use the same technology and preference parameters. We set household-specific parameters below and impose $\sum_j \phi_j = 1$. 
```{code-cell} ipython3 -np.random.seed(42) +rng = np.random.default_rng(42) N = 100 # Aggregate endowment process parameters @@ -1697,8 +1695,8 @@ N = 100 # Mean endowments α_j and aggregate exposure φ_j -αs = np.random.uniform(3.0, 5.0, N) -φs_raw = np.random.uniform(0.5, 1.5, N) +αs = rng.uniform(3.0, 5.0, N) +φs_raw = rng.uniform(0.5, 1.5, N) φs = φs_raw / np.sum(φs_raw) # normalize so Σ φ_j = 1 # Rank households by mean endowment to assign idiosyncratic risk @@ -1900,7 +1898,7 @@ By contrast, capital is an endogenous stock that accumulates when the planner s The positive endowment shock increases resources temporarily, but Hall-style preferences imply the planner saves part of the windfall rather than consuming it immediately. -This causes capital to rise as $d_{a,t}$ falls — this is permanent income logic at work. +This causes capital to rise as $d_{a,t}$ falls -- this is permanent income logic at work. Next, we examine whether the household consumption and endowment paths generated by the simulation obey the Gorman sharing rule diff --git a/lectures/hansen_jagannathan_1991.md b/lectures/hansen_jagannathan_1991.md new file mode 100644 index 00000000..9ecd9ae8 --- /dev/null +++ b/lectures/hansen_jagannathan_1991.md @@ -0,0 +1,1301 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.17.1 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(hansen_jagannathan_1991)= +```{raw} jupyter + +``` + +# The Hansen-Jagannathan Bound + +```{contents} Contents +:depth: 2 +``` + +## Overview + +This lecture is based on {cite:t}`Hansen_Jagannathan_1991`. + +In a rich class of models of dynamic economies, the equilibrium price of a +future payoff on any traded security can be represented as the expectation of +the product of the payoff and an intertemporal marginal rate of substitution +(IMRS). 
+ +Hansen and Jagannathan ask: what can asset market data alone tell us about +$m$, without committing to any particular model? + +Their answer is a set of **volatility bounds** -- lower bounds on how +volatile $m$ must be. + +These bounds require no parametric assumptions and +apply to a wide range of models. + +They are constructed by: + +1. projecting $m$ onto the space of traded payoffs to find the least-volatile + $m$ consistent with observed prices, +2. exploiting a duality between this SDF frontier and the familiar + mean-variance frontier for asset returns, and +3. tightening the bound further by requiring $m \geq 0$ (ruling out + arbitrage), which introduces option-like truncations of portfolio payoffs. + +The resulting admissible region in $[E(m),\, \sigma(m)]$ space is a diagnostic: +any candidate model must place $m$ inside this region. + +Applied to U.S. stock and bond data, the bounds provide an alternative +characterization of the **equity premium puzzle** ({cite}`MehraPrescott1985`): +a representative consumer with standard CRRA preferences needs implausibly +high risk aversion to generate enough IMRS volatility to match the data. + +In this lecture we derive these bounds, implement them in Python, and +replicate the key results of the paper. + +We start with some standard imports. + +```{code-cell} ipython3 +from pathlib import Path + +import numpy as np +import matplotlib.pyplot as plt +import pandas as pd +from scipy.optimize import minimize +import json +``` + +## The asset pricing framework + +### General model + +Consider an economy in which multiple consumers (possibly with heterogeneous +preferences and information sets) trade a vector $x$ of $n$ asset payoffs at +date $T$. + +Let $q$ denote the $n \times 1$ vector of prices at date 0. + +For any consumer $j$ with IMRS $m^j$, + +$$ +q = E\!\left(x \, m^j \mid I^j\right). 
+$$ + +Applying the law of iterated expectations, this implies the pricing relation +for any consumer's IMRS $m$ and the common information set +$I = \cap_j I^j$: + +$$ +q = E(xm \mid I). +$$ + +Taking unconditional expectations of both sides gives: + +**Restriction 1** (Pricing restriction): + +$$ +Eq = E(xm). +$$ + +**Restriction 2** (Positivity): + +$$ +m \geq 0. +$$ + +Restriction 1 must hold in any model consistent with consumer optimality. + +Restriction 2 rules out arbitrage opportunities: {doc}`hansen_richard_1987` show that no-arbitrage implies $m > 0$ with probability one, so in particular $m \geq 0$. + +Together, they imply that +$[E(m), \sigma(m)]$ must lie in a certain admissible region in the +mean-standard deviation plane. + +### The CRRA benchmark + +A natural benchmark is the IMRS of a representative consumer with CRRA +preferences. + +If the period utility function is $U(c) = c^{1+\gamma}/(1+\gamma)$ for +$\gamma < 0$, then + +$$ +m = \beta \left(\frac{c_{t+1}}{c_t}\right)^{\gamma}, +$$ + +where $\beta$ is a subjective discount factor and $-\gamma > 0$ is the +coefficient of relative risk aversion. + +Later we evaluate this model by computing $[E(m), \sigma(m)]$ from consumption +data for various values of $\gamma$ and checking whether the implied pairs lie +inside the admissible region. 
+ +```{code-cell} ipython3 +def crra_points_from_consumption(consumption, β=0.95, γ_grid=None): + """Mean and std of IMRS m = β(c_{t+1}/c_t)^γ for each γ < 0.""" + if γ_grid is None: + γ_grid = -np.arange(31) + + growth = np.asarray(consumption[1:] / consumption[:-1], dtype=float) + means = [] + sigmas = [] + + + for γ in γ_grid: + m = β * growth ** γ + means.append(m.mean()) + sigmas.append(m.std()) + + return np.asarray(means), np.asarray(sigmas) +``` + +### Sample moments and population moments + +Under ergodicity, the time-series averages + +$$ +\hat{E}(x) = \frac{1}{T}\sum_{t=1}^T x_t, \quad +\hat{E}(q) = \frac{1}{T}\sum_{t=1}^T q_t, \quad +\widehat{\mathrm{Cov}}(x) = \frac{1}{T}\sum_{t=1}^T (x_t - \hat{E}x)(x_t - \hat{E}x)^\top +$$ + +converge to their population counterparts. + +In what follows we use population moments (or simulated sample moments) +interchangeably. + +```{code-cell} ipython3 +:tags: [hide-input] + +import json, urllib.request + +DATA_URL = ( + "_static/lecture_specific/hansen_jagannathan_1991/" + "hansen_jagannathan_1991_data.json" +) + + +def _load_bundle(url): + """Load the data bundle from a local path or remote URL.""" + if url.startswith(("http://", "https://")): + with urllib.request.urlopen(url) as resp: + raw = json.loads(resp.read().decode()) + else: + path = Path(url) + if not path.exists(): + path = Path("lectures") / url + with open(path) as f: + raw = json.load(f) + bundle = {} + for key in raw: + bundle[key] = pd.DataFrame( + raw[key]["data"], + columns=raw[key]["columns"], + index=raw[key]["index"], + ) + return bundle + + +DATA_BUNDLE = _load_bundle(DATA_URL) + + +def compute_moments(returns, prices=None): + """Return (μ_x, μ_q, Σ) from payoff and price arrays.""" + returns = np.asarray(returns, dtype=float) + if returns.ndim == 1: + returns = returns[:, None] + + μ_x = returns.mean(axis=0) + Σ = np.cov(returns.T, bias=True) + if Σ.ndim == 0: + Σ = np.array([[float(Σ)]]) + + if prices is None: + μ_q = 
np.ones(returns.shape[1]) + else: + prices = np.asarray(prices, dtype=float) + if prices.ndim == 1: + prices = prices[:, None] + μ_q = prices.mean(axis=0) + + return μ_x, μ_q, Σ +``` + +We use data built from three sources: + +- *Annual* (1891--1985): stock, bond, and consumption series from + Robert Shiller's [chapter-26 workbook](http://www.econ.yale.edu/~shiller/data.htm). +- *Monthly*: real stock returns from Shiller's Irrational Exuberance + workbook; real Treasury bill returns and per-capita consumption from + [FRED](https://fred.stlouisfed.org/) (TB3MS, CPIAUCSL, DNDGRG3M086SBEA, + DSERRG3M086SBEA, POPTHM). +- *Quarterly*: holding-period returns on 3-, 6-, 9-, and 12-month bills + constructed from FRED yields (TB3MS, TB6MS, GS1) deflated by CPIAUCSL. + +```{code-cell} ipython3 + +def load_annual_paper_data(): + data = DATA_BUNDLE["annual"].copy() + return ( + data["year"].to_numpy(), + data["stock"].to_numpy(), + data["bond"].to_numpy(), + data["consumption"].to_numpy(), + ) +``` + +## The linear volatility bound (without positivity) + +### Constructing $m^*$ + +Suppose we only impose Restriction 1. + +Among all random variables $m$ satisfying $Eq = E(xm)$, what is the minimum +variance? + +Hansen and Jagannathan show that the answer is the minimum second-moment +projection of $m$ onto the space $P = \{c^\top x : c \in \mathbb{R}^n\}$. + +This projection, call it $m^*$, satisfies + +$$ +m^* = x^\top \alpha^*, \qquad \alpha^* = (Exx^\top)^{-1} Eq. +$$ + +For any valid $m$, the residual $m - m^*$ is orthogonal to every element of +$P$. + +To see this, note that for any $c \in \mathbb{R}^n$: + +$$ +E[x^\top c \cdot (m - m^*)] = c^\top E[x(m - m^*)] = c^\top (Eq - Eq) = 0, +$$ + +The last equality uses the fact that both $m$ and $m^*$ satisfy the pricing +restriction $E(xm) = Eq$. + +Since $m^*$ is in $P$ and $m - m^*$ is orthogonal to $P$, we have +$E[m^*(m - m^*)] = 0$. 
+ +Writing $m = m^* + (m - m^*)$ and expanding the second moment: + +$$ +E(m^2) = E(m^{*2}) + 2E[m^*(m - m^*)] + E[(m - m^*)^2] + = E(m^{*2}) + E[(m - m^*)^2]. +$$ + +When $Em = Em^*$, we can subtract $(Em)^2 = (Em^*)^2$ from both sides to obtain +the **variance decomposition**: + +$$ +\sigma^2(m) = \sigma^2(m^*) + \sigma^2(m - m^*) \geq \sigma^2(m^*). +$$ + +The inequality holds because $\sigma^2(m - m^*) \geq 0$. + +### When there is a riskless asset + +If $x$ includes a riskless bond -- a security that costs $q = 1$ today and +pays $x = r_f$ with certainty -- then Restriction 1 applied to this payoff +gives $1 = E(r_f \cdot m) = r_f \cdot Em$, so $Em = 1/r_f$. + +Since every valid $m$ must have the same mean, the variance decomposition +yields a single bound: + +$$ +\sigma(m) \geq \sigma(m^*). +$$ + +### When there is no riskless asset + +The variance decomposition requires $Em = Em^*$ (otherwise the cross-terms do +not cancel). + +When $x$ does not include a unit payoff, the pricing restriction $Eq = E(xm)$ +provides $n$ equations in the $n$ coefficients $\alpha$ but places no +constraint on $Em$. + +To see why: $m^* = x^\top \alpha^*$ has mean $Em^* = (Ex)^\top \alpha^*$, +determined by $\alpha^* = (Exx^\top)^{-1} Eq$. + +A different valid $m$ (not in $P$) can satisfy the same $n$ pricing equations +with a different mean. + +Since we cannot rule out valid $m$'s with other means, we must compute the +bound separately for each candidate mean. + +The bound therefore traces out a curve. + +For each hypothetical mean $v = Em$, we augment $x$ with a unit payoff +assigned expected price $v$ and construct + +$$ +m^v = x_a^\top \alpha^v, \qquad \alpha^v = (Ex_a x_a^\top)^{-1} Eq_a, +$$ + +where $x_a = (x^\top, 1)^\top$ and $q_a = (q^\top, v)^\top$. + +The bound is + +$$ +\sigma(m) \geq \sigma(m^v) = +\left[(Eq - v \, Ex)^\top \Sigma^{-1} (Eq - v \, Ex)\right]^{1/2}, +$$ + +where $\Sigma = \mathrm{Cov}(x)$ is the covariance matrix of payoffs. 
+ +This formula requires only the means of prices and payoffs and the covariance +matrix of payoffs. + +```{code-cell} ipython3 +def hj_bound_no_positivity(μ_x, μ_q, Σ, v_grid=None): + """HJ volatility bound without positivity: σ(m^v) = sqrt[(Eq - v*Ex)' Σ^{-1} (Eq - v*Ex)].""" + if v_grid is None: + v_grid = np.linspace(0.85, 1.15, 300) + + Σ_inv = np.linalg.pinv(Σ) + σ_bound = np.array([ + np.sqrt(np.maximum((μ_q - v * μ_x) @ Σ_inv @ (μ_q - v * μ_x), 0.0)) + for v in v_grid + ]) + return v_grid, σ_bound +``` + +### Duality with the mean-variance frontier for returns + +Now we derive the relation between the mean-standard deviation frontier for +$m$ and the mean-variance frontier for asset returns. + +Let $\pi(p) = E(mp)$ denote the expected-price functional that maps each +payoff $p$ in $P$ to its price (see {doc}`hansen_richard_1987` for the full +development of $\pi$ as the Riesz representation of the pricing functional). + +Define the set of returns as + +$$ +R \equiv \{p \in P : \pi(p) = 1\}. +$$ + +$R$ contains all payoffs in $P$ with expected prices equal to one. + +Suppose $P$ contains a unit payoff and $\pi(1) \neq 0$. + +Then $1/\pi(1)$ is in $R$. + +A second payoff in $R$ is $r^* \equiv m^*/\pi(m^*)$, where +$\pi(m^*) = E(m^{*2})$, so + +$$ +\|r^*\| = \frac{\|m^*\|}{\|m^*\|^2} = \frac{1}{\|m^*\|}. +$$ + +{doc}`hansen_richard_1987` established that $r^*$ is the payoff in $R$ with the +smallest norm (second moment). + +Since $m^* = \pi(m^*) \cdot r^*$, the frontier IMRS $m^*$ is proportional to +$r^*$. + +Consequently, $r^*$ solves + +$$ +\min_{r \in R} \sigma(r) \quad \text{subject to} \quad Er = \mu +$$ + +when $\mu$ is set equal to $Er^*$. + +The proportionality $m^* = \pi(m^*) \cdot r^*$ implies + +$$ +\frac{\sigma(m^*)}{Em^*} = \frac{\sigma(r^*) \|m^*\|^2}{Em^*} = \frac{\sigma(r^*)}{Er^*}. +$$ + +Since $E(r^2) = \sigma(r)^2 + (Er)^2$, the mean-standard deviation frontier +for $R$ is a cone with apex at $[0,\; 1/\pi(1)]$ in $(\sigma, \mu)$ space. 
+ +The point $r^*$ lies on the lower (efficient) portion of this frontier. + +The lower portion is a ray from $[0, 1/\pi(1)]$ through +$[\sigma(r^*), Er^*]$. + +The slope of this ray is the Sharpe ratio of $r^*$: $\{Er^* - [1/\pi(1)]\}/\sigma(r^*)$. + +The circle of radius $\|r^*\|$ centered at the origin passes through +$[\sigma(r^*), Er^*]$ with slope $-\sigma(r^*)/Er^*$. + +Equating the two slopes gives (equation (16) of the paper) + +$$ +\frac{\sigma(r^*)}{Er^*} = \frac{[1/\pi(1)] - Er^*}{\sigma(r^*)}. +$$ + +Combining with the variance decomposition $\sigma(m) \geq \sigma(m^*)$: + +$$ +\frac{\sigma(m)}{Em} +\geq \frac{\sigma(m^*)}{Em^*} += \frac{\sigma(r^*)}{Er^*} += \frac{[1/\pi(1)] - Er^*}{\sigma(r^*)}. +$$ + +This is the **Hansen-Jagannathan bound** (HJ bound): $\sigma(m)/Em$ is bounded +below by the absolute value of the slope of the mean-standard deviation +frontier for $R$. + +```{note} +Footnote 4 of the paper notes an alternative: apply Cauchy-Schwarz to any +zero-price payoff $z$ to get $\sigma(m)/Em \geq |Ez|/\sigma(z)$, then +maximise over $z$. + +{doc}`Doubts or Variability? ` uses this route. + +This lecture follows the paper's projection construction (Section III) and +duality argument (Section III.C). +``` + +The following two functions implement the mean-variance frontier and the +maximum Sharpe ratio. 
+ +```{code-cell} ipython3 +def mean_variance_frontier(μ_x, Σ, n_points=300): + """Mean-standard-deviation frontier via the two-fund formula.""" + n = len(μ_x) + Σ_inv = np.linalg.pinv(Σ) + ones = np.ones(n) + + A = μ_x @ Σ_inv @ μ_x + B = μ_x @ Σ_inv @ ones + C = ones @ Σ_inv @ ones + D = A * C - B**2 + + c_min = B / C + c_grid = np.linspace(c_min - 0.10, c_min + 0.15, n_points) + + var_c = (C * c_grid**2 - 2 * B * c_grid + A) / D + std_c = np.sqrt(np.maximum(var_c, 0)) + return c_grid, std_c + + +def max_sharpe_ratio(μ_x, μ_q, Σ, rf=None): + """Maximum Sharpe ratio from the asset menu.""" + n = len(μ_x) + Σ_inv = np.linalg.pinv(Σ) + + if rf is not None: + μ_exc = μ_x - rf + w_tan = Σ_inv @ μ_exc + sr_max = (μ_exc @ w_tan) / np.sqrt(w_tan @ Σ @ w_tan) + else: + ones = np.ones(n) + A = μ_x @ Σ_inv @ μ_x + B = μ_x @ Σ_inv @ ones + C = ones @ Σ_inv @ ones + D = A * C - B**2 + sr_max = np.sqrt(D / C) + + return float(sr_max) +``` + +## Computing the annual frontier + +We now compute the HJ bound from annual US stock and bond returns +(1891--1985). + +```{code-cell} ipython3 +annual_years, annual_stock, annual_bond, annual_consumption = load_annual_paper_data() +annual_payoffs = np.column_stack([annual_stock, annual_bond]) +annual_prices = np.ones_like(annual_payoffs) + +μ_x_annual, μ_q_annual, Σ_annual = compute_moments(annual_payoffs, annual_prices) +v_annual, σ_annual = hj_bound_no_positivity( + μ_x_annual, μ_q_annual, Σ_annual, v_grid=np.linspace(0.84, 1.16, 400) +) + +annual_γ_grid = -np.arange(31) +annual_crra_mean, annual_crra_std = crra_points_from_consumption( + annual_consumption, β=0.95, γ_grid=annual_γ_grid +) +``` + + + +The HJ bound traces out the boundary of the admissible region $S$ in +$[E(m),\, \sigma(m)]$ space. + +Any parametric model must place its implied $[E(m), \sigma(m)]$ pair inside +$S$. 
+ +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Annual IMRS frontier + name: fig-annual-imrs-frontier +--- +fig, ax = plt.subplots(figsize=(8, 5)) + +ax.fill_between(v_annual, σ_annual, 2.4, alpha=0.15) +ax.plot(v_annual, σ_annual, lw=2) +ax.scatter( + annual_crra_mean, + annual_crra_std, + marker="s", + s=20, + facecolors="white", + edgecolors="black", + linewidths=0.8, +) +annual_log_point = np.array([np.mean(1.0 / annual_stock), np.std(1.0 / annual_stock)]) +ax.scatter( + annual_log_point[0], + annual_log_point[1], + marker="x", + s=50, + color="black", +) + +ax.set_xlim(0.84, 1.16) +ax.set_ylim(0.0, 2.4) +ax.set_xlabel("mean") +ax.set_ylabel("standard deviation") + +plt.tight_layout() +plt.show() +``` + +The shaded region is the admissible set $S$: any valid IMRS must have a +$[E(m), \sigma(m)]$ pair inside it. + +The squares show the IMRS implied by +CRRA preferences $m = \beta (c_{t+1}/c_t)^{\gamma}$ for $\gamma = 0, -1, +\ldots, -30$ with $\beta = 0.95$. + +Only at high values of $|\gamma|$ do the +squares enter the admissible region. + +The cross marks the reciprocal of the +stock return, $1/r_{\text{stock}}$, as a simple benchmark. + +## The duality theorem + +The preceding figure illustrates the duality between the two frontiers -- +the SDF frontier and the asset return mean-variance frontier -- that Hansen +and Jagannathan establish formally. + +````{prf:theorem} Duality (Section III.C) +:label: hj_duality_theorem + +For any $v$, let $R_v$ denote the set of returns augmented by a hypothetical +riskless bond priced at $v$, and let $r_v^*$ denote the minimum second-moment +return in $R_v$. + +Then $m_v$ is proportional to $r_v^*$, and + +$$ +\frac{\sigma(m_v)}{v} = \frac{\sigma(r_v^*)}{Er_v^*} += \frac{|[1/v] - Er_v^*|}{\sigma(r_v^*)}, +$$ + +i.e. the bound on $\sigma(m)/Em$ at mean $v$ equals the absolute value of the +slope of the mean-standard deviation frontier for $R_v$. 
+```` + +To illustrate this duality, we compute the mean-variance frontier for asset +returns using quarterly Treasury bill data (3-, 6-, 9-, and 12-month +holding-period returns). + +We also locate the minimum second-moment payoff $r^*$ on the frontier, which +is the return proportional to the minimum-variance IMRS $m^*$. + +```{code-cell} ipython3 +def load_quarterly_bill(): + return DATA_BUNDLE["quarterly"].copy() + +# Mean-variance frontier from quarterly bill returns +quarterly_bill_data = load_quarterly_bill().to_numpy() +μ_bill = quarterly_bill_data.mean(axis=0) +Σ_bill = np.cov(quarterly_bill_data.T, bias=True) +Σ_inv_bill = np.linalg.pinv(Σ_bill) +ones_bill = np.ones(len(μ_bill)) + +A_bill = μ_bill @ Σ_inv_bill @ μ_bill +B_bill = μ_bill @ Σ_inv_bill @ ones_bill +C_bill = ones_bill @ Σ_inv_bill @ ones_bill +D_bill = A_bill * C_bill - B_bill**2 + +# Frontier: σ^2 = (C*μ^2 - 2B*μ + A) / D => μ = (B +/- sqrt(D*(C*σ^2 - 1))) / C +σ_min_bill = 1.0 / np.sqrt(C_bill) +σ_grid_bill = np.linspace(σ_min_bill * 1.001, 1.3, 1000) +disc_bill = D_bill * (C_bill * σ_grid_bill**2 - 1) +disc_bill = np.maximum(disc_bill, 0) +μ_upper_bill = (B_bill + np.sqrt(disc_bill)) / C_bill +μ_lower_bill = (B_bill - np.sqrt(disc_bill)) / C_bill + +# Minimum second-moment payoff r* +μ_star_bill = B_bill / (C_bill + D_bill) +σ_star_bill = np.sqrt( + max( + (C_bill * μ_star_bill**2 - 2 * B_bill * μ_star_bill + A_bill) / D_bill, 0) +) +r_star_norm = np.sqrt(σ_star_bill**2 + μ_star_bill**2) +``` + +The next figure plots the mean-standard deviation frontier for returns in $R$ +together with a quarter-circle of radius $\|r^*\|$. + +The tangency point locates the minimum second-moment payoff $r^*$. 
+ +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Minimum second-moment payoff in $R$ + name: fig-min-second-moment-payoff +--- +θ_circle = np.linspace(0, np.pi / 2, 400) +σ_circle = r_star_norm * np.cos(θ_circle) +μ_circle = r_star_norm * np.sin(θ_circle) + +σ_combined = np.concatenate([σ_grid_bill[::-1], σ_grid_bill]) +μ_combined = np.concatenate([μ_lower_bill[::-1], μ_upper_bill]) + +fig, ax = plt.subplots(figsize=(8, 5)) +ax.plot(σ_combined, μ_combined, lw=2) +ax.plot(σ_circle, μ_circle, lw=2) +ax.scatter([σ_star_bill], [μ_star_bill], s=30, zorder=5) +ax.set_xlim(0.0, 1.3) +ax.set_ylim(0.0, 1.0) +ax.set_xlabel("standard deviation") +ax.set_ylabel("mean") +plt.tight_layout() +plt.show() +``` + +The next figure zooms in on the frontier and adds the augmented set $R_v$, +which includes a hypothetical riskless bond priced at $v$. + +The lines through $(0, 1/v)$ tangent to the $R$ frontier trace the boundary +of $R_v$. + +By {prf:ref}`hj_duality_theorem`, the slope of these tangent lines equals +$\sigma(m^v)/v$. 
+ +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: Mean-standard-deviation frontiers for $R$ and $R_v$ + name: fig-frontiers-r-rv +--- +σ_zoom = np.linspace(σ_min_bill * 1.001, 0.04, 500) +disc_zoom = D_bill * (C_bill * σ_zoom**2 - 1) +disc_zoom = np.maximum(disc_zoom, 0) +μ_up_zoom = (B_bill + np.sqrt(disc_zoom)) / C_bill +μ_lo_zoom = (B_bill - np.sqrt(disc_zoom)) / C_bill + +σ_comb_zoom = np.concatenate([σ_zoom[::-1], σ_zoom]) +μ_comb_zoom = np.concatenate([μ_lo_zoom[::-1], μ_up_zoom]) + +# Augmented R_v: tangent from (0, 1/v) to the frontier +μ_vertex_bill = B_bill / C_bill +v_fig3 = 1.0 / (μ_vertex_bill + 0.006) +rf_fig3 = 1.0 / v_fig3 + +valid_σ = σ_zoom > 1e-10 +slopes_up = np.abs(μ_up_zoom[valid_σ] - rf_fig3) / σ_zoom[valid_σ] +slopes_lo = np.abs(μ_lo_zoom[valid_σ] - rf_fig3) / σ_zoom[valid_σ] +max_slope = max(np.max(slopes_up), np.max(slopes_lo)) + +σ_line = np.linspace(0, 0.04, 200) + +fig, ax = plt.subplots(figsize=(8, 5)) +ax.plot(σ_comb_zoom, μ_comb_zoom, lw=2, label=r"$R$") +rv_line = ax.plot(σ_line, rf_fig3 + max_slope * σ_line, lw=2, label=r"$R_v$") +ax.plot(σ_line, rf_fig3 - max_slope * σ_line, lw=2, color=rv_line[0].get_color()) +ax.set_xlim(0.0, 0.04) +ax.set_ylim(0.98, 1.02) +ax.set_xlabel("standard deviation") +ax.set_ylabel("mean") +ax.legend(frameon=False, fontsize=9) +plt.tight_layout() +plt.show() +``` + +## Tightening the bound: imposing positivity of $m$ + +### Option-based construction + +When we also impose Restriction 2 ($m \geq 0$), the bound can be tightened +because many of the frontier $m^v$'s that solve the linear problem may be +negative with positive probability. + +Hansen and Jagannathan show that the minimum variance **nonnegative** $m$ +satisfying Restriction 1 is of the form + +$$ +\tilde{m}^v = \left(x_a^\top \tilde{\alpha}^v\right)^+ = \max\!\left\{x_a^\top \tilde{\alpha}^v,\ 0\right\}, +$$ + +which is the payoff on a **European call (or put) option** on a portfolio of +the assets. 
+ +Note that $\tilde{\alpha}^v$ is *not* the same coefficient vector as +$\alpha^v$ from the unconstrained problem: the positivity constraint changes +the optimal portfolio weights, and the positive part is then applied to the +result. + +The positive bound $\sigma(\tilde{m}^v)$ satisfies: + +- $\sigma(\tilde{m}^v) \geq \sigma(m^v)$ (it is tighter). +- The admissible region $S^+$ (with positivity) is a proper subset of $S$. +- $S^+$ is **convex**. + +Computing $\sigma(\tilde{m}^v)$ requires knowing the distribution of +$x_a^\top \tilde{\alpha}^v$, not just its first two moments. + +For the figures below, we use the exact sample analogue of the truncation +problem and solve it numerically over a grid of candidate means. + +```{code-cell} ipython3 +def positive_frontier_from_sample(payoffs, prices, v_grid, maxiter=2_000): + """Positivity-restricted HJ frontier via constrained optimisation.""" + x = np.asarray(payoffs, dtype=float) + q = np.asarray(prices, dtype=float) + + if x.ndim == 1: + x = x[:, None] + if q.ndim == 1: + q = q[:, None] + + T = x.shape[0] + μ_q = q.mean(axis=0) + x_aug = np.column_stack([x, np.ones(T)]) + second_moment = x_aug.T @ x_aug / T + pinv_second = np.linalg.pinv(second_moment) + + means = [] + sigmas = [] + w_prev = None + + for v in v_grid: + q_aug = np.r_[μ_q, v] + + w0 = pinv_second @ q_aug + scale = q_aug @ w0 + + if abs(scale) < 1e-12: + means.append(np.nan) + sigmas.append(np.nan) + continue + + w0 = w0 / scale + + candidates = [w0] + if w_prev is not None: + s2 = q_aug @ w_prev + if abs(s2) > 1e-12: + candidates.append(w_prev / s2) + + best_obj = np.inf + best_result = None + + for w_init in candidates: + def objective(w): + r = x_aug @ w + return np.mean(np.maximum(r, 0.0) ** 2) + + def jac(w): + r = x_aug @ w + rp = np.maximum(r, 0.0) + return 2.0 * (x_aug.T @ rp) / T + + result = minimize( + objective, + w_init, + jac=jac, + method="SLSQP", + constraints=( + { + "type": "eq", + "fun": lambda w, qa=q_aug: qa @ w - 1.0, + "jac": 
lambda w, qa=q_aug: qa, + }, + ), + options={"maxiter": maxiter, "ftol": 1e-14}, + ) + + if result.fun < best_obj: + best_obj = result.fun + best_result = result + + r_plus = np.maximum(x_aug @ best_result.x, 0.0) + δ_v = np.mean(r_plus ** 2) + + if δ_v < 1e-14: + means.append(np.nan) + sigmas.append(np.nan) + continue + + m = r_plus / δ_v + means.append(m.mean()) + sigmas.append(m.std()) + w_prev = best_result.x.copy() + + return np.asarray(means), np.asarray(sigmas) +``` + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: IMRS frontier with and without positivity + name: fig-imrs-positivity +--- +annual_pos_mean, annual_pos_std = positive_frontier_from_sample( + annual_payoffs, + annual_prices, + v_annual, +) + +fig, ax = plt.subplots(figsize=(8, 5)) +ax.fill_between(v_annual, σ_annual, 2.4, alpha=0.1) +ax.plot(v_annual, σ_annual, "--", lw=2, label="without positivity") + +valid_annual = np.isfinite(annual_pos_std) +order = np.argsort(annual_pos_mean[valid_annual]) +ax.fill_between( + annual_pos_mean[valid_annual][order], + annual_pos_std[valid_annual][order], + 2.4, + alpha=0.2, +) +ax.plot( + annual_pos_mean[valid_annual][order], + annual_pos_std[valid_annual][order], + lw=2, + label="with positivity", +) + +ax.set_xlim(0.84, 1.16) +ax.set_ylim(0.0, 2.4) +ax.set_xlabel("mean") +ax.set_ylabel("standard deviation") +ax.legend(frameon=False, fontsize=9, loc="lower right") +plt.tight_layout() +plt.show() +``` + +The dashed curve is the linear bound (without positivity). + +The solid curve is the tighter bound obtained by imposing $m \geq 0$. + +The admissible region $S^+$ (with positivity) is a proper subset of $S$. + +Requiring positivity eliminates a portion of the admissible region near the +extremes of $E(m)$, where the linear frontier $m^v$ would need to take +negative values with high probability. + +## The equity premium puzzle revisited + +The HJ bound provides a nonparametric restatement of the equity premium puzzle. 
+
+For the bound to be met, the IMRS of the representative agent must be far more
+volatile than consumption growth alone can generate under standard preferences.
+
+For a CRRA consumer with risk aversion $-\gamma$ (recall $\gamma < 0$),
+
+$$
+m = \beta \left(\frac{c_{t+1}}{c_t}\right)^{\gamma}.
+$$
+
+If consumption growth is lognormal with mean $\mu_c$ and standard deviation
+$\sigma_c$, then
+
+$$
+E(m) = \beta \exp\!\left(\gamma \mu_c + \tfrac{1}{2} \gamma^2 \sigma_c^2\right),
+\quad
+\frac{\sigma(m)}{E(m)} = \sqrt{\exp\!\left(\gamma^2 \sigma_c^2\right) - 1}
+\approx |\gamma| \sigma_c.
+$$
+
+To meet the HJ bound $\sigma(m)/E(m) \geq \text{SR}_{\max}$, we need
+
+$$
+|\gamma| \sigma_c \gtrsim \text{SR}_{\max}.
+$$
+
+With U.S. annual data, $\text{SR}_{\max} \approx 0.37$ and $\sigma_c \approx
+0.033$, so the required risk aversion is roughly $|\gamma| \approx 11$
+(i.e. $\gamma \approx -11$).
+
+This is far higher than the values of 1--5 that are typically considered
+plausible.
+
+The table reports $E(m)$ and $\sigma(m)$ for selected values of $\gamma$ against the **positivity-restricted** frontier
+and indicates whether the implied IMRS lies inside the admissible region.
+ +```{code-cell} ipython3 +# Use the positivity-restricted frontier for the bound +valid_pos = np.isfinite(annual_pos_std) +pos_order = np.argsort(annual_pos_mean[valid_pos]) +pos_mean_sorted = annual_pos_mean[valid_pos][pos_order] +pos_std_sorted = annual_pos_std[valid_pos][pos_order] + +rows = [] +for g, E_m, s_m in zip(annual_γ_grid, annual_crra_mean, annual_crra_std): + bound_val = float(np.interp(E_m, pos_mean_sorted, pos_std_sorted)) + if g in {0, -1, -2, -5, -10, -15, -20, -25, -30}: + rows.append({'γ': g, 'E(m)': round(E_m, 4), + 'σ(m)': round(s_m, 4), + 'Bound': round(bound_val, 4), + 'Inside': s_m >= bound_val}) + +pd.DataFrame(rows).set_index('γ') +``` + +## Time-nonseparable preferences + +Section V of the paper examines whether relaxing time separability can help +close the gap to the HJ bound. + +Consider the nonseparable service flow + +$$ +s_t = c_t + \theta c_{t-1}, +$$ + +where $\theta > 0$ represents **local durability** and $\theta < 0$ represents +**habit persistence** (intertemporal complementarity). + +The IMRS becomes more complex because it depends on current and future +marginal utilities: + +$$ +m = \beta \frac{(s_{t+1})^\gamma + \theta \beta E[(s_{t+2})^\gamma \mid I_{t+1}]} + {(s_t)^\gamma + \theta \beta E[(s_{t+1})^\gamma \mid I_t]}. +$$ + +The paper shows (Figure 5) that habit persistence ($\theta < 0$) dramatically +increases $\sigma(m)$ for given $\gamma$. + +Local durability ($\theta > 0$) barely reduces it. + +The paper's Figure 5 uses a consumption process estimated by Gallant and +Tauchen, which is not bundled with this lecture. + +Instead, we use monthly U.S. stock and bill returns as the two base payoffs. + +We then simulate the nonseparable IMRS for three values of $\theta$ (0, 0.5, +$-0.5$) across a range of $\gamma < 0$ values, and plot the resulting +$[E(m), \sigma(m)]$ pairs against the HJ frontier. 
+ +```{code-cell} ipython3 +monthly_panel = DATA_BUNDLE["monthly"].copy() + +# Two base payoffs: monthly real stock and bill returns +monthly_payoffs = monthly_panel[['stock', 'bill']].dropna().to_numpy() +monthly_prices = np.ones_like(monthly_payoffs) + +v_monthly = np.linspace(0.975, 1.025, 200) +μ_x_monthly, μ_q_monthly, Σ_monthly = compute_moments(monthly_payoffs, monthly_prices) +v_m_nopositivity, σ_m_nopositivity = hj_bound_no_positivity( + μ_x_monthly, μ_q_monthly, Σ_monthly, v_grid=v_monthly +) + + +def simulate_nonseparable_imrs( + T=20_000, + γ=-5, + θ=0.0, + δ=1.0, + μ_c=0.0045, + σ_c=0.0055, + seed=1, +): + rng = np.random.default_rng(seed) + growth = np.exp(μ_c + σ_c * rng.standard_normal(T + 2)) + + c = np.ones(T + 2) + for t in range(T + 1): + c[t + 1] = c[t] * growth[t + 1] + + s = c[1:] + θ * c[:-1] + s = np.maximum(s, 1e-30) # avoid 0**γ when γ < 0 + s_γ = s ** γ + + # Precompute κ via Monte Carlo. + g_mc = np.exp(μ_c + σ_c * rng.standard_normal(500_000)) + κ = np.mean(np.maximum(g_mc + θ, 1e-30) ** γ) + + c_γ = np.maximum(c, 1e-30) ** γ + + num = s_γ[1:T+1] + θ * δ * c_γ[2:T+2] * κ + denom = s_γ[0:T] + θ * δ * c_γ[1:T+1] * κ + + with np.errstate(divide='ignore', invalid='ignore'): + m = δ * num / denom + return m[np.isfinite(m) & (np.abs(m) < 1e6)] +``` + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: IMRS frontier using monthly data + name: fig-imrs-monthly +--- +fig, ax = plt.subplots(figsize=(8, 5)) + +ax.fill_between(v_m_nopositivity, σ_m_nopositivity, 0.4, alpha=0.15) +ax.plot(v_m_nopositivity, σ_m_nopositivity, lw=2) + +for θ, marker, label in [ + (0.0, "s", r"$\theta = 0$"), + (0.5, "o", r"$\theta = 0.5$"), + (-0.5, "^", r"$\theta = -0.5$"), +]: + pts = [] + for γ in range(0, -15, -1): + m_sim = simulate_nonseparable_imrs(γ=γ, θ=θ, seed=abs(γ) + 5) + pts.append((m_sim.mean(), m_sim.std())) + pts = np.asarray(pts) + ax.scatter( + pts[:, 0], + pts[:, 1], + marker=marker, + s=18, + facecolors="white", + edgecolors="black", + 
linewidths=0.8, + label=label, + ) + +monthly_log_point = np.array([np.mean(1.0 / monthly_panel["stock"]), np.std(1.0 / monthly_panel["stock"])]) +ax.scatter(monthly_log_point[0], monthly_log_point[1], marker="x", s=50, color="black") + +ax.set_xlim(0.975, 1.0) +ax.set_ylim(0.0, 0.4) +ax.set_xlabel("mean") +ax.set_ylabel("standard deviation") +ax.legend(frameon=False, fontsize=9, loc="upper left") +plt.tight_layout() +plt.show() +``` + +The squares ($\theta = 0$, time-separable), circles ($\theta = 0.5$, local +durability), and triangles ($\theta = -0.5$, habit persistence) trace out +$[E(m), \sigma(m)]$ pairs as $|\gamma|$ increases. + +Habit persistence shifts the IMRS points upward, making it easier to enter +the admissible region at moderate $|\gamma|$. + +Local durability has little effect. + +```{note} +The paper's Figure 5 uses 8 payoffs (2 base returns plus 6 instrument-scaled +returns) and the Gallant-Tauchen consumption process. + +Here we use just the 2 base payoffs (stock and bill returns) from FRED proxy +data. + +The qualitative pattern matches: habit persistence ($\theta < 0$) helps enter +the admissible region at moderate $|\gamma|$. +``` + +## Treasury bill data and monetary models + +Figure 6 in the paper uses monthly prices on 3-, 6-, 9-, and 12-month +discount bonds to construct real quarterly holding-period returns. + +We build a proxy from FRED Treasury yields and the same real-consumption +deflator used above. + +We compute both the linear and positivity-restricted bounds from these +quarterly bill returns. 
+ +```{code-cell} ipython3 +quarterly_payoffs = load_quarterly_bill().to_numpy() +quarterly_prices = np.ones_like(quarterly_payoffs) + +μ_x_quarterly, μ_q_quarterly, Σ_quarterly = compute_moments( + quarterly_payoffs, quarterly_prices +) +v_quarterly, σ_quarterly = hj_bound_no_positivity( + μ_x_quarterly, + μ_q_quarterly, + Σ_quarterly, + v_grid=np.linspace(0.985, 1.005, 200), +) +quarterly_pos_mean, quarterly_pos_std = positive_frontier_from_sample( + quarterly_payoffs, + quarterly_prices, + v_quarterly, +) +``` + +The figure below plots the resulting IMRS frontier, replicating Figure 6 of +the paper. + +Because our FRED proxy differs from the original CRSP bill data, the levels +differ slightly, but the qualitative features match: the positivity-restricted +region $S^+$ (shaded) is a proper subset of $S$ (dashed boundary), and the +bounds near $Em \approx 1$ are large. + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: IMRS frontier using quarterly returns + name: fig-imrs-quarterly +--- +fig, ax = plt.subplots(figsize=(8, 5)) + +# S+ (with positivity) -- shaded region +valid_quarterly = np.isfinite(quarterly_pos_std) +order = np.argsort(quarterly_pos_mean[valid_quarterly]) +ax.fill_between( + quarterly_pos_mean[valid_quarterly][order], + quarterly_pos_std[valid_quarterly][order], + 2.0, + alpha=0.15, + color='C0', +) +ax.plot( + quarterly_pos_mean[valid_quarterly][order], + quarterly_pos_std[valid_quarterly][order], + lw=2, + label="with positivity", +) + +# S (without positivity) -- dashed boundary +ax.plot(v_quarterly, σ_quarterly, "--", lw=2, label="without positivity") + +ax.set_xlim(0.985, 1.005) +ax.set_ylim(0.0, 2.0) +ax.set_xlabel("mean") +ax.set_ylabel("standard deviation") +ax.legend(frameon=False, fontsize=9, loc="lower right") +plt.tight_layout() +plt.show() +``` + + +## Exercises + +The exercises below use a small simulated exchange economy to verify the +projection and variance-decomposition logic algebraically. 
+ +We provide the code here for you to use in your solution. + +```{code-cell} ipython3 +def simulate_economy( + T=10_000, + γ=2.0, + δ=0.99, + μ_c=0.018, + σ_c=0.033, + μ_d=0.02, + σ_d=0.12, + ρ=0.3, + seed=42, +): + rng = np.random.default_rng(seed) + + Ω = np.array( + [ + [σ_c**2, ρ * σ_c * σ_d], + [ρ * σ_c * σ_d, σ_d**2], + ] + ) + shocks = rng.multivariate_normal([0.0, 0.0], Ω, T) + + gc = np.exp(μ_c + shocks[:, 0]) + gd = np.exp(μ_d + shocks[:, 1]) + m_true = δ * gc ** (-γ) + + rf = 1.0 / np.mean(m_true) + stock_raw = gd + stock = stock_raw / np.mean(m_true * stock_raw) + bond = np.full(T, rf) + + returns = np.column_stack([stock, bond]) + prices = np.ones((T, 2)) + return returns, prices, m_true + + +def crra_imrs_moments(γ, δ=0.99, μ_c=0.018, σ_c=0.033): + E_m = δ * np.exp(-γ * μ_c + 0.5 * γ**2 * σ_c**2) + var_m = δ**2 * np.exp(-2.0 * γ * μ_c + 2.0 * γ**2 * σ_c**2) - E_m**2 + σ_m = np.sqrt(max(var_m, 0.0)) + return E_m, σ_m +``` + +```{exercise} +:label: hj91_ex1 + +Using `simulate_economy` with $\gamma = 5$: + +(a) Construct $m^* = x^\top \alpha^*$ with + $\alpha^* = (E x x^\top)^{-1} E q$ from the simulated payoff data. + +(b) Verify that $m^*$ satisfies the pricing restriction $E(x m^*) \approx \mu_q$. + +(c) Verify the variance decomposition + $\text{Var}(m) = \text{Var}(m^*) + \text{Var}(m - m^*)$ + and check that $m - m^*$ is orthogonal to $m^*$. 
+``` + +```{solution-start} hj91_ex1 +:class: dropdown +``` + +```{code-cell} ipython3 +# Simulate economy with γ = 5 +returns_g5, prices_g5, m_true_g5 = simulate_economy(T=10000, γ=5.0, seed=7) +T5 = len(m_true_g5) + +# Construct m* by projecting onto P = span(returns) +Mxx = (returns_g5.T @ returns_g5) / T5 +alpha_star = np.linalg.solve(Mxx, np.ones(2)) +m_star = returns_g5 @ alpha_star + +residual = m_true_g5[:T5] - m_star + +pd.DataFrame({ + 'E[r_i * m*]': [np.mean(returns_g5[:, i] * m_star) for i in range(2)], +}, index=['Stock', 'Bond']).T +``` + +Both entries are close to 1, confirming that $m^*$ satisfies the pricing +restriction $E(x m^*) = \mu_q$. + +```{code-cell} ipython3 +pd.DataFrame({ + 'Var(m)': [np.var(m_true_g5)], + 'Var(m*)': [np.var(m_star)], + 'Var(m - m*)': [np.var(residual)], + 'Var(m*) + Var(m - m*)': [np.var(m_star) + np.var(residual)], + 'E[(m - m*) m*]': [np.mean(residual * m_star)], +}).T.rename(columns={0: 'Value'}) +``` + +The first and fourth rows are nearly equal, confirming the variance +decomposition $\sigma^2(m) = \sigma^2(m^*) + \sigma^2(m - m^*)$. + +The last row is close to zero, verifying that $m - m^*$ is orthogonal to +$m^* \in P$. + +Since $\text{Var}(m - m^*) > 0$, we have $\sigma(m) > \sigma(m^*)$. 
+ +```{solution-end} +``` \ No newline at end of file diff --git a/lectures/hansen_richard_1987.md b/lectures/hansen_richard_1987.md new file mode 100644 index 00000000..9ea937f1 --- /dev/null +++ b/lectures/hansen_richard_1987.md @@ -0,0 +1,1156 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.17.1 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +(hansen_richard_1987)= +```{raw} html + +``` + +```{index} single: python +``` + +# The Role of Conditioning Information in Asset Pricing + +```{contents} Contents +:depth: 2 +``` + +## Overview + +{cite:t}`HansenRichard1987` investigate testable implications of equilibrium asset pricing models. + +This lecture builds on the mean-variance frontier and stochastic discount factor framework +developed in {doc}`asset_pricing_lph`. + +In a competitive equilibrium model, prices are determined by a **pricing function** +that maps uncertain future payoffs into current prices. + +Alternative models of asset prices -- built from different assumptions about +preferences, endowments, and technology -- imply alternative pricing functions. + +Two models that imply the same pricing function are **observationally +indistinguishable** using payoff and price data alone. + +So models of asset prices can be indexed by +their implied pricing functions. + +A key challenge for empirical work is the role of **conditioning information**. + +Theoretical models have traders forming portfolios contingent on +information available at the time of trading. + +But empirical tests typically +use *unconditional* moments -- time-series averages of payoffs and prices -- +that do not depend on this conditioning information. + +Hansen and Richard develop the theory needed to navigate between these two +levels. The paper proceeds in two steps: + +1. 
**Derive pricing functions** from the primitive assumptions of + value-additivity and continuity, and show that each pricing function can + be represented using a unique **stochastic discount factor** (SDF) $p^*$ + via an inner product on a conditional Hilbert space. + +2. **Deduce testable restrictions** that these pricing functions imply for + population moments of payoffs and prices -- moments that an econometrician + can estimate from time-series data. + +The main results are: + +- A **conditional Riesz Representation Theorem** showing $\pi(p) = E(p \, p^* \mid \mathcal{G})$ + for a unique benchmark payoff $p^*$. + +- A **conditional two-fund theorem** characterizing the mean-variance frontier + conditioned on $\mathcal{G}$. + +- A precise characterization of the **unconditional mean-variance frontier**, + and a demonstration that omitting conditioning information can cause a return + that is on the conditional frontier to fall *off* the unconditional frontier. + +- A **single-beta representation** (conditional CAPM), and conditions under + which it does and does not survive aggregation to unconditional moments. + +- A **pseudo-pricing function** $\pi^*(p) = E[\pi(p)]$ that maps payoffs to + real numbers and connects directly to the GMM approach of + {cite:t}`hansen1982generalized`. + +We make the following imports. + +```{code-cell} ipython3 +import numpy as np +import matplotlib.pyplot as plt +from scipy.optimize import minimize +from scipy import stats +import pandas as pd + + +``` + +## Data generation + +Hansen and Richard begin by describing a class of data-generation processes to +which their theoretical analysis applies. + +This section connects their abstract +framework to the kind of data an econometrician can observe. + +### The probability space and stationarity + +Let $(\Omega, \mathcal{F}, \Pr)$ be a probability space. 
+ +A **measure-preserving, ergodic transformation** $S : \Omega \to \Omega$ +governs the deterministic evolution of the state of the world over time. + +If $\omega$ is the state at time zero, then $S^t(\omega)$ is the state at +time $t$. + +A vector of observables $x(\omega)$ maps $\Omega$ into $\mathbb{R}^k$. + +The +time-$t$ observation vector is + +$$ +x_t(\omega) = x[S^t(\omega)], +$$ + +which defines a **strictly stationary** stochastic process $\{x_t : t = 1, 2, \ldots\}$. + +```{note} +A process $\{x_t\}$ is **strictly stationary** if its joint distribution is invariant +to time shifts: for any times $t_1, \ldots, t_k$ and any shift $h$, + +$$ +(x_{t_1}, \ldots, x_{t_k}) \stackrel{d}{=} (x_{t_1+h}, \ldots, x_{t_k+h}). +$$ + +This is stronger than *weak* (covariance) stationarity, which only requires +time-invariant first and second moments. + +Here strict stationarity follows from +$S$ being measure-preserving: since $x_t = x[S^t(\omega)]$, shifting time by $h$ +is the same as applying $S^h$, which preserves the probability measure. +``` + +Because $S$ is ergodic, *time-series averages converge almost surely to +population means*: + +$$ +\frac{1}{T} \sum_{t=1}^{T} x_t \xrightarrow{a.s.} E(x) \quad \text{as } T \to \infty, +$$ + +as long as $x$ has a finite first moment. + +This lets an econometrician learn about unconditional moments by computing +sample averages from observed time series. + +### Information and payoffs + +At each date $t$, traders observe information captured by the sigma-algebra + +$$ +\mathcal{G}_t = \{A_t : A_t = S^{-t}(A) \text{ for some } A \in \mathcal{G}\}, +\quad t = 1, 2, \ldots +$$ + +where $\mathcal{G}$ is the information at time zero. + +We write $I_t$ for the set +of random variables measurable with respect to $\mathcal{G}_t$, and $I$ for +those measurable with respect to $\mathcal{G}$. + +A **one-period security** purchased at time $t$ has a payoff at time $t+1$. 
+ +Let $p$ denote a random variable in $I_1$ used to define a sequence of payoffs + +$$ +p_{t+1}(\omega) = p[S^t(\omega)]. +$$ + +The **pricing function** $\pi$ maps payoffs into prices. + +The time-zero price of $p$ is $\pi(p)$, a random variable in $I$. + +Since both the payoff sequence $\{p_{t+1}\}$ and the price sequence +$\{\pi_t(p_{t+1})\}$ are strictly stationary, their moments can be +estimated by time-series averages. + +## Pricing functions and Hilbert space machinery + +### Properties of pricing functions + +Hansen and Richard assume the pricing function $\pi$ maps a set of payoffs +$P$ into prices in $I$. + +Four assumptions are imposed: + +````{prf:assumption} Conditionally complete payoff space +:label: hr87_assumption_21 + +The set of payoffs $P$ is a **conditionally complete linear subspace** of +$P^+ = \{p \in I_1 : E(p^2 \mid \mathcal{G}) < \infty\}$. +```` + +This assumption has two parts. + +*Linear subspace* means that $P$ is closed +under conditional linear combinations: traders can form portfolios with +information-contingent weights and the resulting payoffs remain in $P$. + + +*Conditionally complete* means that limits of conditionally Cauchy sequences +also belong to $P$, which is needed to make $P$ a conditional Hilbert space. + +````{prf:assumption} Value-additivity +:label: hr87_value_additivity + +For any payoffs $p_1, p_2 \in P$ and any $w_1, w_2 \in I$, + +$$ +\pi(w_1 p_1 + w_2 p_2) = w_1 \pi(p_1) + w_2 \pi(p_2). +$$ +```` + +This says that the price of a portfolio is the portfolio of the prices. + +The +portfolio weights $w_1, w_2$ are allowed to be random variables measurable +with respect to $\mathcal{G}$, reflecting the fact that traders choose +portfolio weights based on current information. + +````{prf:assumption} Conditional continuity +:label: hr87_cond_continuity + +If a sequence of payoffs +$\{p_j\}$ converges conditionally to zero, then their prices converge in +probability to zero. 
+```` + +````{prf:assumption} Existence of returns +:label: hr87_assumption_24 + +The set of returns $R = \{p \in P : \pi(p) = 1\}$ is nonempty. +```` + +This guarantees that there exist payoffs with unit price, i.e., assets that +can be purchased for one unit of the numeraire today. + +{prf:ref}`hr87_value_additivity` and {prf:ref}`hr87_cond_continuity` together +make $\pi$ a **conditional continuous linear functional** on $P$. + +### The conditional Hilbert space + +To apply the theory of linear functionals, Hansen and Richard build a +**conditional Hilbert space** of payoffs. + +Let + +$$ +P^+ = \{p \in I_1 : E(p^2 \mid \mathcal{G}) < \infty\} +$$ + +be the set of payoffs with finite conditional second moment. + +Define a **conditional inner product** for $p_1, p_2 \in P^+$: + +$$ +\langle p_1 \mid p_2 \rangle_{\mathcal{G}} = E(p_1 p_2 \mid \mathcal{G}), +$$ + +and the associated **conditional norm** + +$$ +\|p\|_{\mathcal{G}} = \left[\langle p \mid p \rangle_{\mathcal{G}}\right]^{1/2}. +$$ + +Both the inner product and the norm take values in $I$. + +They are +*random variables*, not scalars. + +This is the key difference from a standard +$L^2$ Hilbert space. + +Convergence is defined using convergence in probability of these +random variables: + +````{prf:definition} Conditional convergence +:label: hr87_cond_convergence + +$\{p_j\}$ converges conditionally to $p_0$ if +$\lim_{j \to \infty} \Pr\{\|p_j - p_0\|_{\mathcal{G}} > \varepsilon\} = 0$ for all $\varepsilon > 0$. +```` + +````{prf:definition} Conditionally Cauchy +:label: hr87_cond_cauchy + +$\{p_j\}$ is conditionally Cauchy if +$\lim_{j,k \to \infty} \Pr\{\|p_j - p_k\|_{\mathcal{G}} > \varepsilon\} = 0$ for all $\varepsilon > 0$. +```` + +A key technical result (proved in the Appendix of the paper) is that $P^+$ is +*conditionally complete*: every conditional Cauchy sequence converges +conditionally to an element of $P^+$. + +This is the conditional analogue of the Riesz-Fischer theorem. 
+
+This is exactly the property required of $P$ by
+{prf:ref}`hr87_assumption_21`.
+
+## The Riesz representation: the stochastic discount factor
+
+The conditional completeness and the conditional continuity of $\pi$ together
+deliver the central representation theorem of the paper.
+
+````{prf:theorem} Conditional Riesz Representation
+:label: hr87_cond_riesz
+
+Suppose {prf:ref}`hr87_assumption_21` -- {prf:ref}`hr87_assumption_24`
+are satisfied. Then there exists a *unique* payoff $p^* \in P$ such that
+
+$$
+\pi(p) = \langle p \mid p^* \rangle_{\mathcal{G}} = E(p \, p^* \mid \mathcal{G})
+\quad \text{for all } p \in P.
+$$
+
+Moreover, $\Pr\{\|p^*\|_{\mathcal{G}} > 0\} = 1$.
+````
+
+The payoff $p^*$ is the **stochastic discount factor** (SDF), also called the
+**benchmark payoff**.
+
+Recall that the pricing function $\pi$ was introduced above as an abstract
+mapping from payoffs to prices, subject only to value-additivity
+({prf:ref}`hr87_value_additivity`) and continuity ({prf:ref}`hr87_cond_continuity`).
+
+The theorem says that *any* such $\pi$ can be represented concretely as
+$\pi(p) = E(p \, p^* \mid \mathcal{G})$ for a unique $p^*$.
+
+The payoff $p^*$ is called the **stochastic discount factor** (SDF) or
+**benchmark payoff**.
+
+Different equilibrium models of asset prices correspond
+to different choices of $p^*$.
+
+### No-arbitrage and positivity of $p^*$
+
+````{prf:definition} No-arbitrage
+:label: hr87_no_arb
+
+A pricing function $\pi$ has **no arbitrage opportunities** on $P$ if for any
+nonnegative payoff $p \geq 0$ with $\Pr\{p > 0\} > 0$,
+
+$$
+\Pr\left[\{\pi(p) \leq 0\} \cap \{p > 0\}\right] = 0.
+$$
+
+This is the conditional counterpart to the no-arbitrage assumption used by
+{cite:t}`Ross_78`.
+````
+
+Since the price $\pi(p)$ is a random variable (it depends on $\mathcal{G}$),
+this says that in no state of the world can the price be non-positive while
+the payoff is strictly positive.
+ +When $\pi$ has no arbitrage opportunities and $P = P^+$, then $p^*$ is +*strictly positive* with probability one: + +$$ +\Pr\{p^* > 0\} = 1. +$$ + +In this case $p^*$ can be interpreted as the **intertemporal marginal rate +of substitution** of the numeraire good -- it converts future payoffs +into today's prices. + +### The benchmark return $r^*$ + +Since $\pi(p^*) = \langle p^* \mid p^* \rangle_{\mathcal{G}} = E(p^{*2} \mid \mathcal{G})$ +is positive with probability one, we can define the **benchmark return** + +$$ +r^* = \frac{p^*}{\pi(p^*)}. +$$ + +This return belongs to $R = \{p \in P : \pi(p) = 1\}$, the set of all +unit-price payoffs (returns). + +````{prf:lemma} Minimum second moment +:label: hr87_lemma31 + +$r^*$ has the *minimum conditional second moment* among all returns: + +$$ +\langle r^* \mid r^* \rangle_{\mathcal{G}} \leq \langle r \mid r \rangle_{\mathcal{G}} +\quad \text{for all } r \in R. +$$ + +By the Law of Iterated Expectations, + +$$ +E(r^{*2}) \leq E(r^2) \quad \text{for all } r \in R. +$$ + +This extends to unconditional second moments: $r^*$ has the minimum +unconditional second moment as well. +```` + +Let's illustrate {prf:ref}`hr87_lemma31` with a numerical example. + +We draw a lognormal $p^*$ and generate five asset payoffs that depend linearly on $p^*$ plus idiosyncratic noise. + +Prices are computed as $\pi(p_i) = E(p_i \cdot p^*)$, and returns are $r_i = p_i / \pi(p_i)$. + +We first verify the pricing equation $E(r_i \cdot p^*) = 1$ holds up to sampling error. + +We then search over portfolio weights (constrained to sum to one) to minimize the unconditional second moment $E(r^2)$. + +By {prf:ref}`hr87_lemma31`, this portfolio approximates $r^*$, so its $E(r^2)$ should be lower than that of any individual asset. 
+ +```{code-cell} ipython3 +def simulate_sdf_and_returns(T=10000, n_assets=5, seed=42): + """Simulate lognormal SDF and multiple asset returns.""" + rng = np.random.default_rng(seed) + σ_m = 0.15 + mu_m = -0.5 * σ_m**2 + + pstar = np.exp(mu_m + σ_m * rng.standard_normal(T)) + + # Asset payoffs: p_i = α_i + β_i * p* + noise + βs = rng.uniform(-2, 2, n_assets) + αs = rng.uniform(1, 3, n_assets) + payoffs = αs + np.outer(pstar, βs) + 0.3 * rng.standard_normal((T, n_assets)) + + prices = np.mean(payoffs * pstar[:, None], axis=0) + returns = payoffs / prices + + return pstar, returns, prices + + +pstar, returns, prices = simulate_sdf_and_returns() + +pricing_errors = np.mean(returns * pstar[:, None], axis=0) - 1.0 +print("Pricing errors E[r*p* - 1] (should be approx 0):") +for i, err in enumerate(pricing_errors): + print(f" Asset {i+1}: {err:.6f}") + +n = returns.shape[1] + +def objective(w): + r_p = returns @ w + return np.mean(r_p**2) + +result = minimize(objective, np.ones(n)/n, method='SLSQP', + constraints=[{'type': 'eq', 'fun': lambda w: w.sum() - 1}], + bounds=[(-2, 2)] * n) +r_star_approx = returns @ result.x + +print(f"\nMinimum E[r^2] portfolio: {np.mean(r_star_approx**2):.6f}") +print(f"Individual asset E[r^2]: {[f'{np.mean(returns[:,i]**2):.4f}' for i in range(n)]}") +``` + +## Implications for omitting conditioning information + +Now we analyze the effect of omitting conditioning information when studying +the mean-variance implications of asset pricing models. + +### Returns, zero-price payoffs, and the decomposition of $R$ + +Define two key level sets of $\pi$: + +$$ +R = \{p \in P : \pi(p) = 1\}, \quad Z = \{p \in P : \pi(p) = 0\}. +$$ + +$R$ is the set of all **returns** (unit-price payoffs) and $Z$ is the set of +**zero-price payoffs** (excess returns, hedging payoffs, etc.). + +Since $\pi$ is a conditional linear functional, the zero payoff is in $Z$, and +{prf:ref}`hr87_assumption_24` guarantees that $R$ is nonempty. 
+ +Using $r^*$ as a benchmark, any return $r \in R$ can be decomposed as + +$$ +r = r^* + z \quad \text{for some } z \in Z, +$$ + +since $\pi(r) = \pi(r^*) + \pi(z) = 1 + 0 = 1$. + +Because $r^*$ has minimum conditional second moment ({prf:ref}`hr87_lemma31`), +it is conditionally orthogonal to all of $Z$: +$\langle r^* \mid z \rangle_{\mathcal{G}} = 0$ for all $z \in Z$. + +This gives a conditionally orthogonal decomposition of $R$. + +### The role of $z^*$ + +The set $Z$ itself can be decomposed. + +There is a unique payoff $z^* \in Z$ that +satisfies + +$$ +\langle z \mid z^* \rangle_{\mathcal{G}} = E(z \mid \mathcal{G}) \quad \text{for all } z \in Z. +$$ + +This $z^*$ plays the role of the "conditional mean direction" in $Z$. + +Its conditional second moment equals its conditional mean: + +$$ +E(z^{*2} \mid \mathcal{G}) = E(z^* \mid \mathcal{G}), +$$ + +which implies $0 < E(z^* \mid \mathcal{G}) \leq 1$ whenever markets are not +risk-neutral. + +Using $z^*$, the set $Z$ decomposes as + +$$ +Z = \{z : z = w z^* + n \text{ for some } w \in I, \; n \in N\}, +$$ + +where $N = \{z \in Z : E(z \mid \mathcal{G}) = 0\}$. + +Combining this with the $R = r^* + Z$ decomposition gives the full +representation of all returns: + +$$ +R = \{r : r = r^* + w z^* + n \text{ for some } w \in I, \; n \in N\}. +$$ + +### The conditional mean-variance frontier + +Recall from {doc}`asset_pricing_lph` that the unconditional mean-variance frontier +can be derived from $E(mR) = 1$ via the Cauchy-Schwarz inequality. + +Here we develop the conditional counterpart using the $r^*$, $z^*$ decomposition. + +With this decomposition in hand, the conditional mean-variance problem becomes +straightforward. + +````{prf:lemma} Conditional two-fund theorem +:label: hr87_lemma33 + +Minimize $\langle r \mid r \rangle_{\mathcal{G}}$ for $r \in R$ +subject to $E(r \mid \mathcal{G}) = w$ for some target $w \in I$. 
+ +The solution is + +$$ +r_w = r^* + w^* z^*, +$$ + +where + +$$ +w^* = \frac{w - E(r^* \mid \mathcal{G})}{E(z^* \mid \mathcal{G})}. +$$ + +Every conditionally efficient +return is a conditional linear combination of $r^*$ and $z^*$, with the +weight $w^*$ being a *random variable* that depends on the conditioning +information $\mathcal{G}$. +```` + +### The unconditional mean-variance frontier + +To connect to data, restrict attention to payoffs with finite unconditional +second moments: + +$$ +P^* = \{p \in P : E(p^2) < \infty\}, +$$ + +with the *unconditional* inner product $\langle p_1 \mid p_2 \rangle = E(p_1 p_2)$. + +Define the unconditional counterparts: + +$$ +R^* = R \cap P^*, \quad Z^* = Z \cap P^*, \quad N^* = \{z \in Z^* : E(z) = 0\}. +$$ + +By the Law of Iterated Expectations, $z^*$ continues to represent the mean +direction unconditionally: + +$$ +\langle z \mid z^* \rangle = E(z) \quad \text{for all } z \in Z^*. +$$ + +````{prf:lemma} Unconditional two-fund theorem +:label: hr87_lemma34 + +Minimize $\langle r \mid r \rangle = E(r^2)$ for $r \in R^*$ +subject to $E(r) = c$ for some constant $c$. + +The solution is + +$$ +r_c = r^* + c^* z^*, +$$ + +where + +$$ +c^* = \frac{c - E(r^*)}{E(z^*)}. +$$ + +The key difference: $c^*$ is a *constant*, while $w^*$ in the conditional +problem is a random variable. +```` + +### Conditional efficiency does not imply unconditional efficiency + +This is the central empirical message of the paper. + +A conditionally efficient return $r_w = r^* + w^* z^*$ is on the +unconditional frontier *only when* $w^*$ is constant with probability one. + +When $w^*$ varies with the state of the world -- which is the typical case +when traders use conditioning information -- the return will be off the +unconditional frontier. + +This has direct implications: + +- The *CAPM* (Sharpe-Lintner-Mossin) implies that the market portfolio is a + conditional reference return. 
But the market return need not be a reference + return for unconditional single-beta tests. + +- *Breeden's* consumption CAPM implies the return on aggregate consumption is + a conditional reference return. Again, it need not serve as an unconditional + reference. + +- Portfolio managers whose returns appear to be conditionally efficient may + look *inefficient* when evaluated with unconditional data. + +The following simulation illustrates this phenomenon. + +```{code-cell} ipython3 +def compute_mv_frontier(mean_returns, cov_matrix): + """Compute the analytical mean-variance frontier.""" + n = len(mean_returns) + ones = np.ones(n) + inv_cov = np.linalg.inv(cov_matrix) + + A = mean_returns @ inv_cov @ mean_returns + B = mean_returns @ inv_cov @ ones + C = ones @ inv_cov @ ones + D = A * C - B**2 + + frontier_means = np.linspace(mean_returns.min() - 0.05, + mean_returns.max() + 0.05, 300) + frontier_vars = (C * frontier_means**2 - 2*B*frontier_means + A) / D + frontier_vols = np.sqrt(np.maximum(frontier_vars, 0)) + return frontier_means, frontier_vols + + +def mv_weights(mu_vec, Sigma, target_mu): + """Minimum-variance portfolio weights for a given target mean.""" + n = len(mu_vec) + inv_cov = np.linalg.inv(Sigma) + ones = np.ones(n) + A = mu_vec @ inv_cov @ mu_vec + B = mu_vec @ inv_cov @ ones + C = ones @ inv_cov @ ones + D = A * C - B**2 + g = (A * (inv_cov @ ones) - B * (inv_cov @ mu_vec)) / D + h = (C * (inv_cov @ mu_vec) - B * (inv_cov @ ones)) / D + return g + target_mu * h +``` + +```{code-cell} ipython3 +def simulate_conditional_vs_unconditional(T=50000, seed=0): + """Show that a conditionally efficient portfolio can be unconditionally inefficient.""" + rng = np.random.default_rng(seed) + n_assets = 3 + + state = rng.integers(0, 2, T) + + # Two regimes with different conditional means, common covariance + mu_low = np.array([0.05, 0.10, 0.08]) + mu_high = np.array([0.12, 0.07, 0.09]) + + σ = np.array([0.15, 0.20, 0.18]) + corr = np.array([[1.0, 0.3, 0.5], + 
[0.3, 1.0, 0.2], + [0.5, 0.2, 1.0]]) + cov = np.diag(σ) @ corr @ np.diag(σ) + + rets_low = rng.multivariate_normal(mu_low, cov, T) + rets_high = rng.multivariate_normal(mu_high, cov, T) + returns = np.where(state[:, None] == 0, rets_low, rets_high) + + # Conditionally efficient weights for target mean = 0.09 + target = 0.09 + w_low = mv_weights(mu_low, cov, target) + w_high = mv_weights(mu_high, cov, target) + + # Dynamic portfolio switches weights by state + port_rets = np.where(state == 0, + rets_low @ w_low, + rets_high @ w_high) + + mu_unc = returns.mean(axis=0) + cov_unc = np.cov(returns.T) + front_mu, front_std = compute_mv_frontier(mu_unc, cov_unc) + + return (port_rets.mean(), port_rets.std(), + front_mu, front_std, mu_unc, cov_unc) + + +mu_port, std_port, front_mu, front_std, mu_unc, cov_unc = \ + simulate_conditional_vs_unconditional() +``` + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: > + A conditionally efficient portfolio (star) lies to the left of the + constant-weight frontier built from the three primitive assets (curve). + name: fig-hr-cond-vs-uncond +--- +fig, ax = plt.subplots(figsize=(8, 5)) +ax.plot(front_std, front_mu, lw=2, + label='Primitive-asset constant-weight frontier', color='steelblue') +ax.scatter(np.sqrt(np.diag(cov_unc)), mu_unc, color='red', + zorder=5, s=60, label='Individual assets') +ax.scatter(std_port, mu_port, color='orange', zorder=6, s=150, + marker='*', label='Conditionally efficient portfolio') +ax.set_xlabel('standard deviation of return') +ax.set_ylabel('expected return') +ax.legend() +plt.tight_layout() +plt.show() +``` + +### The risk-free return + +Let's consider another example. + +When $P$ contains a unit payoff and $\pi$ has no arbitrage opportunities, there +is a risk-free return + +$$ +r^f = \frac{1}{\langle p^* \mid 1 \rangle_{\mathcal{G}}} + = \frac{\langle r^* \mid r^* \rangle_{\mathcal{G}}}{\langle r^* \mid 1 \rangle_{\mathcal{G}}}. 
+$$ + +In the decomposition $R = \{r^* + wz^* + n\}$, the risk-free return is +$r^f = r^* + r^f z^*$. + +Because $r^f$ is in general a *random variable* (it depends on $\mathcal{G}$), +it lies on the *conditional* frontier but will be *off* the unconditional +frontier unless $r^f$ is constant. + + +## The single-beta representation + +### Conditional CAPM + +A return $r_\beta \in R$ is a reference return for a conditional single-beta +representation conditioned on $\mathcal{G}$ if +$\Pr\{\mathrm{Var}(r_\beta \mid \mathcal{G}) = 0\} = 0$ and for all $r \in R$, + +$$ +E(r \mid \mathcal{G}) - \alpha = \frac{\mathrm{Cov}(r_\beta, r \mid \mathcal{G})} +{\mathrm{Var}(r_\beta \mid \mathcal{G})} \left[E(r_\beta \mid \mathcal{G}) - \alpha\right], +$$ + +where $\alpha \in I$ is the conditional zero-beta return. + +This is the conditional analogue of the CAPM security market line. + +````{prf:lemma} Conditional Roll's theorem +:label: hr87_lemma35 + +$r_\beta$ is a reference return +for a conditional single-beta representation if and only if +$r_\beta = r^* + w^* z^*$ for some $w^* \in I$ satisfying + +$$ +\Pr\left\{w^* = \frac{E(r^* \mid \mathcal{G})}{1 - E(z^* \mid \mathcal{G})}\right\} = 0. +$$ +```` + +### Unconditional single-beta representation + +The unconditional expected-return-beta representation was derived in +{doc}`asset_pricing_lph`. + +The key question here is: when does the *conditional* +single-beta representation survive aggregation to unconditional moments? + +````{prf:corollary} Unconditional single-beta representation +:label: hr87_cor31 + +$r_\beta$ is a reference return for an *unconditional* +single-beta representation if and only if $r_\beta = r^* + c^* z^*$ for a +*constant* $c^*$ satisfying + +$$ +c^* \neq \frac{E(r^*)}{1 - E(z^*)}. 
+$$ +```` + +This result has sharp empirical implications: + +- Even if the CAPM holds *conditionally* (e.g., the market portfolio is on + the conditional frontier), the standard unconditional regression test -- + regressing asset returns on market returns and testing $\alpha = 0$ -- is + testing a *different* hypothesis. + +- The unconditional single-beta representation holds only for returns built + with *constant* portfolio weights. + +We illustrate this by running CAPM regressions $r_i = \alpha + \beta \, r_{\text{ref}} + \varepsilon$ using two different reference returns. + +The first uses a portfolio on the *unconditional* mean-variance frontier, constructed with constant weights from the unconditional moments. + +By {prf:ref}`hr87_cor31`, this is a valid reference for an unconditional single-beta representation, so the regression intercepts should be consistent with the zero-beta return $\alpha$ implied by the corollary. + +The second uses a conditionally efficient portfolio whose weights switch across regimes. + +This portfolio is on the *conditional* frontier in each state, but its state-dependent weights violate the constant-weight requirement of {prf:ref}`hr87_cor31`, so the unconditional single-beta representation need not hold. 
+ +```{code-cell} ipython3 +def capm_regression(returns, ref_return): + """Run unconditional CAPM regressions: r_i = alpha + beta * r_ref + eps.""" + n_assets = returns.shape[1] + alphas = np.empty(n_assets) + for i in range(n_assets): + slope, intercept, _, _, _ = stats.linregress(ref_return, returns[:, i]) + alphas[i] = intercept + return alphas + + +# Simulate a two-regime economy +rng = np.random.default_rng(42) +T_capm = 50000 +n_assets_capm = 4 +state = rng.integers(0, 2, T_capm) + +mu_low = np.array([0.02, 0.12, 0.04, 0.14]) +mu_high = np.array([0.14, 0.02, 0.12, 0.01]) + +cov_capm = np.array([[0.01, 0.002, 0.004, 0.001], + [0.002, 0.01, 0.002, 0.001], + [0.004, 0.002, 0.01, 0.001], + [0.001, 0.001, 0.001, 0.01]]) + +rets_low = rng.multivariate_normal(mu_low, cov_capm, T_capm) +rets_high = rng.multivariate_normal(mu_high, cov_capm, T_capm) +returns_capm = np.where(state[:, None] == 0, rets_low, rets_high) + +# Case 1: unconditional frontier portfolio (constant weights) +# Use the unconditional mean and covariance to find frontier weights +mu_unc = returns_capm.mean(axis=0) +cov_unc = np.cov(returns_capm.T) +w_frontier = np.linalg.solve(cov_unc, mu_unc) +w_frontier /= w_frontier.sum() +r_frontier = returns_capm @ w_frontier + +# Case 2: conditionally efficient portfolio (state-dependent weights) +w_low = np.linalg.solve(cov_capm, mu_low) +w_low /= w_low.sum() +w_high = np.linalg.solve(cov_capm, mu_high) +w_high /= w_high.sum() +r_dynamic = np.where(state == 0, + rets_low @ w_low, + rets_high @ w_high) + +alphas_frontier = capm_regression(returns_capm, r_frontier) +alphas_dynamic = capm_regression(returns_capm, r_dynamic) + +print(f"{'Asset':<8} {'Frontier (const)':>18} {'Cond. eff. 
(dyn)':>18}") +for i in range(n_assets_capm): + print(f"{i+1:<8} {alphas_frontier[i]:>18.6f} {alphas_dynamic[i]:>18.6f}") +``` + +The constant-weight frontier portfolio produces intercepts close to a common value for every asset, confirming that the unconditional single-beta representation holds as predicted by {prf:ref}`hr87_cor31`. + +In general, {prf:ref}`hr87_cor31` guarantees a real zero-beta return $\alpha$, but that $\alpha$ need not be zero -- it equals zero only under an extra normalization or for a specially chosen reference portfolio. + +The conditionally efficient portfolio, whose weights switch between regimes, produces non-zero alphas despite being on the conditional frontier in each state. + +This is exactly the gap that {cite:t}`HansenRichard1987` warns about: a return that is conditionally efficient need not serve as a valid reference for unconditional single-beta tests. + +## The pseudo-pricing function and connection to GMM + +### Constructing $\pi^*$ + +Everything we have derived so far is *conditional* on $\mathcal{G}$: the +pricing function $\pi(p) = E(p \, p^* \mid \mathcal{G})$ maps payoffs into +random variables, not numbers. + +An econometrician, however, works with time-series data and computes +sample averages, which estimate *unconditional* moments. + +The question is: how do we go from the conditional theory to restrictions that +can be tested with unconditional data? + +Hansen and Richard's solution is to define the **pseudo-pricing function** + +$$ +\pi^*(p) = E[\pi(p)] \quad \text{for all } p \in P^*. +$$ + +This function maps payoffs to *real numbers*. + +It behaves like a pricing +function where the conditioning information set is the trivial sigma-algebra +(containing only $\Omega$ and $\emptyset$). + +For $\pi^*$ to be well defined on $P^*$, the benchmark payoff $p^*$ must itself have a finite unconditional second moment, i.e., $p^* \in P^*$. + +This is the content of Assumption 4.1 in {cite:t}`HansenRichard1987`. 
+ +Whether it holds can depend on the choice of numeraire. + +````{prf:theorem} Pseudo-pricing function +:label: hr87_thm41 + +Suppose $p^* \in P^*$ (equivalently, $E(p^{*2}) < \infty$). + +Then $(P^*, \pi^*)$ satisfies all the assumptions +imposed on $(P, \pi)$, with the trivial sigma-algebra replacing $\mathcal{G}$. +```` + +Crucially, the same $p^*$ represents both $\pi$ and $\pi^*$: + +$$ +\pi^*(p) = E[\pi(p)] = E\!\left[E(p \, p^* \mid \mathcal{G})\right] = E(p \, p^*) += \langle p \mid p^* \rangle, +$$ + +where the third equality uses the Law of Iterated Expectations and the last +is the unconditional inner product. + +Hansen and Richard show that if two pricing functions $\pi$ and $\pi^+$ agree on the **full payoff space** $P^*$, then their benchmark payoffs coincide almost surely. + +Thus conditioning down from $\pi$ to $\pi^*$ does *not* inherently lose discriminatory power. + +The loss of information arises instead when an econometrician tests moment restrictions using only a *subset* of the payoffs in $P^*$. + +Two distinct pricing functions may imply the same $\pi^*$ on that subset even though they differ on $P^*$ as a whole. + +### Connection to Hansen-Singleton GMM + +The pseudo-pricing function underlies the {cite:t}`hansen1982generalized` +econometric approach. + +If a model specifies $p^*$ as a function of observable data -- e.g., a +parametric function of consumption growth $p^* = p^*(\Delta c_{t+1}, \theta)$ +-- then the pricing restriction + +$$ +E(p \, p^*) = \pi^*(p) = E[\pi(p)] +$$ + +holds for every payoff $p \in P^*$. + +An econometrician exploits this by forming **moment conditions**. + +Multiplying +by instruments $z_t \in I$ (variables in the traders' information set) gives + +$$ +E\!\left[p_{t+1} \, p^*(\Delta c_{t+1}, \theta) \cdot z_t\right] += E\!\left[\pi(p_{t+1}) \cdot z_t\right]. +$$ + +The parameter vector $\theta$ is then estimated by GMM. 
+ +The **choice of instruments** determines how much conditioning information is +exploited -- more instruments increase efficiency but also increase the +dimensionality of the GMM problem. + +Notice that the payoffs used in this procedure can themselves be conditional +linear combinations of primitive payoffs, as long as the conditioning weights +are measurable with respect to $\mathcal{G}$. + +This gives the analyst +flexibility in constructing the moment conditions, and corresponds to the +instrumental variables used in the Hansen-Singleton analysis. + +We now put this to work by testing a specific model of $p^*$. + +We construct a CRRA stochastic discount factor $p^* = e^{-\delta - \gamma \Delta c}$ with $\gamma = 2$ and simulated consumption growth, then check the Euler equation $E(r \cdot p^*) = 1$ against returns generated by a *different* SDF. + +The test computes the sample average of $r \cdot p^* - 1$ for each asset, along with its standard error and t-statistic. + +If the model is correct, these averages should be near zero. + +We also test the instrumented moment conditions $E[(r \cdot p^* - 1) \cdot z] = 0$ using lagged consumption growth as an instrument, which exploits additional conditioning information. 
+ +```{code-cell} ipython3 +def gmm_euler_equation_test(pstar_hat, returns, instruments=None): + """Test Euler equation restrictions E[r*p* - 1] = 0, optionally with instruments.""" + T = len(pstar_hat) + n_assets = returns.shape[1] + + moments = returns * pstar_hat[:, None] - 1.0 + mean_m = moments.mean(axis=0) + se_m = moments.std(axis=0) / np.sqrt(T) + + print("Euler equation tests: E[r*p* - 1] = 0") + print(f"{'Asset':<8} {'Mean':>10} {'Std err':>10} {'t-stat':>10}") + for i in range(n_assets): + t = mean_m[i] / se_m[i] + print(f"{i+1:<8} {mean_m[i]:>10.5f} {se_m[i]:>10.5f} {t:>10.2f}") + + if instruments is not None: + print("\nInstrumented moments: E[(r*p* - 1)*z] = 0") + k = instruments.shape[1] + for j in range(k): + z = instruments[:, j] + inst_m = moments * z[:, None] + mean_inst = inst_m.mean(axis=0) + se_inst = inst_m.std(axis=0) / np.sqrt(T) + print(f" Instrument {j+1}:") + for i in range(n_assets): + t = mean_inst[i] / se_inst[i] + print(f" Asset {i+1}: mean={mean_inst[i]:.5f}, " + f"se={se_inst[i]:.5f}, t={t:.2f}") + + +T = 10000 +rng = np.random.default_rng(7) +δ, γ = 0.02, 2.0 +c_growth = rng.normal(0.02, 0.03, T) +pstar_model = np.exp(-δ - γ * c_growth) +pstar_model /= pstar_model.mean() + +_, returns_data, _ = simulate_sdf_and_returns(T=T) + +instruments = c_growth[:-1].reshape(-1, 1) +gmm_euler_equation_test(pstar_model[1:], returns_data[1:], instruments) +``` + +The large t-statistics reject the Euler equations for every asset, both with and without instruments. + +This is by construction: the returns were generated by `simulate_sdf_and_returns`, which uses a lognormal SDF with $\sigma_m = 0.15$, while the CRRA model being tested has $\gamma = 2$ and $\sigma_c = 0.03$, implying far less SDF volatility. + +The two SDFs do not match, so the moment conditions $E(r \cdot p^* - 1) = 0$ fail. 
+ +This illustrates how GMM tests can detect misspecification: when the proposed $p^*$ is not the true pricing kernel, the Euler equation restrictions are violated and the data reject the model. + +## Summary + +The main contributions of {cite:t}`HansenRichard1987` are: + +1. **Conditional Riesz Representation**: Every admissible pricing function can + be written as $\pi(p) = E(p \, p^* \mid \mathcal{G})$ for a unique SDF $p^*$. + Different models of asset prices are indexed by their $p^*$. + +2. **Conditional mean-variance frontier**: The frontier is spanned by $r^*$ + and $z^*$, giving a conditional two-fund theorem with *random* weights. + +3. **Conditional vs unconditional frontiers**: A return can be on the + conditional frontier without being on the unconditional frontier. The + unconditional frontier uses constant weights $c^*$; the conditional + frontier uses random weights $w^*$. The gap matters for empirical tests. + +4. **Single-beta representation**: The conditional CAPM extends Roll's + characterization to conditioning information. The unconditional version + holds only for returns with constant portfolio weights -- so testing the + CAPM with unconditional regressions tests a weaker hypothesis. + +5. **Pseudo-pricing function**: $\pi^*(p) = E[\pi(p)] = E(p \, p^*)$ maps + payoffs to real numbers and connects directly to {cite:t}`hansen1982generalized` + GMM estimation. + On the full payoff space $P^*$, two pricing functions that imply the same + $\pi^*$ must share the same benchmark payoff $p^*$. + + - The loss of discriminatory power comes from testing only a *subset* of + payoffs, not from conditioning down per se. + +## Exercises + +```{exercise} +:label: hr87_ex1 + +The benchmark return $r^*$ minimizes the unconditional second moment +$E(r^2)$ over the set $R^*$ of returns with finite unconditional second moments. 
+ +(a) Starting from the decomposition $r = r^* + z$ for some $z \in Z$, and + using the fact that $r^*$ is conditionally orthogonal to $Z$, show that + +$$ +E(r^2) = E(r^{*2}) + E(z^2) \geq E(r^{*2}). +$$ + +(b) Write a Python function that, given a vector of asset returns and an + estimate of the SDF $p^*$, computes the return closest to $r^*$ (the one + with minimum sample second moment), and verifies numerically that it has + a smaller second moment than all other returns in $R^*$. +``` + +```{solution-start} hr87_ex1 +:class: dropdown +``` + +**(a) Analytical proof** + +Any return $r \in R$ can be written as $r = r^* + z$ for some $z \in Z^*$ +(where $Z^* = Z \cap P^*$). + +Since $r^*$ is conditionally orthogonal to $Z$ -- meaning +$\langle r^* \mid z \rangle_{\mathcal{G}} = E(r^* z \mid \mathcal{G}) = 0$ for all $z \in Z$ +-- taking unconditional expectations gives $E(r^* z) = 0$. + +Therefore, + +$$ +E(r^2) = E[(r^* + z)^2] = E(r^{*2}) + 2 E(r^* z) + E(z^2) + = E(r^{*2}) + E(z^2) \geq E(r^{*2}), +$$ + +with equality if and only if $z = 0$, i.e., $r = r^*$. 
+ +**(b) Numerical verification** + +```{code-cell} ipython3 +def find_min_second_moment_return(returns): + """Find the portfolio minimizing E[r^2] -- the empirical analogue of r*.""" + n = returns.shape[1] + + def objective(w): + return np.mean((returns @ w)**2) + + result = minimize(objective, np.ones(n)/n, method='SLSQP', + constraints=[{'type': 'eq', 'fun': lambda w: w.sum() - 1}], + bounds=[(-2, 2)] * n) + r_star = returns @ result.x + + print(f"Minimum second-moment portfolio:") + print(f" E[r*^2] = {np.mean(r_star**2):.6f}") + print(f"\nSecond moments of individual assets:") + for i in range(n): + e2 = np.mean(returns[:, i]**2) + check = "yes" if e2 >= np.mean(r_star**2) - 1e-10 else "no" + print(f" Asset {i+1}: E[r^2] = {e2:.6f} " + f"(>= E[r*^2]: {check})") + return result.x + + +pstar_sim, returns_sim, _ = simulate_sdf_and_returns(T=10000) +w_star = find_min_second_moment_return(returns_sim) +``` + +```{solution-end} +``` \ No newline at end of file diff --git a/lectures/info_projection.md b/lectures/info_projection.md index 04b57951..4926ae8a 100644 --- a/lectures/info_projection.md +++ b/lectures/info_projection.md @@ -33,9 +33,9 @@ This lecture presents the analysis in {cite}`sargent1976econometric`, which exam The topic involves three estimators: -- **Cagan's estimator** — {cite:t}`Cagan` regresses real balances on past inflation rates, and is consistent when its orthogonality condition holds. -- **Jacobs' estimator** — {cite:t}`jacobs1975difficulty` inverts the equation and regresses real balances on past money growth rates, and is consistent only if money is exogenous. -- **Sargent's critique** — uses a rational expectations model to show that Jacobs' estimator is biased when money is not exogenous, and predicts its population limit {cite}`sargent1976econometric`. +- **Cagan's estimator** -- {cite:t}`Cagan` regresses real balances on past inflation rates, and is consistent when its orthogonality condition holds. 
+- **Jacobs' estimator** -- {cite:t}`jacobs1975difficulty` inverts the equation and regresses real balances on past money growth rates, and is consistent only if money is exogenous. +- **Sargent's critique** -- uses a rational expectations model to show that Jacobs' estimator is biased when money is not exogenous, and predicts its population limit {cite}`sargent1976econometric`. The key computational technique is **information projection** via the Wiener–Kolmogorov formula, which computes the optimal linear least-squares projection of one covariance-stationary process onto current and past values of another. @@ -112,7 +112,7 @@ then {eq}`eq:orthogonality_cond` generally *fails*. When money does not respond to disturbances $u_t$, the price level -must absorb them — creating a correlation between $p_t$ and $u_t$ that +must absorb them -- creating a correlation between $p_t$ and $u_t$ that invalidates Cagan's orthogonality condition. ## Jacobs' estimator @@ -597,9 +597,9 @@ The table below reproduces the estimates from {cite}`jacobs1975difficulty` as re | Country | $k$ (Jacobs) | $\hat{\delta}$ | |-----------|:-----------:|:-------------------:| | Austria | 0.143 | 0.87 | -| Germany | −0.131 | 1.14 | -| Greece | −0.262 | 1.30 | -| Hungary | −0.199 | 1.22 | +| Germany | -0.131 | 1.14 | +| Greece | -0.262 | 1.30 | +| Hungary | -0.199 | 1.22 | | Poland | 0.139 | 0.87 | | Russia | 0.857 | 0.43 | diff --git a/lectures/risk_aversion_or_mistaken_beliefs.md b/lectures/risk_aversion_or_mistaken_beliefs.md new file mode 100644 index 00000000..81f3b13e --- /dev/null +++ b/lectures/risk_aversion_or_mistaken_beliefs.md @@ -0,0 +1,1836 @@ +--- +jupytext: + text_representation: + extension: .md + format_name: myst +kernelspec: + display_name: Python 3 + language: python + name: python3 +--- + +(risk_aversion_or_mistaken_beliefs)= +```{raw} html + +``` + +# Risk Aversion or Mistaken Beliefs? 
+ +## Overview + +This lecture explores how *risk aversion* and *mistaken beliefs* are confounded in asset pricing data. + +In a rational expectations equilibrium containing a risk-averse representative investor, higher mean returns compensate for higher risks. + +But in a non-rational expectations model in which a representative investor holds beliefs that differ from "the econometrician's", observed average returns depend on *both* risk aversion *and* misunderstood return distributions. + +```{note} +Whether beliefs are 'correct' or 'wrong' is itself subjective -- it depends on an observer's point of view. +``` + +Wrong beliefs contribute what look like "stochastic discount factor shocks" when viewed from the perspective of an econometrician who trusts his model. + +Such divergent beliefs can potentially explain what look like countercyclical risk prices from the perspective of someone who trusts the +econometrician's model. + +A key building block of this model will be an econometrician's model that takes the form of a linear state-space model driven by Gaussian disturbances. + +This model will play two key roles: + +* it forms the perspective from which 'mistaken' beliefs and their consequences are viewed +* it forms the 'baseline' model of a dubious representative agent who distrusts it and wants to value asset payoffs by using alternative models that +seem to fit the historical data about as well as does the 'baseline' model. + +```{note} +When we discuss the setup with a twisted entropy ball below, the econometrician's model will be one of *two* baseline models that the distrustful agent is concerned about. +``` + +We'll formalize different beliefs in terms of divergent probability distributions. + +It is convenient to characterize those differences in terms of a likelihood ratio process, the object studied in this quantecon lecture +{doc}`intermediate:likelihood_ratio_process`. 
+ +Thus, we'll organize this lecture around a single mathematical device, namely, a **likelihood ratio**, a non-negative random variable with unit mean that twists one probability distribution into another. + +Likelihood ratios, equivalently multiplicative martingale increments, appear in at least four distinct roles in modern asset pricing: + +| Probability | Likelihood ratio | Describes | +|:--------------|:----------------------------------|:----------------------| +| Econometric | $1$ (no twist) | macro risk factors | +| Risk neutral | $m_{t+1}^\lambda$ | prices of risks | +| Mistaken | $m_{t+1}^w$ | experts' forecasts | +| Doubtful | $m_{t+1} \in \mathcal{M}$ | misspecification fears| + +Each of the key likelihood ratios in this lecture takes the log-normal form +$m_{t+1}^b = \exp(-b_t^\top \varepsilon_{t+1} - \frac{1}{2} b_t^\top b_t)$ +with $b_t = 0$, $\lambda_t$, $w_t$, or a worst-case distortion. + +The lecture draws primarily on three lines of work: + +1. {cite:t}`Lucas1978` and {cite:t}`hansen1983stochastic`: a representative investor's risk + aversion generates a likelihood ratio that prices risks. +2. {cite:t}`piazzesi2015trend`: survey data on professional forecasters + decompose the likelihood ratio into a smaller risk price and a belief distortion. +3. {cite:t}`hansen2020twisted` and {cite:t}`szoke2022estimating`: robust control theory + constructs twisted probability models from tilted discounted entropy balls to + price model uncertainty, generating state-dependent uncertainty prices that + explain puzzling term-structure movements. + +We start with some standard imports: + +```{code-cell} ipython3 +import numpy as np +import matplotlib.pyplot as plt +import pandas as pd +from scipy.linalg import solve_discrete_lyapunov +from numpy.linalg import eigvals, norm +from scipy.stats import norm as normal_dist +``` + +## Likelihood ratios and twisted densities + + +Let $\varepsilon$ denote a vector of risks to be taken and priced. 
+ +Under the econometrician's probability model, $\varepsilon$ has a standard multivariate normal density: + +```{math} +:label: eq_baseline + +\phi(\varepsilon) \propto \exp \left(-\frac{1}{2} \varepsilon^\top\varepsilon\right), \qquad \varepsilon \sim \mathcal{N}(0, I) +``` + +To twist this baseline density into another one, we multiply it by a non-negative random variable with unit mean called a **likelihood ratio**: + +```{math} +:label: eq_lr + +m(\varepsilon) = \exp \left(-\lambda^\top\varepsilon - \frac{1}{2} \lambda^\top\lambda\right) \geq 0 +``` + +The quadratic term $-\frac{1}{2}\lambda^\top\lambda$ in the exponent is precisely what guarantees $E m(\varepsilon) = 1$ when the mathematical expectation $E$ is taken with respect to the econometrician's model. + +Multiplying the baseline density by this likelihood ratio produces the **twisted density**: + +```{math} +:label: eq_twisted + +\hat\phi(\varepsilon) = m(\varepsilon) \phi(\varepsilon) \propto \exp \left(-\frac{1}{2}(\varepsilon + \lambda)^\top(\varepsilon + \lambda)\right) +``` + +Completing the square in the exponent reveals that this is a $\mathcal{N}(-\lambda, I)$ density. + +The likelihood ratio has shifted the mean of $\varepsilon$ from $0$ to $-\lambda$ while preserving the covariance. + +We will see this idea repeatedly in different contexts. + +````{exercise} +:label: lr_exercise_1 + +Verify that: + +1. $E m(\varepsilon) = 1$ by computing $\int m(\varepsilon) \phi(\varepsilon) d\varepsilon$ using the moment-generating function of a standard normal. +2. 
The twisted density $\hat\phi(\varepsilon) = m(\varepsilon) \phi(\varepsilon)$ is indeed $\mathcal{N}(-\lambda, I)$ by combining exponents: + +$$ +m(\varepsilon) \phi(\varepsilon) \propto \exp \left(-\lambda^\top\varepsilon - \tfrac{1}{2}\lambda^\top\lambda\right) \exp \left(-\tfrac{1}{2}\varepsilon^\top\varepsilon\right) = \exp \left(-\tfrac{1}{2}\bigl[\varepsilon^\top\varepsilon + 2\lambda^\top\varepsilon + \lambda^\top\lambda\bigr]\right) +$$ + +and complete the square to obtain $-\frac{1}{2}(\varepsilon + \lambda)^\top(\varepsilon + \lambda)$. + +```` + +````{solution} lr_exercise_1 +:class: dropdown + +For part 1, write $E m(\varepsilon) = \int \exp(-\lambda^\top\varepsilon - \tfrac{1}{2}\lambda^\top\lambda) \phi(\varepsilon) d\varepsilon = \exp(-\tfrac{1}{2}\lambda^\top\lambda) E[\exp(-\lambda^\top\varepsilon)]$. + +The moment-generating function of $\varepsilon \sim \mathcal{N}(0,I)$ (or expectation of the log-normal random variable) gives $E[\exp(-\lambda^\top\varepsilon)] = \exp(\tfrac{1}{2}\lambda^\top\lambda)$. + +So $E m(\varepsilon) = \exp(-\tfrac{1}{2}\lambda^\top\lambda)\exp(\tfrac{1}{2}\lambda^\top\lambda) = 1$. + +For part 2, combine the exponents: + +$$ +m(\varepsilon) \phi(\varepsilon) \propto \exp \left(-\tfrac{1}{2}\varepsilon^\top\varepsilon - \lambda^\top\varepsilon - \tfrac{1}{2}\lambda^\top\lambda\right) +$$ + +Recognise the argument as $-\tfrac{1}{2}(\varepsilon^\top\varepsilon + 2\lambda^\top\varepsilon + \lambda^\top\lambda) = -\tfrac{1}{2}(\varepsilon + \lambda)^\top(\varepsilon + \lambda)$. + +This is the kernel of a $\mathcal{N}(-\lambda, I)$ density. + +```` + +### Relative entropy + +How far apart are the baseline and twisted densities? 
+ +The **relative entropy** (Kullback-Leibler divergence) answers this question with a single number: + +```{math} +:label: eq_entropy + +E\bigl[m(\varepsilon)\log m(\varepsilon)\bigr] = \frac{1}{2} \lambda^\top\lambda +``` + +Because it equals half the squared length of $\lambda$, larger distortion vectors correspond to greater statistical distance between the two models. + +The vector $\lambda$ is the key object. + +Depending on context it represents *risk prices*, *belief distortions*, or *worst-case mean perturbations* under model uncertainty. + +For illustration, consider the scalar case $\varepsilon \in \mathbb{R}$ with $\lambda = 1.5$. + +```{code-cell} ipython3 +ε = np.linspace(-5, 5, 500) +λ_val = 1.5 + +ϕ_base = normal_dist.pdf(ε, 0, 1) +m_lr = np.exp(-λ_val * ε - 0.5 * λ_val**2) +ϕ_twist = m_lr * ϕ_base + +fig, axes = plt.subplots(1, 3, figsize=(14, 4)) + +axes[0].plot(ε, ϕ_base, 'steelblue', lw=2) +axes[0].set_xlabel(r"$\varepsilon$") + +axes[1].plot(ε, m_lr, 'firebrick', lw=2) +axes[1].axhline(1, color='grey', lw=0.8, ls='--') +axes[1].set_xlabel(r"$\varepsilon$") + +axes[2].plot(ε, ϕ_base, 'steelblue', lw=2, + ls='--', alpha=0.6, label='Baseline') +axes[2].plot(ε, ϕ_twist, 'firebrick', lw=2, + label='Twisted') +axes[2].set_xlabel(r"$\varepsilon$") +axes[2].legend() + +axes[0].set_ylabel("Density") +axes[1].set_ylabel("Likelihood ratio") +axes[2].set_ylabel("Density") +plt.tight_layout() +plt.show() +``` + +The left panel shows the baseline $\mathcal{N}(0,1)$ density. + +The middle panel shows the likelihood ratio $m(\varepsilon)$, which up-weights negative $\varepsilon$ values and down-weights positive ones when $\lambda > 0$. + +The right panel shows the resulting twisted density $\hat\phi(\varepsilon) = \mathcal{N}(-\lambda, 1)$. + + +## The econometrician's state-space model + +The econometrician works with a linear Gaussian state-space system at a *monthly* frequency. 
+ +The state $x_t$, an augmented $n \times 1$ vector, evolves according to: + +```{math} +:label: eq_state + +x_{t+1} = A x_t + C \varepsilon_{t+1} +``` + +The econometrician observes $y_{t+1}$, which is a noisy linear function of the state and the same shocks: + +```{math} +:label: eq_obs + +y_{t+1} = D x_t + G \varepsilon_{t+1} +``` + +The $k \times 1$ shock vector driving both equations is i.i.d. standard normal: + +```{math} +:label: eq_shocks + +\varepsilon_{t+1} \sim \mathcal{N}(0, I) +``` + +To accommodate constant terms easily, we assume that the first entry in the state vector is a constant $1$ so that + +$$ +x_t = \begin{bmatrix} 1 \\ \check{x}_t \end{bmatrix}, \qquad +A = \begin{bmatrix} 1 & 0 \\ 0 & \check{A} \end{bmatrix}, \qquad +C = \begin{bmatrix} 0 \\ \check{C} \end{bmatrix} +$$ + +where $\check{A}$ is a stable matrix, $\check{C}$ is square and invertible, and the first component of $x_0$ is $1$. + +With this convention, the number of shocks equals the dimension of the stochastic block: $k = n - 1$. + +Whenever we back out distortion coefficient matrices from alternative transition matrices, we invert only the lower block $\check{C}$ and compare the lower rows of the augmented transition matrices, not the full augmented matrix $C$. + +Whenever we refer to stability below, we mean stability of the stochastic block $\check{A}$ (or its distorted counterpart). + +The observation $y_{t+1}$ represents consumption growth, $c_{t+1} - c_t = D x_t + G \varepsilon_{t+1}$. + +Separately, the risk-free one-period interest rate is a linear function of the augmented state: + +$$ +r_t = \bar{r}^\top x_t +$$ + +```{figure} /_static/lecture_specific/risk_aversion_or_mistaken_beliefs/fig2_tom.png +The econometrician's model: estimated state dynamics. 
+``` + + +## Asset pricing with likelihood ratios + +### Risk-neutral rational expectations pricing + +The simplest benchmark is a risk-neutral representative investor with rational expectations, for whom the stock price $p_t$ (the ex-dividend market value of a claim to the stream $\{d_{t+j}\}_{j=1}^\infty$) is simply the discounted expected payoff: + +$$ +p_t = \exp(-r_t) E_t(p_{t+1} + d_{t+1}) +$$ + +The same logic applies maturity by maturity to the term structure: a zero-coupon risk-free claim to one dollar at time $t+n$ is priced by iterating the one-period discounting: + +```{math} +:label: eq_rn_recursion + +p_t(1) = \exp(-r_t), \qquad p_t(n+1) = \exp(-r_t) E_t p_{t+1}(n), \qquad p_t(n) = \exp(\bar{A}_n^{RN} + B_n^{RN} x_t) +``` + +The last equality states that bond prices take an **exponential-affine** form in the state. + +This is a consequence of the linear Gaussian structure and can be verified by substituting the guess into the recursion and matching coefficients (see Exercise 3 in {doc}`Affine Models of Asset Prices `). + +These formulas work "pretty well" for conditional means but less well for conditional variances, i.e. the Shiller *volatility puzzles*. + +### Modern asset pricing: adding risk aversion + +It would be convenient if versions of the same pricing formulas worked even when investors are risk averse or hold distorted beliefs. + +The likelihood ratio makes this possible. + +We now promote the static vector $\lambda$ from {eq}`eq_lr` to a *state-dependent* risk price vector. + +With a slight abuse of notation, we now let $\lambda$ denote a $k \times n$ **matrix** of risk price coefficients, so that $\lambda_t = \lambda x_t$ is a $k \times 1$ vector at each date $t$. + +In the code below, this matrix is the parameter `Λ`. 
+ +The likelihood ratio increment is + +```{math} +:label: eq_sdf_lr + +m_{t+1}^\lambda = \exp \left(-\lambda_t^\top \varepsilon_{t+1} - \frac{1}{2} \lambda_t^\top\lambda_t\right), \qquad \lambda_t = \lambda x_t +``` + +with $E_t m_{t+1}^\lambda = 1$ and $m_{t+1}^\lambda \geq 0$. + +The likelihood ratio $m_{t+1}^\lambda$ distorts the conditional distribution of $\varepsilon_{t+1}$ from $\mathcal{N}(0,I)$ to $\mathcal{N}(-\lambda x_t, I)$. + +Covariances of returns with $m_{t+1}^\lambda$ affect mean returns. + +This is the channel through which risk aversion prices risks. + +With this device, *modern asset pricing* replaces the ordinary conditional expectation with one that is tilted by the likelihood ratio. + +For stocks, the Lucas-Hansen pricing equation discounts the next-period payoff under the distorted measure: + +```{math} +:label: eq_stock_lr + +p_t = \exp(-r_t) E_t\bigl(m_{t+1}^\lambda (p_{t+1} + d_{t+1})\bigr) +``` + +For the term structure, Dai-Singleton-Backus-Zin pricing applies the same distortion recursively across maturities: + +```{math} +:label: eq_ts_lr + +p_t(1) = \exp(-r_t), \qquad p_t(n+1) = \exp(-r_t) E_t\bigl(m_{t+1}^\lambda p_{t+1}(n)\bigr), \qquad p_t(n) = \exp(\bar{A}_n + B_n x_t) +``` + +The coefficients $\bar{A}_n$, $B_n$ here differ from the risk-neutral coefficients $\bar{A}_n^{RN}$, $B_n^{RN}$ in {eq}`eq_rn_recursion` because the likelihood ratio modifies the recursion. + +### Risk-neutral dynamics + +The risk-neutral representation implies **twisted dynamics**. + +Under the twisted measure, define $\tilde\varepsilon_{t+1} := \varepsilon_{t+1} + \lambda_t = \varepsilon_{t+1} + \lambda x_t$. + +Since $\varepsilon_{t+1} \sim \mathcal{N}(0, I)$ under the econometrician's measure, $\tilde\varepsilon_{t+1}$ has mean $\lambda x_t$ and is *not* standard normal under that measure. 
+ +However, the likelihood ratio $m_{t+1}^\lambda \propto \exp\bigl(-(\lambda x_t)^\top \varepsilon_{t+1} - \tfrac{1}{2}\|\lambda x_t\|^2\bigr)$ tilts the probability measure in exactly the right way to absorb this shift. + +By the standard exponential tilting result for Gaussian models, $\tilde\varepsilon_{t+1} \sim \mathcal{N}(0, I)$ under the risk-neutral measure. + +Substituting $\varepsilon_{t+1} = \tilde\varepsilon_{t+1} - \lambda x_t$ into {eq}`eq_state` gives: + +```{math} +:label: eq_rn_dynamics + +x_{t+1} = (A - C\lambda) x_t + C \tilde\varepsilon_{t+1}, \qquad \tilde\varepsilon_{t+1} \sim \mathcal{N}(0,I) +``` + +The dependence of $\lambda_t = \lambda x_t$ on the state modifies the dynamics relative to the econometrician's model. + +### Expectation under a twisted distribution + +A useful notational shorthand captures the connection between the two measures: the expected value of $y_{t+1}$ under the twisted distribution can be computed as a likelihood-ratio-weighted average under the original distribution: + +$$ +\tilde{E}_t y_{t+1} = E_t m_{t+1} y_{t+1} +$$ + +With this notation, the term structure recursion under risk-neutral dynamics takes a particularly clean form: + +$$ +p_t(1) = \exp(-r_t), \qquad p_t(n+1) = \exp(-r_t) \tilde{E}_t p_{t+1}(n), \qquad p_t(n) = \exp(\tilde{\bar{A}}_n + \tilde{B}_n x_t) +$$ + +These are the same formulas as rational-expectations asset pricing, but expectations are taken with respect to a probability measure *twisted by risk aversion*. + +The derivation of the recursive bond price coefficients is the same as in Exercise 3 of {doc}`Affine Models of Asset Prices `, applied here under the risk-neutral dynamics {eq}`eq_rn_dynamics`. + +Now let's implement the state-space model and its asset pricing implications. + +```{code-cell} ipython3 +class LikelihoodRatioModel: + """ + Gaussian state-space model with an augmented constant state. 
+ + x_{t+1} = A x_t + C ε_{t+1}, ε ~ N(0,I), x_t[0] = 1 + y_{t+1} = D x_t + G ε_{t+1} + r_t = r_bar'x_t, λ_t = Λ x_t + """ + + def __init__(self, A, C, D, G, r_bar, Λ): + self.A = np.atleast_2d(A).astype(float) + self.C = np.atleast_2d(C).astype(float) + self.D = np.atleast_2d(D).astype(float) + self.G = np.atleast_2d(G).astype(float) + self.r_bar = np.asarray(r_bar, dtype=float) + self.Λ = np.atleast_2d(Λ).astype(float) + self.n = self.A.shape[0] + self.k = self.C.shape[1] + # risk-neutral dynamics + self.A_Q = self.A - self.C @ self.Λ + self.A_core = self.A[1:, 1:] + self.A_Q_core = self.A_Q[1:, 1:] + + def short_rate(self, x): + return self.r_bar @ x + + def risk_prices(self, x): + return self.Λ @ x + + def relative_entropy(self, x): + λ = self.risk_prices(x) + return 0.5 * λ @ λ + + def bond_coefficients(self, n_max): + """Bond price coefficients: log p_t(n) = A_bar_n + B_n' x_t.""" + A_bar = np.zeros(n_max + 1) + B = np.zeros((n_max + 1, self.n)) + B[1] = -self.r_bar + CCt = self.C @ self.C.T + for nn in range(1, n_max): + A_bar[nn + 1] = A_bar[nn] + 0.5 * B[nn] @ CCt @ B[nn] + B[nn + 1] = self.A_Q.T @ B[nn] - self.r_bar + return A_bar, B + + def yields(self, x, n_max): + """Yield curve: y_t(n) = -(A_bar_n + B_n'x_t) / n.""" + A_bar, B = self.bond_coefficients(n_max) + return np.array([(-A_bar[n] - B[n] @ x) / n + for n in range(1, n_max + 1)]) + + def simulate(self, x0, T, rng=None): + """Simulate under the econometrician's model.""" + if rng is None: + rng = np.random.default_rng(0) + X = np.zeros((T + 1, self.n)) + X[0] = x0 + for t in range(T): + X[t + 1] = self.A @ X[t] + self.C @ rng.standard_normal(self.k) + X[t + 1, 0] = 1.0 + return X + + def simulate_twisted(self, x0, T, rng=None): + """Simulate under the risk-neutral (twisted) model.""" + if rng is None: + rng = np.random.default_rng(0) + X = np.zeros((T + 1, self.n)) + X[0] = x0 + for t in range(T): + X[t + 1] = self.A_Q @ X[t] + self.C @ rng.standard_normal(self.k) + X[t + 1, 0] = 1.0 + return X + 
+ +def augment_state_space(A_core, C_core, D_core, G, r_bar_core, r_const=0.0): + """Add a leading constant state x0_t = 1 to a linear Gaussian model.""" + A_core = np.atleast_2d(A_core).astype(float) + C_core = np.atleast_2d(C_core).astype(float) + D_core = np.atleast_2d(D_core).astype(float) + G = np.atleast_2d(G).astype(float) + r_bar_core = np.asarray(r_bar_core, dtype=float) + + n_core = A_core.shape[0] + k = C_core.shape[1] + + A = np.eye(n_core + 1) + A[1:, 1:] = A_core + + C = np.zeros((n_core + 1, k)) + C[1:, :] = C_core + + D = np.hstack([np.zeros((D_core.shape[0], 1)), D_core]) + r_bar = np.concatenate(([r_const], r_bar_core)) + return A, C, D, G, r_bar + + +def augment_state(x_core): + """Augment a stochastic state vector with a leading constant 1.""" + return np.concatenate(([1.0], np.asarray(x_core, dtype=float))) +``` + +### Example: a two-factor model + +We set up a two-factor model with a persistent "level" factor and a +less persistent "slope" factor, mimicking the U.S. yield curve. + +```{code-cell} ipython3 +A_core = np.array([[0.97, -0.03], + [0.00, 0.90]]) + +C_core = np.array([[0.007, 0.000], + [0.000, 0.010]]) + +D_core = np.array([[0.5, 0.3]]) # consumption growth loading +G = np.array([[0.004, 0.003]]) # consumption shock loading + +r_const = 0.004 # short rate intercept (~4.8% annual) +r_bar_core = np.array([0.06, 0.04]) # short rate loading + +# Augment with a leading constant state +A, C, D, G, r_bar = augment_state_space( + A_core, C_core, D_core, G, r_bar_core, r_const=r_const +) + +# Risk prices: no constant loading in this benchmark calibration +Λ = np.hstack([ + np.zeros((2, 1)), + np.array([[-3.0, 0.0], + [ 0.0, -6.0]]) +]) + +model = LikelihoodRatioModel(A, C, D, G, r_bar, Λ) + +print(f"Eigenvalues of check(A): {eigvals(model.A_core).round(4)}") +print(f"Eigenvalues of check(A_Q): {eigvals(model.A_Q_core).round(4)}") +assert all(np.abs(eigvals(model.A_Q_core)) < 1), "check(A_Q) must be stable!" 
+``` + +The yield curve's shape depends on the current state $x_t$. + +We evaluate the model at three representative states to see how the two factors, level and slope, generate upward-sloping, relatively flat, and inverted yield curves. + +```{code-cell} ipython3 +n_max = 120 +maturities = np.arange(1, n_max + 1) + +states = { + "Normal (upward-sloping)": augment_state(np.array([-0.005, -0.015])), + "Relatively flat": augment_state(np.array([ 0.008, -0.005])), + "Inverted": augment_state(np.array([ 0.020, 0.010])), +} + +fig, ax = plt.subplots(figsize=(9, 5)) +for label, x in states.items(): + y = model.yields(x, n_max) * 1200 # annualise (monthly, x1200) + ax.plot(maturities, y, lw=2, label=label) + +ax.set_xlabel("Maturity (months)") +ax.set_ylabel("Yield (annualised %)") +ax.legend() +plt.tight_layout() +plt.show() +``` + +### Econometrician's model vs. risk-neutral model + +The econometrician estimates state dynamics under the **physical measure** $P$, which governs the actual data-generating process: + +$$ +x_{t+1} = A x_t + C \varepsilon_{t+1}, \qquad \varepsilon_{t+1} \sim \mathcal{N}(0,I) \text{ under } P +$$ + +The **risk-neutral measure** $Q$ is the probability distribution twisted by the likelihood ratio $m_{t+1}^\lambda$. + +Under $Q$, the state evolves as + +$$ +x_{t+1} = A_Q x_t + C \tilde\varepsilon_{t+1}, \qquad \tilde\varepsilon_{t+1} \sim \mathcal{N}(0,I) \text{ under } Q +$$ + +where $A_Q = A - C\lambda$. + +The two measures agree on which events are possible but disagree on their probabilities. + +The physical measure $P$ governs forecasting and estimation, while the risk-neutral measure $Q$ governs asset pricing. 
+ +```{code-cell} ipython3 +print("A:\n", model.A) +print("\nA_Q = A - CΛ:\n", model.A_Q) +print(f"\nEigenvalues of check(A): {eigvals(model.A_core).round(4)}") +print(f"Eigenvalues of check(A_Q): {eigvals(model.A_Q_core).round(4)}") +``` + +To see the difference in action, we simulate both models from the same initial state using the same shock sequence. + +Both simulations draw the same standard normal random vectors, but the transition matrices $A$ and $A_Q$ govern how those shocks cumulate over time. + +```{code-cell} ipython3 +T = 300 +x0 = augment_state(np.array([0.01, 0.005])) +rng1 = np.random.default_rng(123) +rng2 = np.random.default_rng(123) # same seed for comparability + +X_econ = model.simulate(x0, T, rng=rng1) +X_rn = model.simulate_twisted(x0, T, rng=rng2) + +fig, axes = plt.subplots(2, 1, figsize=(10, 7), sharex=True) +for idx, (ax, lab) in zip([1, 2], zip(axes, ["Level factor", "Slope factor"])): + ax.plot(X_econ[:, idx], 'steelblue', lw=2, + label="Econometrician (P)") + ax.plot(X_rn[:, idx], 'firebrick', lw=2, + alpha=0.8, label="Risk-neutral (Q)") + ax.set_ylabel(lab) + ax.legend() + +axes[1].set_xlabel("Period") +plt.tight_layout() +plt.show() +``` + +Both factors are more persistent under $Q$ than under $P$: the eigenvalues of the stochastic block $\check{A}_Q$ are closer to unity than those of $\check{A}$. + +The risk-neutral paths (red) exhibit wider swings and slower mean reversion. + +This is because the negative risk prices $\lambda$ in our calibration make $A_Q = A - C\lambda$ larger, slowing the rate at which factors revert to zero. + +The gap between the $P$ and $Q$ dynamics is precisely what generates a term premium in bond yields, because long bonds are priced under $Q$, where risks look more persistent. 
+ +## An identification challenge + +The risk price vector $\lambda_t = \lambda x_t$ can be interpreted as either: + +- a **risk price vector** expressing the representative agent's risk aversion, or +- the representative agent's **belief distortion** relative to the econometrician's + model. + +Because the pricing formulas {eq}`eq_stock_lr` to {eq}`eq_ts_lr` depend only on the composite $\lambda_t$, not on whether it reflects risk aversion or belief distortion, the two interpretations produce identical asset prices and econometric fits. + +> Relative to the model of a risk-averse representative investor with rational +> expectations, a model of a risk-neutral investor with appropriately mistaken +> beliefs produces *observationally equivalent* predictions. + +This insight was articulated by {cite:t}`HST_1999` and +{cite:t}`piazzesi2015trend`. + +To distinguish risk aversion from belief distortion, one needs either +*more information* (the PSS approach using survey data) or *more theory* +(the Hansen-Szőke robust control approach), or both (the {cite:t}`szoke2022estimating` approach). + +```{code-cell} ipython3 +x_test = augment_state(np.array([0.01, 0.005])) +y_risk_averse = model.yields(x_test, 60) * 1200 + +# Mistaken belief model +model_mistaken = LikelihoodRatioModel( + A=model.A_Q, C=C, D=D, G=G, + r_bar=r_bar, Λ=np.zeros_like(Λ) +) +y_mistaken = model_mistaken.yields(x_test, 60) * 1200 + +fig, ax = plt.subplots(figsize=(8, 5)) +ax.plot(np.arange(1, 61), y_risk_averse, 'steelblue', lw=2, + label='Risk averse + rational expectations') +ax.plot(np.arange(1, 61), y_mistaken, 'firebrick', lw=2, ls='--', + label='Risk neutral + mistaken beliefs') +ax.set_xlabel("Maturity (months)") +ax.set_ylabel("Yield (annualised %)") +ax.legend() +plt.tight_layout() +plt.show() +``` + +The two yield curves are identical. + +Without additional information (e.g., surveys of forecasters), we cannot tell them apart from asset price data alone. 
+ + +## More information: experts' forecasts (PSS) + +### The PSS framework + +{cite:t}`piazzesi2015trend` (henceforth PSS) exploit data on professional forecasters' expectations to decompose the likelihood ratio into risk prices and belief distortions. + +Their setup posits: + +- The representative agent's risk aversion leads him to price risks + $\varepsilon_{t+1}$ with prices $\lambda_t^* = \lambda^* x_t$, where $\lambda^*$ is a $k \times n$ matrix. +- The representative agent has **twisted beliefs** $(A^*, C) = (A - C W^*, C)$ + relative to the econometrician's model $(A, C)$, where $W^*$ is a $k \times n$ matrix of belief distortion coefficients, so that $w_t^* = W^* x_t$. +- Professional forecasters use the twisted beliefs $(A^*, C)$ to answer + survey questions about their forecasts. + +### Estimation strategy + +PSS proceed in four steps: + +1. Use data $\{x_t\}_{t=0}^T$ to estimate the econometrician's model $A$, $C$. +2. Project experts' one-step-ahead forecasts $E_t^*[x_{t+1}]$ on $x_t$ to obtain + $E_t^*[x_{t+1}] = A^* x_t$ and interpret $A^*$ as incorporating belief + distortions. +3. Back out the mean distortion matrix from the stochastic block: + + $$ + W^* = -\check{C}^{-1}(A^*_{2:n,\cdot} - A_{2:n,\cdot}) + $$ + + so that $w_t^* = W^* x_t$ is the state-dependent mean shift applied to the + density of $\varepsilon_{t+1}$. (This requires $\check{C}$ to be invertible.) +4. Reinterpret the $\lambda$ estimated by the rational-expectations econometrician + as $\lambda = \lambda^* + W^*$, where $\lambda_t^* = \lambda^* x_t$ is the + (smaller) price of risk vector actually charged by the representative agent with + distorted beliefs. 
+ +An econometrician who mistakenly imposes rational expectations estimates risk prices $\lambda_t = \lambda x_t$ that sum two parts: +- *smaller risk prices* $\lambda_t^* = \lambda^* x_t$ actually charged by the + representative agent with mistaken beliefs, and +- *conditional mean distortions* $w_t^* = W^* x_t$ of the risks $\varepsilon_{t+1}$ that + the twisted-beliefs representative agent's model displays relative to the + econometrician's. + +We illustrate this using a simple numerical example with the same two-factor structure as above. + +PSS find that experts perceive the level and slope of the yield curve to be *more persistent* than the econometrician's estimates imply. + +Hence we set up the numbers to reflect that finding, with the experts' subjective transition matrix $A^*$ having larger eigenvalues than the econometrician's $A$ + + +```{code-cell} ipython3 +A_econ_core = np.array([[0.97, -0.03], + [0.00, 0.90]]) + +A_star_core = np.array([[0.985, -0.025], # experts' subjective transition + [0.000, 0.955]]) + +C_mat_core = np.array([[0.007, 0.000], + [0.000, 0.010]]) + +A_econ, C_mat, _, _, _ = augment_state_space( + A_econ_core, C_mat_core, np.zeros((1, 2)), np.zeros((1, 2)), np.zeros(2) +) +A_star, _, _, _, _ = augment_state_space( + A_star_core, C_mat_core, np.zeros((1, 2)), np.zeros((1, 2)), np.zeros(2) +) + +# Belief distortion recovered from the stochastic block +W_star = -np.linalg.solve(C_mat[1:, :], A_star[1:, :] - A_econ[1:, :]) + +Λ_total = np.hstack([ + np.zeros((2, 1)), + np.array([[-3.0, 0.0], + [ 0.0, -6.0]]) +]) +Λ_true = Λ_total - W_star # true risk prices + +print("Belief distortion W*:\n", W_star.round(3)) +print("\nTotal risk prices Λ:\n", Λ_total.round(3)) +print("\nTrue risk prices Λ*:\n", Λ_true.round(3)) +``` + +```{code-cell} ipython3 +x_grid = np.linspace(-0.02, 0.04, 200) +fig, axes = plt.subplots(1, 2, figsize=(12, 5)) + +for i, (ax, lab) in enumerate(zip(axes, + ["Level factor risk price", "Slope factor risk price"])): + 
x_vals = np.ones((200, 3)) + x_vals[:, 1:] = 0.005 + x_vals[:, i + 1] = x_grid + + λ_total = np.array([Λ_total @ x for x in x_vals])[:, i] + λ_true = np.array([Λ_true @ x for x in x_vals])[:, i] + + ax.plot(x_grid, λ_total, 'steelblue', lw=2, + label=r"$\lambda_t$ (RE econometrician)") + ax.plot(x_grid, λ_true, 'seagreen', lw=2, + label=r"$\lambda^*_t$ (true risk price)") + ax.fill_between(x_grid, λ_true, λ_total, alpha=0.15, color='firebrick', + label=r"$w^*_t$ (belief distortion)") + ax.axhline(0, color='black', lw=0.5) + ax.set_xlabel(f"Factor $\\check{{x}}_{{{i+1},t}}$") + ax.set_ylabel(lab) + ax.legend() + +plt.tight_layout() +plt.show() +``` + +Subjective risk prices $\lambda^* x_t$ vary less than the $\lambda x_t$ estimated by the rational-expectations econometrician. + +However, PSS offer no explanation for *why* beliefs are distorted. + +Are they mistakes, ignorance of good econometrics, or something else? + +## A theory of belief distortions: robust control + +The standard justification for rational expectations treats it as the outcome of learning from an infinite history: least-squares learning converges to rational expectations. + +That argument requires agents to know correct functional forms and relies on a stochastic approximation argument that partitions dynamics into a fast part (justifying a law of large numbers) and a slow part (justifying an ODE). + +However, long intertemporal dependencies make *rates of convergence slow*. + +Good econometricians have limited data and only hunches about functional forms, and they fear that their fitted models are incorrect. 
+ +An agent who is like a good econometrician: + +- has a parametric model estimated from limited data, +- acknowledges that many other specifications fit nearly as well, including other parameter + values, other functional forms, omitted variables, neglected nonlinearities, + and history dependencies, +- fears that one of those other models actually prevails, and +- seeks "good enough" decisions under *all* such alternative models, i.e. **robustness**. + +Robust control theory formalises this idea by having the agent optimally distort probability assessments toward a worst-case scenario, producing belief distortions that look like the "mistakes" identified by PSS but that arise from a coherent response to model uncertainty rather than from ignorance. + +(See {doc}`Robustness ` for the decision-theoretic foundations of multiplier and constraint preferences.) + +### Hansen's dubious agent + +Inspired by robust control theory, consider a dubious investor who: + +- shares the econometrician's model $A$, $C$, $D$, $G$, +- expresses doubts by using a continuum of likelihood ratios to form a **discounted + entropy ball** of size $\eta$ around the econometrician's model, +- wants a valuation that is good for every model in the entropy ball, and +- constructs a *lower bound* on values and a *worst-case model* that attains it. + +Under the econometrician's linear Gaussian model with shocks $\varepsilon_{t+1} \sim \mathcal{N}(0,I)$: + +$$ +c_{t+1} - c_t = D x_t + G \varepsilon_{t+1}, \qquad x_{t+1} = A x_t + C \varepsilon_{t+1} +$$ + +the dubious agent's value function is + +$$ +V(x_0, c_0) := E \left[\sum_{t=0}^{\infty} \beta^t c_t \middle| x_0, c_0\right] = c_0 + \beta E \left[V(x_1, c_1) \middle| x_0, c_0\right] +$$ + +Note that the objective is *linear* in consumption. + +There is no concave utility function $u(c_t)$. + +All aversion to uncertainty here comes from the *worst-case model selection* (the $\min$ over likelihood ratios below), not from utility curvature. 
+ +This separation is a key feature of the robust control approach: the agent expresses doubt through the entropy ball, rather than through a curved utility function. + +### The sequence problem + +The dubious agent solves a *min* problem in which a malevolent "nature" chooses the worst-case probability distortion subject to an entropy budget. + +Nature's instrument is a sequence of one-step likelihood ratios $m_{t+1}$, each of which distorts the conditional distribution of $\varepsilon_{t+1}$ given information at $t$. + +These increments cumulate into a date-$t$ likelihood ratio $M_t = \prod_{s=0}^{t-1} m_{s+1}$ (with $M_0 = 1$) that converts the econometrician's probability measure into the distorted one. + +The objective, evaluated under the econometrician's measure, weights each period's consumption by $M_t$: + +```{math} +:label: eq_hansen_seq + +J(x_0, c_0 \mid \eta) := \min_{\{m_{t+1}\}} E \left[\sum_{t=0}^{\infty} \beta^t M_t c_t \middle| x_0, c_0\right] +``` + +The minimisation is subject to three sets of constraints. + +First, the economy evolves according to the econometrician's model: + +$$ +c_{t+1} - c_t = D x_t + G \varepsilon_{t+1}, \qquad x_{t+1} = A x_t + C \varepsilon_{t+1} +$$ + +Second, nature's total probability distortion, measured by discounted entropy, must remain within a budget $\eta$: + +$$ +E \left[\sum_{t=0}^{\infty} \beta^t M_t E \left[m_{t+1}\log m_{t+1} \middle| x_t, c_t\right] \middle| x_0, c_0\right] \leq \eta +$$ + +Third, the incremental likelihood ratios must be valid probability distortions that cumulate multiplicatively: + +$$ +M_{t+1} = M_t m_{t+1}, \qquad E[m_{t+1} \mid x_t, c_t] = 1, \qquad M_0 = 1 +$$ + +The cumulative likelihood ratio $M_t = \prod_{s=0}^{t-1} m_{s+1}$ converts the original probability measure into the distorted one. + +The $M_t$ weighting ensures entropy is measured under the *distorted* measure and the $\beta^t$ discounting means future divergences are penalised less, admitting persistent alternatives. 
+
+The likelihood ratio process $\{M_t\}_{t=0}^{\infty}$ is a multiplicative **martingale**.
+
+```{figure} /_static/lecture_specific/risk_aversion_or_mistaken_beliefs/eggs_backus.png
+Discounted entropy ball around the econometrician's model.
+```
+
+### Why discounted entropy?
+
+Discounted entropy includes models that undiscounted entropy excludes.
+
+Undiscounted entropy over infinite sequences requires alternative models to share the same long-run averages as the baseline, thereby excluding models that differ only in persistent, low-frequency dynamics.
+
+But those persistent alternatives are precisely the models that are hardest to distinguish from the econometrician's model with finite data and that matter most for pricing long-lived assets.
+
+Discounted entropy, by treating future divergences less severely, admits these statistically elusive but economically important alternatives into the set of models that the dubious agent contemplates.
+
+### Entropy and the likelihood ratio
+
+When the likelihood ratio takes the log-normal form used throughout this lecture, entropy simplifies dramatically.
+
+Writing the one-step likelihood ratio in terms of a distortion vector $w_t$:
+
+$$
+m_{t+1} := \exp \left(-\frac{w_t^\top w_t}{2} - w_t^\top \varepsilon_{t+1}\right)
+$$
+
+and computing $E[m_{t+1}\log m_{t+1} \mid x_t]$, note that the $m_{t+1}$-weighting shifts the conditional mean of $\varepsilon_{t+1}$ to $-w_t$, so the cross term $-w_t^\top \varepsilon_{t+1}$ contributes $+w_t^\top w_t$; combined with the constant $-\frac{1}{2} w_t^\top w_t$, this leaves:
+
+$$
+E \left[m_{t+1}\log m_{t+1} \middle| x_t, c_t\right] = \frac{1}{2} w_t^\top w_t
+$$
+
+This means that conditional entropy equals half the squared norm of the distortion vector -- it measures how far the distorted mean $-w_t$ is from the baseline mean of zero.
+ +Substituting this expression into {eq}`eq_hansen_seq` and performing a change of measure (replacing $E[\cdot]$ with $E^w[\cdot]$ under the distorted model) yields a reformulated problem in which nature directly chooses the mean shift $w_t$ rather than a likelihood ratio: + +```{math} +:label: eq_hansen_reform + +J(x_0, c_0 \mid \eta) := \min_{\{w_t\}} E^w \left[\sum_{t=0}^{\infty} \beta^t c_t \middle| x_0, c_0\right] +``` + +Under the distorted measure, $\tilde\varepsilon_{t+1} \sim \mathcal{N}(0, I)$, and we have substituted $\varepsilon_{t+1} = \tilde\varepsilon_{t+1} - w_t$ so that the mean shift $-w_t$ appears explicitly in the dynamics: + +$$ +c_{t+1} - c_t = D x_t + G (\tilde\varepsilon_{t+1} - w_t), \qquad x_{t+1} = A x_t + C (\tilde\varepsilon_{t+1} - w_t) +$$ + +The entropy constraint now takes the transparent form of bounding the cumulative squared distortion: + +$$ +\frac{1}{2} E^w \left[\sum_{t=0}^{\infty} \beta^t w_t^\top w_t \middle| x_0, c_0\right] \leq \eta +$$ + +The shift $-w_t$ *reduces* expected consumption growth by $G w_t$ and shifts the state dynamics by $-C w_t$, which is how the worst-case model makes the agent worse off. + +### Outcome: constant worst-case distortion + +Because the econometrician's model is linear Gaussian and the entropy constraint is a scalar bound $\eta$, the worst-case mean distortion turns out to be a *constant vector*: + +$$ +w_t = \bar{w} +$$ + +The consequence is that the contribution of $w_t$ to risk prices is *state-independent*. + +This does *not* help explain countercyclical prices of risk (or prices of model uncertainty), motivating the more refined "tilted" entropy ball in the next section. + +We compute $\bar{w}$ using the multiplier formulation developed in {ref}`the preceding section `, in which the parameter $\theta$ penalises entropy: larger $\theta$ means less concern about misspecification. 
+ +In the multiplier formulation, the agent minimises + +$$ +E^w \left[\sum_{t=0}^\infty \beta^t \bigl(c_t + \tfrac{\theta}{2} w_t^\top w_t\bigr)\right] +$$ + +over $\{w_t\}$ subject to the shifted dynamics. + +Since $c_t = c_0 + \sum_{s=0}^{t-1}(D x_s + G \varepsilon_{s+1})$ and $\varepsilon_{s+1} = \tilde\varepsilon_{s+1} - w_s$, the first-order condition for $w_t$ balances the entropy penalty $\theta w_t$ against the marginal effect on discounted consumption: + +$$ +\theta \bar{w} = \frac{\beta}{1-\beta} G^\top + \beta C^\top v +$$ + +where $v$ solves $v = \frac{\beta}{1-\beta} D^\top + \beta A^\top v$, or equivalently $v = \beta (I - \beta A^\top)^{-1} D^\top / (1-\beta)$. + +The vector $v$ captures the discounted cumulative effect of a unit change in $x_t$ on future consumption. + +```{code-cell} ipython3 +def hansen_worst_case(A, C, D, G, β, θ): + """Constant worst-case distortion w_bar for Hansen's dubious agent.""" + n = A.shape[0] + v = β * np.linalg.solve(np.eye(n) - β * A.T, D.T.flatten()) / (1 - β) + w_bar = (1.0 / θ) * (β / (1 - β) * G.T.flatten() + β * C.T @ v) + return w_bar + + +β = 0.995 +θ_values = [0.5, 1.0, 2.0, 5.0] + +print(f"{'θ':>6} {'w_bar[0]':>10} {'w_bar[1]':>10} {'Entropy':>10}") +print("-" * 42) +for θ in θ_values: + w = hansen_worst_case(A, C, D, G, β, θ) + print(f"{θ:>6.1f} {w[0]:>10.4f} {w[1]:>10.4f} {0.5 * w @ w:>10.4f}") +``` + +The worst-case distortion $\bar{w}$ is constant: it does not depend on the state $x_t$. + +Larger $\theta$ (less concern about misspecification) yields a smaller distortion. + +````{exercise} +:label: lr_exercise_2 + +Derive the formula for $\bar{w}$. + +1. Write the discounted consumption path as $\sum_{t=0}^\infty \beta^t c_t = \frac{c_0}{1-\beta} + \sum_{t=0}^\infty \beta^t \sum_{s=0}^{t-1}(D x_s - G w_s + G \tilde\varepsilon_{s+1})$. +2. Use the state recursion $x_{t+1} = A x_t - C w_t + C \tilde\varepsilon_{t+1}$ and take first-order conditions with respect to the constant $w_t = \bar{w}$. +3. 
Verify that the first-order condition gives $\theta \bar{w} = \frac{\beta}{1-\beta} G^\top + \beta C^\top v$ with $v = \beta(I - \beta A^\top)^{-1} D^\top / (1-\beta)$. +4. Check numerically that larger $\theta$ brings $\bar{w}$ closer to zero. + +```` + +````{solution} lr_exercise_2 +:class: dropdown + +For part 1, the consumption increment $\Delta c_{s+1} = Dx_s - G\bar{w} + G\tilde\varepsilon_{s+1}$ at date $s$ enters $c_t$ for every $t \geq s+1$, with total discounted weight $\frac{\beta^{s+1}}{1-\beta}$. + +Swapping the order of summation: + +$$ +E \left[\sum_{t=0}^\infty \beta^t c_t\right] = \frac{c_0}{1-\beta} + \frac{1}{1-\beta}\sum_{s=0}^\infty \beta^{s+1}\bigl(D E[x_s] - G\bar{w}\bigr) +$$ + +For part 2, define $S = \sum_{s=0}^\infty \beta^{s+1} E[x_s]$. + +From $E[x_{s+1}] = A E[x_s] - C\bar{w}$, multiply both sides by $\beta^{s+2}$ and sum over $s = 0, 1, \ldots$: + +$$ +S - \beta x_0 = \beta A S - \frac{\beta^2}{1-\beta}C\bar{w} +$$ + +Solving: $S = (I - \beta A)^{-1} \left(\beta x_0 - \frac{\beta^2}{1-\beta}C\bar{w}\right)$. + +Substituting back, the expected objective $E[\sum \beta^t(c_t + \frac{\theta}{2}\|\bar{w}\|^2)]$ depends on $\bar{w}$ only through the term $\frac{1}{1-\beta}(D S - \frac{\beta}{1-\beta}G\bar{w}) + \frac{\theta}{2(1-\beta)}\|\bar{w}\|^2$. + +For part 3, differentiate with respect to $\bar{w}$ and set to zero. + +The only part of $S$ that depends on $\bar{w}$ is $-\frac{\beta^2}{1-\beta}(I-\beta A)^{-1}C\bar{w}$, so $\nabla_{\bar{w}}(D S) = -\frac{\beta^2}{1-\beta}C^\top(I - \beta A^\top)^{-1}D^\top$. + +The first-order condition is: + +$$ +\frac{\theta}{1-\beta}\bar{w} = \frac{1}{1-\beta} \left(\frac{\beta}{1-\beta}G^\top + \frac{\beta^2}{1-\beta}C^\top(I - \beta A^\top)^{-1}D^\top\right) +$$ + +Simplifying: $\theta\bar{w} = \frac{\beta}{1-\beta}G^\top + \beta C^\top v$, where $v = \frac{\beta}{1-\beta}(I - \beta A^\top)^{-1}D^\top$. 
+ +Therefore $\bar{w} = \frac{1}{\theta}\bigl(\frac{\beta}{1-\beta}G^\top + \beta C^\top v\bigr)$. + +For part 4, as $\theta \to \infty$, $\bar{w} = \frac{1}{\theta}(\cdots) \to 0$, which the numerical table confirms. + +```` + +(mult_pref_section)= +## Multiplier preferences + +The constraint formulation {eq}`eq_hansen_seq` bounds discounted entropy by $\eta$, but an equivalent **multiplier** formulation replaces the constraint with a penalty term weighted by a Lagrange multiplier $\theta$. + +The **multiplier preference** version of the dubious agent's problem is: + +```{math} +:label: eq_mult_seq + +\hat{J}(x_0, c_0 \mid \theta) := \min_{\{m_{t+1}\}} E \left[\sum_{t=0}^{\infty} \beta^t M_t\bigl(c_t + \theta m_{t+1}\log m_{t+1}\bigr) \middle| x_0, c_0\right] +``` + +with $M_{t+1} = M_t m_{t+1}$, $E[m_{t+1} \mid x_t, c_t] = 1$, $M_0 = 1$. + +To derive a Bellman equation, write the value as today's consumption plus the worst-case continuation: + +$$ +\hat{J}(x_t, c_t \mid \theta) = c_t + \min_{m_{t+1}} E \left[m_{t+1}\bigl[\beta \hat{J}(x_{t+1}, c_{t+1}) + \theta\log m_{t+1}\bigr] \middle| x_t, c_t\right] +$$ + +Solving the inner minimisation analytically (by completing the square in the exponential family) yields a closed-form expression: + +$$ += c_t - \theta\log E \left[\exp \left(-\frac{\beta \hat{J}(x_{t+1}, c_{t+1})}{\theta}\right) \middle| x_t, c_t\right] +$$ + +The second line defines the **risk-sensitivity operator** $T_t$: + +$$ +=: c_t + T_t \left[\beta \hat{J}(x_{t+1}, c_{t+1})\right] +$$ + +The minimising likelihood ratio that attains this value is: + +$$ +m_{t+1}^* \propto \exp \left(-\frac{\beta \hat{J}(x_{t+1}, c_{t+1})}{\theta}\right) +$$ + +By Lagrange multiplier theory, for the **corresponding dual pair** $(\tilde\theta, \eta)$, + +$$ +\hat{J}(x_t, c_t \mid \tilde\theta) = J(x_t, c_t \mid \eta) + \tilde\theta \eta +$$ + +Each choice of $\tilde\theta$ in the multiplier problem corresponds to a particular entropy bound $\eta(\tilde\theta)$ in the 
constraint problem, so the two formulations are equivalent.
+
+The operator $T_t$ defined above is a **risk-sensitivity operator** that maps the continuation value through an exponential tilt, downweighting good outcomes and upweighting bad ones.
+
+```{code-cell} ipython3
+def T_operator(V, θ, probs=None):
+    """Risk-sensitivity operator: T[V] = -θ log E[exp(-V/θ)].
+
+    Parameters
+    ----------
+    V : array_like
+        Sample of continuation values over which the expectation is taken.
+    θ : float
+        Robustness parameter: larger θ means less concern about
+        misspecification, and T[V] approaches E[V] as θ grows.
+    probs : array_like, optional
+        Probability weights attached to the entries of V; defaults to
+        uniform weights over the sample.
+    """
+    if probs is None:
+        probs = np.ones(len(V)) / len(V)
+    # Compute -θ log Σ_i p_i exp(-V_i/θ) via the log-sum-exp trick:
+    # subtracting the maximum before exponentiating avoids overflow.
+    V_s = -V / θ
+    max_v = np.max(V_s)
+    return -θ * (max_v + np.log(np.sum(probs * np.exp(V_s - max_v))))
+
+# Monte Carlo sample of continuation values V ~ N(5, 1)
+rng = np.random.default_rng(0)
+V_samples = rng.normal(loc=5.0, scale=1.0, size=10_000)
+E_V = np.mean(V_samples)
+
+# Sweep θ on a log grid and compare T_θ[V] against the risk-neutral E[V]
+θ_grid = np.logspace(-1, 3, 50)
+T_vals = [T_operator(V_samples, θ) for θ in θ_grid]
+
+fig, ax = plt.subplots(figsize=(8, 5))
+ax.semilogx(θ_grid, T_vals, 'firebrick', lw=2, label=r"$T_\theta[V]$")
+ax.axhline(E_V, color='steelblue', lw=1.5,
+           ls='--', label=r"$E[V]$ (risk neutral)")
+ax.set_xlabel(r"Robustness parameter $\theta$")
+ax.set_ylabel("Value")
+ax.legend()
+ax.annotate(r"$\theta \to \infty$: risk neutral",
+            xy=(500, E_V), fontsize=11, color='steelblue',
+            xytext=(50, E_V - 0.8),
+            arrowprops=dict(arrowstyle='->', color='steelblue'))
+plt.tight_layout()
+plt.show()
+```
+
+As $\theta \to \infty$, the risk-sensitivity operator converges to the ordinary expectation $E[V]$, and the agent becomes risk neutral.
+
+As $\theta$ shrinks, the operator places more weight on bad outcomes, reflecting greater concern about model misspecification.
+ +## Tilting the entropy ball + +### Hansen and Szőke's more refined dubious agent + +To generate *state-dependent* uncertainty prices, Hansen and Szőke introduce a more refined dubious agent who: + +- shares the econometrician's model $A$, $C$, $D$, $G$, +- expresses doubts by using a continuum of likelihood ratios to form a + discounted entropy ball around the econometrician's model, *and* +- also insists that some martingales representing particular alternative + *parametric* models be included in the discounted entropy ball. + +The inclusion of those alternative parametric models *tilts* the entropy ball, which affects the worst-case model in a way that can produce countercyclical uncertainty prices. + +"Tilting" means replacing the constant entropy bound $\eta$ with a state-dependent bound $\xi(x_t)$ that is larger in states where the feared parametric alternative deviates more from the baseline. + +### The feared parametric model + +The investor wants the entropy ball to be large enough to include specific alternative models whose conditional entropy at each date is: + +$$ +E_t \left[\bar{m}_{t+1}\log\bar{m}_{t+1}\right] = \frac{1}{2} \bar{w}_t^\top \bar{w}_t =: \frac{1}{2}\xi(x_t) +$$ + +The function $\xi(x_t)$ measures how far the feared model's conditional distribution deviates from the baseline at state $x_t$, and the total discounted divergence of the feared model is: + +$$ +\frac{1}{2} E^{\bar{W}} \left[\sum_{t=0}^{\infty} \beta^t \xi(x_t) \middle| x_0, c_0\right] +$$ + +To ensure this feared model lies inside the entropy ball, we replace the earlier constant bound $\eta$ with a state-dependent budget: + +```{math} +:label: eq_tilted_constraint + +\frac{1}{2} E^w \left[\sum_{t=0}^{\infty} \beta^t w_t^\top w_t \middle| x_0, c_0\right] \leq \frac{1}{2} E^w \left[\sum_{t=0}^{\infty} \beta^t \xi(x_t) \middle| x_0, c_0\right] +``` + +The time-$t$ contributions to the right-hand side of {eq}`eq_tilted_constraint` relax the discounted entropy constraint in 
states $x_t$ in which $\xi(x_t)$ is larger. + +This sets the stage for *state-dependent* mean distortions in the worst-case model. + +Inspired by {cite:t}`Bansal_Yaron_2004`, a concrete form of this concern is that the true state dynamics are more persistent than the econometrician's model implies, expressed by + +$$ +x_{t+1} = \bar{A} x_t + C \tilde\varepsilon_{t+1} +$$ + +Since $\bar{A} = A - C\bar{W}$, this feared model is equivalent to shifting the mean of $\varepsilon_{t+1}$ by $-\bar{W}x_t$, giving $\bar{w}_t = \bar{W} x_t$ with + +$$ +\bar{W} = -\check{C}^{-1}(\bar{A}_{2:n,\cdot} - A_{2:n,\cdot}) +$$ + +(again using the assumption that $\check{C}$ is square and invertible), which implies a *quadratic* $\xi$ function: + +```{math} +:label: eq_xi + +\xi(x_t) := x_t^\top \bar{W}^\top\bar{W} x_t =: x_t^\top \Xi x_t +``` + +```{figure} /_static/lecture_specific/risk_aversion_or_mistaken_beliefs/eggs_backus2.png +Tilted discounted entropy balls. Including particular parametric alternatives with more long-run risk tilts the entropy ball and generates state-dependent worst-case distortions. +``` + +### The Szőke agent's sequence problem + +As in the multiplier preferences section, we convert the constraint problem into an unconstrained one by attaching a Lagrange multiplier $\tilde\theta \geq 0$ to the tilted entropy constraint {eq}`eq_tilted_constraint`. 
+ +The penalty $\tilde\theta(w_t^\top w_t - x_t^\top \Xi x_t)/2$ now has two terms: the first penalises the agent's distortion, while the second rewards distortions in states where the feared model deviates more, creating a state-dependent entropy budget: + +```{math} +:label: eq_szoke_seq + +J(x_0, c_0 \mid \Xi) := \max_{\tilde\theta \geq 0} \min_{\{w_t\}} E^w \left[\sum_{t=0}^{\infty} \beta^t c_t + \tilde\theta \frac{1}{2}\sum_{t=0}^{\infty} \beta^t\bigl(w_t^\top w_t - x_t^\top \Xi x_t\bigr) \middle| x_0, c_0\right] +``` + +The dynamics under the distorted measure remain linear Gaussian: + +$$ +c_{t+1} - c_t = D x_t + G (\tilde\varepsilon_{t+1} - w_t), \qquad x_{t+1} = A x_t + C (\tilde\varepsilon_{t+1} - w_t) +$$ + +Because the state vector now includes a leading constant $1$, we can write the worst-case distortion as a *linear* function of the augmented state: + +$$ +\tilde{w}_t = \tilde{W} x_t +$$ + +The first column of $\tilde{W}$ stores the constant part that would otherwise be written separately as $a$, while the remaining columns load on the stochastic factors $\check{x}_t$. + +When $\Xi = 0$ (no tilting), that first column reduces to Hansen's constant $\bar{w}$ from the untilted problem, while the remaining columns are zero. + +When $\Xi \neq 0$, the nonconstant columns of $\tilde{W}$ are the new contribution of the tilted entropy ball, and are what generate countercyclical uncertainty prices. 
+
+Writing $\tilde{A} = A - C \tilde{W}$ and $\tilde{D} = D - G \tilde{W}$, the worst-case dynamics are
+
+$$
+x_{t+1} = \tilde{A} x_t + C\tilde\varepsilon_{t+1}, \qquad c_{t+1} - c_t = \tilde{D} x_t + G\tilde\varepsilon_{t+1}
+$$
+
+### Implementation: tilted entropy ball
+
+For the inner minimisation over $\{w_t\}$ in {eq}`eq_szoke_seq`, the value function is **affine-quadratic** in the augmented state because $c_t$ enters linearly:
+
+$$
+J(x, c) = \frac{c}{1-\beta} + v^\top x + x^\top P x + K
+$$
+
+Writing $e_1 = (1, 0, \ldots, 0)^\top$ for the selector of the constant state and $\tilde\theta$ as $\theta$ in the code, the first-order condition for $\tilde{W}$ balances the entropy penalty against the marginal effects on the value function:
+
+$$
+(\theta I + 2\beta C^\top P C) \tilde{W}
+= 2\beta C^\top P A
++ \left(\frac{\beta}{1-\beta} G^\top + \beta C^\top v\right) e_1^\top
+$$
+
+The linear coefficient $v$ captures the discounted cumulative effect of a unit change in $x_t$ on future consumption, and satisfies:
+
+$$
+\bigl(I - \beta (A - C\tilde{W})^\top\bigr) v
+= \frac{\beta}{1-\beta} \left(D^\top - \tilde{W}^\top G^\top\right)
+$$
+
+Finally, matching quadratic terms in $x$ yields the matrix Riccati equation for $P$, which encodes the curvature of the value function:
+
+$$
+P = -\tfrac{\theta}{2} \Xi + \tfrac{\theta}{2} \tilde{W}^\top \tilde{W} + \beta (A - C\tilde{W})^\top P (A - C\tilde{W})
+$$
+
+The code below iterates on the coupled $(P, v, \tilde{W})$ system to convergence.
+
+For later comparisons, we also keep a version of $\tilde{W}$ with its first column set to zero, denoted informally by $\tilde{W}^{sd}$, which isolates the factor-dependent component.
+
+```{code-cell} ipython3
+class TiltedEntropyModel:
+    """
+    Hansen-Szőke tilted entropy ball model.
+
+    Given (A, C, D, G, β, θ, Ξ), computes the full worst-case
+    distortion matrix W_tilde in w_t = W_tilde @ x_t.
+
+    The first column of W_tilde is the constant component induced by
+    the augmented state x_t = [1, check_x_t']'. W_tilde_state zeros
+    that column out and keeps only factor-dependent loadings.
+    """
+
+    def __init__(self, A, C, D, G, β, θ, Ξ):
+        self.A = np.atleast_2d(A).astype(float)
+        self.C = np.atleast_2d(C).astype(float)
+        self.D = np.atleast_2d(D).astype(float)
+        self.G = np.atleast_2d(G).astype(float)
+        self.β, self.θ = float(β), float(θ)
+        self.Ξ = np.atleast_2d(Ξ).astype(float)
+        self.n = self.A.shape[0]
+
+        # Worst-case dynamics under the full distortion W_tilde.
+        self.W_tilde, self.v = self._solve_worst_case()
+        self.A_tilde = self.A - self.C @ self.W_tilde
+        self.D_tilde = self.D - self.G @ self.W_tilde
+        # Factor-dependent-only versions: zero out the constant
+        # (first) column of W_tilde.
+        self.W_tilde_state = self.W_tilde.copy()
+        self.W_tilde_state[:, 0] = 0.0
+        self.A_tilde_state = self.A - self.C @ self.W_tilde_state
+        self.D_tilde_state = self.D - self.G @ self.W_tilde_state
+
+    def _solve_worst_case(self):
+        """Iterate on the coupled (P, v, W) system."""
+        n, k = self.n, self.C.shape[1]
+        β, θ = self.β, self.θ
+
+        P = np.zeros((n, n))
+        v = np.zeros(n)
+        e1 = np.zeros(n)
+        e1[0] = 1.0
+        converged = False
+        for _ in range(10000):
+            # FOC: (θI + 2βC'PC) W = 2βC'PA + (β/(1-β)G' + βC'v) e1'.
+            M = θ * np.eye(k) + 2 * β * self.C.T @ P @ self.C
+            b = β / (1 - β) * self.G.T.flatten() + β * self.C.T @ v
+            rhs = 2 * β * self.C.T @ P @ self.A + np.outer(b, e1)
+            W = np.linalg.solve(M, rhs)
+            A_w = self.A - self.C @ W
+            # Riccati update: P = -(θ/2)Ξ + (θ/2)W'W + β A_w' P A_w.
+            P_new = (-(θ / 2) * self.Ξ
+                     + (θ / 2) * W.T @ W
+                     + β * A_w.T @ P @ A_w)
+            # Symmetrize to remove floating-point asymmetry.
+            P_new = 0.5 * (P_new + P_new.T)
+            D_flat = self.D.T.flatten()
+            G_flat = self.G.T.flatten()
+            # Linear-coefficient equation: (I - β A_w') v = β/(1-β)(D' - W'G').
+            rhs_v = β * (D_flat - W.T @ G_flat) / (1 - β)
+            v_new = np.linalg.solve(np.eye(n) - β * A_w.T, rhs_v)
+            if (np.max(np.abs(P_new - P)) < 1e-10
+                    and np.max(np.abs(v_new - v)) < 1e-10):
+                P = P_new
+                v = v_new
+                converged = True
+                break
+            P = P_new
+            v = v_new
+
+        if not converged:
+            print("Warning: (P, v, W) iteration did not converge")
+        self._P_quad = P
+        self._v_lin = v
+        return W, v
+
+    def distortion(self, x):
+        """Full worst-case distortion W_tilde @ x."""
+        return self.W_tilde @ x
+
+    def state_dependent_distortion(self, x):
+        """Factor-dependent component with the constant column removed."""
+        return self.W_tilde_state @ x
+
+    def entropy(self, x):
+        """Full conditional entropy: (1/2)(W_tilde x)'(W_tilde x)."""
+        w = self.distortion(x)
+        return 0.5 * w @ w
+
+    def state_dependent_entropy(self, x):
+        """Entropy of the factor-dependent component only."""
+        w = self.state_dependent_distortion(x)
+        return 0.5 * w @ w
+
+    def xi_function(self, x):
+        """Feared-model entropy bound ξ(x) = x' Ξ x, cf. {eq}`eq_xi`."""
+        return x @ self.Ξ @ x
+```
+
+```{code-cell} ipython3
+# Feared parametric model
+A_bar_core = np.array([[0.995, -0.03],
+                       [0.000, 0.96]])
+
+A_bar, _, _, _, _ = augment_state_space(
+    A_bar_core, C_core, np.zeros((1, 2)), np.zeros((1, 2)), np.zeros(2)
+)
+
+W_bar = -np.linalg.solve(C[1:, :], A_bar[1:, :] - A[1:, :])
+Ξ = W_bar.T @ W_bar
+
+print("Feared transition A_bar:\n", A_bar)
+print("\nImplied distortion W_bar:\n", W_bar.round(3))
+print("\nTilting matrix Ξ:\n", Ξ.round(1))
+```
+
+In {eq}`eq_szoke_seq` the multiplier $\tilde\theta$ is determined by the outer maximisation.
+
+Here we fix $\theta$ at a representative value and solve only the inner minimisation, illustrating the multiplier formulation described in {ref}`the multiplier preferences section <mult_pref_section>`.
+ +```{code-cell} ipython3 +θ_tilt = 3.0 +tilted = TiltedEntropyModel(A, C, D, G, β, θ_tilt, Ξ) + +print("Worst-case distortion W_tilde:\n", tilted.W_tilde.round(4)) +print("\nConstant column of W_tilde:\n", tilted.W_tilde[:, 0].round(4)) +W_fd = tilted.W_tilde[:, 1:].round(4) +print("\nFactor-dependent columns of W_tilde:\n", W_fd) +print("\nWorst-case transition A_tilde:\n", + tilted.A_tilde.round(4)) +eig_A = eigvals(A[1:, 1:]).round(4) +eig_At = eigvals(tilted.A_tilde[1:, 1:]).round(4) +print(f"\nEigenvalues of check(A): {eig_A}") +print(f"Eigenvalues of check(A_tilde): {eig_At}") +``` + +````{exercise} +:label: lr_exercise_3 + +Derive the first-order condition for the tilted entropy problem under the augmented-state convention. + +1. Start from {eq}`eq_szoke_seq` and write $w_t = W x_t$, where the first column of $W$ captures the constant part of the distortion. +2. Show that the first-order condition gives + + $$ + (\theta I + 2\beta C^\top P C) W + = 2\beta C^\top P A + + \left(\frac{\beta}{1-\beta} G^\top + \beta C^\top v\right)e_1^\top + $$ + + and derive the associated linear equation for $v$. +3. Derive the $P$ update by substituting the optimal $W$ back into the Bellman equation and matching quadratic terms in $x$. + +```` + +````{solution} lr_exercise_3 +:class: dropdown + +For part 1, write $w_t = W x_t$ with $x_t = (1, \check{x}_t^\top)^\top$. + +To confirm the affine-quadratic form, substitute this guess into the Bellman equation for the inner minimisation of {eq}`eq_szoke_seq`: + +$$ +J(x, c) = \min_{w} \left\{ c + \frac{\theta}{2}(w^\top w - x^\top \Xi x) + \beta\, E\bigl[J(x', c')\bigr] \right\} +$$ + +where $x' = Ax + C(\tilde\varepsilon - w)$ and $c' - c = Dx + G(\tilde\varepsilon - w)$. + +Conjecture $J(x,c) = c/(1-\beta) + v^\top x + x^\top P x + K$. + +Under this guess, $J(x',c') = c'/(1-\beta) + v^\top x' + x'^\top P x' + K$. 
+ +Write $c'/(1-\beta) = c/(1-\beta) + (c'-c)/(1-\beta)$, so the $c/(1-\beta)$ term passes through both sides and can be cancelled. + +Substituting $w = Wx$, the next-period state is $x' = (A - CW)x + C\tilde\varepsilon$, and the consumption increment is $(c'-c) = (D - GW)x + G\tilde\varepsilon$. + +Taking $E[\cdot]$ (with $E[\tilde\varepsilon] = 0$, $E[\tilde\varepsilon\tilde\varepsilon^\top] = I$), the right-hand side after cancelling $c/(1-\beta)$ has the following structure. + +*Quadratic terms in $x$:* + +$$ +-\tfrac{\theta}{2} x^\top \Xi x + \tfrac{\theta}{2} x^\top W^\top W x + \beta x^\top (A-CW)^\top P (A-CW) x +$$ + +These come from the penalty $\frac{\theta}{2}(w^\top w - x^\top \Xi x)$ and from $x'^\top P x'$. + +*Linear terms in $x$:* + +$$ +\tfrac{\beta}{1-\beta}(D - GW)x + \beta v^\top (A-CW)x +$$ + +These come from $(c'-c)/(1-\beta)$ and from $v^\top x'$. + +The remaining terms are constant (independent of $x$). + +These come from the noise terms and from $K$. + +Every term is at most quadratic in $x$, so matching coefficients reproduces the conjectured form $v^\top x + x^\top P x + K$ with updated $v$, $P$, $K$. + +This confirms self-consistency of the affine-quadratic guess. + +To derive the first-order condition, collect all terms in the Bellman RHS that depend on $w = Wx$. + +From the penalty $\frac{\theta}{2} w^\top w$, the contribution is $\frac{\theta}{2}x^\top W^\top W x$. + +From $\beta E[v^\top x']$ with $E[x'] = (A-CW)x$, the $w$-dependent part is $-\beta v^\top C W x$. + +From $\beta E[(c'-c)/(1-\beta)]$ with $E[c'-c] = (D-GW)x$, the $w$-dependent part is $-\frac{\beta}{1-\beta} G W x$. + +From $\beta E[x'^\top P x']$ with $E[x'] = (A-CW)x$, the $w$-dependent part is $\beta x^\top (A-CW)^\top P (A-CW) x$. 
+ +Differentiating with respect to $W$ and using $e_1^\top x_t = 1$ gives: + +$$ +(\theta I + 2\beta C^\top P C) W += 2\beta C^\top P A ++ \left(\frac{\beta}{1-\beta} G^\top + \beta C^\top v\right)e_1^\top +$$ + +This shows directly how the first column of $W$ carries the constant part of the distortion. + +$$ +\bigl(I - \beta (A - CW)^\top\bigr) v += \frac{\beta}{1-\beta} \left(D^\top - W^\top G^\top\right) +$$ + +This is the linear equation used in the code to update $v$ once $W$ is known. + +For part 3, substitute the optimal $W$ back into the Bellman equation. + +The quadratic terms in $x_t$ give: + +$$ +P = -\tfrac{\theta}{2} \Xi + \tfrac{\theta}{2} W^\top W + \beta (A-CW)^\top P(A-CW) +$$ + +This is the matrix Riccati equation that the code iterates to convergence. + +```` + +### State-dependent entropy: the key innovation + +The following figure compares the conditional entropy of the worst-case distortion across three models as a function of the level factor $\check{x}_{1,t}$, revealing how the tilted entropy ball produces the state dependence that Hansen's original formulation lacks. 
+
+```{code-cell} ipython3
+x_grid = np.linspace(-0.03, 0.04, 200)
+
+entropy_tilted = np.array([
+    tilted.state_dependent_entropy(
+        augment_state(np.array([x, 0.005])))
+    for x in x_grid])
+ξ_vals = np.array([
+    tilted.xi_function(
+        augment_state(np.array([x, 0.005])))
+    for x in x_grid])
+
+# Calibrate Hansen's θ so constant entropy matches E[ξ(x_t)/2]
+Σ_check = solve_discrete_lyapunov(A[1:, 1:], C[1:, :] @ C[1:, :].T)
+E_xx = np.block([
+    [np.ones((1, 1)), np.zeros((1, 2))],
+    [np.zeros((2, 1)), Σ_check]
+])
+avg_ξ_half = 0.5 * np.trace(Ξ @ E_xx)
+w_unit = hansen_worst_case(A, C, D, G, β, 1.0)
+θ_hansen = norm(w_unit) / np.sqrt(2 * avg_ξ_half)
+w_hansen = w_unit / θ_hansen
+hansen_ent = 0.5 * w_hansen @ w_hansen
+
+fig, ax = plt.subplots(figsize=(9, 5))
+hansen_lab = (rf"Hansen: constant "
+              rf"$\frac{{1}}{{2}}\bar{{w}}^\top"
+              rf"\bar{{w}} = {hansen_ent:.4f}$")
+ax.axhline(hansen_ent, color='steelblue',
+           lw=2, ls='--', label=hansen_lab)
+# Use the Unicode "ő" directly: the LaTeX accent \H{o} is not math-mode
+# text, and matplotlib would render it literally in the legend.
+szoke_lab = ("Szőke (factor-dep.): "
+             r"$\frac{1}{2}(\tilde{W}^{sd}x_t)"
+             r"^\top(\tilde{W}^{sd}x_t)$")
+ax.plot(x_grid, entropy_tilted, 'firebrick',
+        lw=2, label=szoke_lab)
+ax.plot(x_grid, 0.5 * ξ_vals, 'seagreen', lw=2, ls=':',
+        label=r"Feared model: $\frac{1}{2}\xi(x_t)$")
+ax.set_xlabel(r"Level factor $\check{x}_{1,t}$")
+ax.set_ylabel("Conditional entropy")
+ax.legend()
+plt.tight_layout()
+plt.show()
+```
+
+The key innovation of the tilted entropy ball is visible: the factor-dependent component $\tilde{W}^{sd} x_t$ of the worst-case distortion grows with $|\check{x}_t|$, producing *countercyclical uncertainty prices*.
+
+By contrast, Hansen's constant distortion $\bar{w}$ has entropy $\frac{1}{2}\bar{w}^\top\bar{w}$ that does not vary with the state (shown as a horizontal line).
+
+The Szőke parabola lies inside the feared model's entropy budget $\frac{1}{2}\xi(x_t)$ along this slice, consistent with the tilted entropy constraint {eq}`eq_tilted_constraint`.
+
+### Three probability twisters
+
+To summarize, three distinct probability twisters play roles in this analysis:
+
+| Symbol | Source | Describes |
+|:---------------|:------------------------------|:----------------------------------|
+| $w_t^*$ | Piazzesi, Salomão, Schneider | Mistaken agent's beliefs |
+| $\bar{w}_t$ | Szőke's feared parametric model | Especial LRR parametric worry |
+| $\tilde{W} x_t$ | Szőke's worst-case model | State-dependent component of worst-case distortion |
+
+```{code-cell} ipython3
+x_state = augment_state(np.array([0.02, 0.008]))
+w_pss = W_star @ x_state
+w_feared = W_bar @ x_state
+w_szoke = tilted.state_dependent_distortion(x_state)
+
+ε_grid = np.linspace(-4, 4, 500)
+ϕ_base = normal_dist.pdf(ε_grid, 0, 1)
+
+fig, ax = plt.subplots(figsize=(9, 5))
+ax.plot(ε_grid, ϕ_base, 'black', lw=2,
+        label='Econometrician: $\\mathcal{N}(0,1)$')
+
+# Use the Unicode "ő" directly: the LaTeX accent \H{o} is outside math
+# mode and matplotlib would render it literally in the legend.
+for w_val, label, color, ls in [
+    (w_pss[0], r"PSS mistaken $w^*_t$", 'steelblue', '-'),
+    (w_feared[0], r"Feared LRR $\bar{w}_t$", 'seagreen', '--'),
+    (w_szoke[0],
+     "Szőke worst-case "
+     r"$\tilde{W}^{sd}x_t$ (factor-dep.)", 'firebrick', '-'),
+]:
+    ax.plot(ε_grid, normal_dist.pdf(ε_grid, -w_val, 1),
+            color=color, lw=2, ls=ls, label=label)
+
+ax.set_xlabel(r"$\varepsilon_1$")
+ax.set_ylabel("Density")
+ax.legend()
+plt.tight_layout()
+plt.show()
+```
+
+Each twister shifts the econometrician's $\mathcal{N}(0,1)$ density by the displayed component, where the direction and magnitude depend on the current state $x_t$.
+
+For the state shown here, all three displayed components are negative in their first element, so the twisted densities are shifted slightly to the right.
+
+The shifts are small relative to the unit variance because the stochastic state $\check{x}_t = (0.02, 0.008)$ is close to the unconditional mean.
+
+## Empirical challenges and model performances
+
+Before comparing models, it helps to see the empirical regularities that any successful theory must explain.
+
+```{code-cell} ipython3
+---
+tags: [hide-input]
+mystnb:
+  figure:
+    caption: U.S. Treasury yields and yield spread
+    name: fig-us-yields
+---
+# GS1/GS5/GS10 and DFII5/DFII10 are presumably FRED constant-maturity
+# nominal and TIPS (real) yield series -- TODO confirm against the CSV.
+data = pd.read_csv(
+    'https://raw.githubusercontent.com/QuantEcon/lecture-python.myst/refs/heads/'
+    'main/lectures/_static/lecture_specific/risk_aversion_or_mistaken_beliefs/fred_data.csv',
+    parse_dates=['DATE'], index_col='DATE'
+)
+
+fig, axes = plt.subplots(2, 1, figsize=(12, 8), sharex=True,
+                         gridspec_kw={'height_ratios': [2, 1]})
+
+# Recession shading helper
+def shade_recessions(ax, rec):
+    """Shade every period where the indicator ``rec`` equals 1 as a
+    grey vertical band spanning the full height of ``ax``."""
+    ax.fill_between(rec.index, 0, 1,
+                    where=rec.values.flatten() == 1,
+                    transform=ax.get_xaxis_transform(),
+                    color='grey', alpha=0.2)
+
+# USREC: recession indicator (1 during recessions, per FRED naming).
+rec = data['USREC'].dropna()
+
+# Top panel: nominal (solid) and real (dashed) yields by maturity.
+ax = axes[0]
+shade_recessions(ax, rec)
+
+ax.plot(data['GS1'], 'steelblue', lw=2,
+        label=r'$y_{\mathrm{nom}}^{(1)}$')
+ax.plot(data['GS5'], 'seagreen', lw=2,
+        label=r'$y_{\mathrm{nom}}^{(5)}$')
+ax.plot(data['GS10'], 'firebrick', lw=2,
+        label=r'$y_{\mathrm{nom}}^{(10)}$')
+ax.plot(data['DFII5'], 'seagreen', lw=2, ls='--',
+        label=r'$y_{\mathrm{real}}^{(5)}$')
+ax.plot(data['DFII10'], 'firebrick', lw=2, ls='--',
+        label=r'$y_{\mathrm{real}}^{(10)}$')
+
+ax.axhline(0, color='black', lw=0.5)
+ax.set_ylabel('Yield (%)')
+ax.legend(loc='upper right')
+
+# Bottom panel: 10-year minus 1-year nominal yield spread.
+ax2 = axes[1]
+shade_recessions(ax2, rec)
+
+spread_10_1 = data['GS10'] - data['GS1']
+ax2.plot(spread_10_1, 'steelblue', lw=2,
+         label=r'$y^{(10)} - y^{(1)}$')
+ax2.axhline(0, color='black', lw=0.5)
+ax2.set_ylabel('Spread (%)')
+ax2.set_xlabel('Year')
+ax2.legend(loc='upper left')
+
+plt.tight_layout()
+plt.show()
+```
+
+Several recognised patterns characterise the U.S. term structure:
+
+- The nominal yield curve usually slopes *upward*.
+- The long-minus-short yield spread *narrows before* U.S. recessions and
+  *widens after* them.
+- Consequently, the slope of the yield curve helps *predict* recessions and economic activity.
+- Long and short yields are *almost equally volatile* (the Shiller "volatility puzzle"). +- To solve the Shiller puzzle, risk prices (or something observationally equivalent) + must *depend on volatile state variables*. + - {doc}`The Hansen-Jagannathan Bound ` provides a nonparametric way to quantify the required volatility of the stochastic discount factor. + +The following table summarises how various models perform: + +| Model | Average slope | Slopes near recessions | Volatile long yield | +|:-------------------------------|:--------------|:-----------------------|:--------------------| +| {cite:t}`Lucas1978` | no | no | no | +| Epstein-Zin with LRR | maybe | yes | no | +| {cite:t}`piazzesi2015trend` | built-in | built-in | yes | +| {cite:t}`szoke2022estimating` | *YES* | yes | yes | + +### Why Szőke's model succeeds + +Szőke's framework delivers: + +1. A theory of *state-dependent belief distortions* with factor-dependent component $\tilde{W}^{sd} x_t$. +2. A theory about the *question that professional forecasters answer*: they + respond with their worst-case model because they hear "tell me forecasts that + rationalise your (max-min) decisions." +3. A way to *measure* the size of belief distortions relative to the + econometrician's model. 
+ +```{code-cell} ipython3 +model_rn = LikelihoodRatioModel( + A, C, D, G, r_bar, Λ=np.zeros_like(Λ)) +model_uncert = LikelihoodRatioModel( + A, C, D, G, r_bar, Λ=tilted.W_tilde_state) + +x_test = augment_state(np.array([0.01, -0.03])) +n_max = 120 +mats = np.arange(1, n_max + 1) + +fig, ax = plt.subplots(figsize=(9, 5)) +ax.plot(mats, model_rn.yields(x_test, n_max) * 1200, + 'grey', lw=2, ls=':', label='Risk neutral') +ax.plot(mats, model.yields(x_test, n_max) * 1200, + 'steelblue', lw=2, label=r'Risk aversion ($\lambda x_t$)') +ax.plot(mats, model_uncert.yields(x_test, n_max) * 1200, + 'firebrick', lw=2, ls='--', + label=r'Model uncertainty ($\tilde{W}^{sd} x_t$)') +ax.set_xlabel("Maturity (months)") +ax.set_ylabel("Yield (annualised %)") +ax.legend() +plt.tight_layout() +plt.show() +``` + +The risk-aversion-only and model-uncertainty-only yield curves both slope upward, generating a term premium. + +(Note that the model-uncertainty curve uses only the factor-dependent component $\tilde{W}^{sd} x_t$, not the full worst-case distortion.) + +The two explanations represent *alternative channels* for the same observed term premium, reinforcing the identification challenge explored throughout this lecture. + +## Cross-equation restrictions and estimation + +A key appeal of the robust control approach is that it lets us deviate from rational expectations while still preserving a set of powerful **cross-equation restrictions** on decision makers' beliefs. + +As {cite:t}`szoke2022estimating` puts it: + +> An appealing feature of robust control theory is that it lets us deviate from +> rational expectations, but still preserves a set of powerful cross-equation +> restrictions on decision makers' beliefs. ... Consequently, estimation can proceed +> essentially as with rational expectations econometrics. 
The main difference is +> that now restrictions through which we interpret the data emanate from the +> decision maker's best response to a worst-case model instead of to the +> econometrician's model. + +### Szőke's empirical strategy + +In the Szőke framework, the rational-expectations econometrician's risk price vector $\lambda_t$ is decomposed as $\lambda_t = \tilde{w}_t + \tilde{\lambda}_t$, paralleling the PSS decomposition $\lambda = \lambda^* + W^*$. + +The combined likelihood ratio retains the same log-normal form as before, but now the total distortion vector $\lambda_t$ has an explicit decomposition into belief distortion and risk price components: + +$$ +m_{t+1}^\lambda = \exp \left(-\lambda_t^\top\varepsilon_{t+1} - \frac{1}{2}\lambda_t^\top\lambda_t\right), \qquad \lambda_t = \tilde{w}_t + \tilde\lambda_t +$$ + +Here $\tilde{w}_t = \tilde{W} x_t$ is the worst-case belief distortion, whose factor-dependent component is carried by the non-first columns of $\tilde{W}$, and $\tilde\lambda_t = \tilde\lambda x_t$ is the residual risk price. + +In stage I (estimation): + +1. Use $\{x_t, c_t\}_{t=0}^T$ to estimate the econometrician's $A$, $C$, $D$, $G$. +2. View $\Xi$ as a matrix of additional free parameters and estimate them + simultaneously with risk prices $\tilde\lambda x_t$ from data + $\{p_t(n+1)\}_{n=1}^N$, $t = 0, \ldots, T$, by imposing cross-equation + restrictions: + +$$ +p_t(n+1) = \exp(-r_t) E_t \left[m_{t+1}^\lambda p_{t+1}(n)\right] +$$ + +In stage II (assessment): + +1. Assess improvements in predicted behaviour of the term structure. +2. Use estimated worst-case dynamics to form distorted forecasts + $\tilde{E}_t[x_{t+1}] = (A - C\tilde{W})x_t$ and compare them to those of + professional forecasters. +3. 
Compute the discounted KL divergence $\frac{1}{2}E^w[\sum \beta^t w_t^\top w_t]$ of
+   each twisted model relative to the econometrician's model and compare them
+   (the code below zeros the first column of $W$ and keeps only the factor-dependent part).
+
+```{code-cell} ipython3
+def discounted_kl(W, A_w, C, x0, β, T_horizon=500):
+    """Factor-dependent KL: (1/2) E^w [Σ β^t (W x_t)'(W x_t)].
+
+    Monte Carlo estimate: simulate paths under x' = A_w x + C ε with
+    ε ~ N(0, I), accumulate the discounted per-period entropy of the
+    distortion w_t = W x_t, and average across paths.
+
+    Parameters
+    ----------
+    W : loading matrix of the distortion w_t = W @ x_t
+    A_w : state transition used to propagate the simulated paths
+    C : shock loading matrix
+    x0 : initial (augmented) state vector
+    β : discount factor
+    T_horizon : number of periods (truncation of the infinite sum)
+    """
+    n_sims = 10_000
+    k = C.shape[1]
+    # Fixed seed so the reported KL figures are reproducible.
+    rng = np.random.default_rng(2024)
+    X = np.tile(x0, (n_sims, 1))   # one row per simulated path
+    total = np.zeros(n_sims)
+    for t in range(T_horizon):
+        w_t = X @ W.T
+        # Per-period relative entropy (1/2) w_t'w_t, discounted by β^t.
+        total += β**t * 0.5 * np.sum(w_t**2, axis=1)
+        X = X @ A_w.T + rng.standard_normal((n_sims, k)) @ C.T
+    return np.mean(total)
+
+x0_test = augment_state(np.array([0.01, 0.005]))
+kl_szoke = discounted_kl(
+    tilted.W_tilde_state,
+    tilted.A_tilde_state, C, x0_test, β)
+kl_feared = discounted_kl(W_bar, A_bar, C, x0_test, β)
+
+print(f"Szőke factor-dep. KL: {kl_szoke:.4f}")
+print(f"Feared LRR KL:        {kl_feared:.4f}")
+status = ('closer to' if kl_szoke < kl_feared
+          else 'farther from')
+print(f"\nWorst-case model is {status} "
+      f"the econometrician's model.")
+```
+
+Using only the factor-dependent component $\tilde{W}^{sd} x_t$, the Szőke worst-case model has lower discounted KL divergence from the econometrician's model than the feared long-run risk model, meaning it is statistically harder to distinguish from the baseline.
+
+Yet it still generates the state-dependent uncertainty prices needed to match term-structure dynamics.
+
+## Who cares?
+
+Joint probability distributions of interest rates and macroeconomic shocks are important throughout macroeconomics:
+
+- *Costs of aggregate fluctuations.* Welfare assessments of business cycles
+  depend sensitively on how risks are priced.
+- *Consumption Euler equations.* The "New Keynesian IS curve" is a log-linearised
+  consumption Euler equation whose risk adjustments are controlled by the stochastic
+  discount factor.
+- *Optimal taxation and government debt management.* Government bond prices embed + risk prices whose state dependence matters for optimal fiscal policy. +- *Central bank expectations management.* Forward guidance works by shifting the + term structure, an exercise whose effects depend on the same likelihood ratios + studied here. +- *Long-run risk and secular stagnation.* The Bansal-Yaron long-run risk + hypothesis is difficult to detect statistically, yet an agent who fears it in + the sense formalised above may behave very differently than one who does not. + +Understanding whether observed asset prices reflect risk aversion, mistaken beliefs, or fears of model misspecification, and quantifying each component, is interesting for both positive and normative macroeconomics. + + +## Related lectures + +This lecture connects to several others in the series: + +- {doc}`Doubts or Variability? ` studies how a preference for robustness generates worst-case likelihood ratios that look like stochastic discount factor shocks, complementing the analysis here with Hansen-Jagannathan bounds and detection-error probabilities. +- {doc}`Asset Pricing: Finite State Models ` introduces stochastic discount factors and risk-neutral pricing in a finite-state Markov setting, the discrete-state counterpart of the continuous Gaussian framework used here. +- {doc}`Heterogeneous Beliefs and Bubbles ` examines how heterogeneous and possibly mistaken beliefs generate speculative asset price bubbles, providing another perspective on how beliefs affect asset prices. +- {doc}`Likelihood Ratio Processes ` develops the mathematical properties of likelihood ratios, the central device organising this lecture, including their martingale structure and statistical applications. +- {doc}`Divergence Measures ` covers Kullback-Leibler divergence and relative entropy in detail, providing the information-theoretic foundations for the entropy constraints used in the robust control sections. 
+- {doc}`Affine Models of Asset Prices ` extends the linear Gaussian state-space framework to affine and exponential-quadratic stochastic discount factors, developing risk-neutral pricing formulas closely related to those derived here. +- {doc}`Robustness ` introduces the multiplier and constraint preferences that formalise fear of model misspecification, providing the decision-theoretic foundations for the Hansen and Szőke worst-case analysis developed here. +- {doc}`The Hansen-Jagannathan Bound ` derives bounds on the stochastic discount factor from asset return data, offering an empirical discipline on the likelihood ratios and risk prices studied in this lecture. diff --git a/lectures/rob_markov_perf.md b/lectures/rob_markov_perf.md index 645e9a2c..fcbf219a 100644 --- a/lectures/rob_markov_perf.md +++ b/lectures/rob_markov_perf.md @@ -568,12 +568,13 @@ def nnash_robust(A, C, B1, B2, R1, R2, Q1, Q2, S1, S2, W1, W2, M1, M2, k_1 = B1.shape[1] k_2 = B2.shape[1] + rng = np.random.default_rng(0) v1 = np.eye(k_1) v2 = np.eye(k_2) P1 = np.eye(n) * 1e-5 P2 = np.eye(n) * 1e-5 - F1 = np.random.randn(k_1, n) - F2 = np.random.randn(k_2, n) + F1 = rng.standard_normal((k_1, n)) + F2 = rng.standard_normal((k_2, n)) for it in range(max_iter): @@ -769,7 +770,7 @@ where $F_1$ and $F_2$ are the firms' robust decision rules within the robust mar laws that are distorted relative to the baseline model. After simulating $x_t$ under the baseline transition dynamics and robust decision rules $F_i, i = 1, 2$, we -extract and plot industry output $q_t=q_{1t}+q_{2t}$ and price $p_t = a_0 − a_1 q_t$. +extract and plot industry output $q_t=q_{1t}+q_{2t}$ and price $p_t = a_0 - a_1 q_t$. Here we set the robustness and volatility matrix parameters as follows: