|
4 | 4 | "cell_type": "markdown", |
5 | 5 | "metadata": {}, |
6 | 6 | "source": [ |
7 | | - "# Job History Manual Test\\n", |
8 | | - "\\n", |
| 7 | + "# Job History Manual Test\n", |
| 8 | + "\n", |
9 | 9 | "This notebook demonstrates and manually tests the `bigframes.pandas.job_history()` functionality." |
10 | 10 | ] |
11 | 11 | }, |
|
15 | 15 | "metadata": {}, |
16 | 16 | "outputs": [], |
17 | 17 | "source": [ |
18 | | - "import pandas as pd\\n", |
19 | | - "import bigframes.pandas as bpd\\n", |
20 | | - "\\n", |
21 | | - "# Set options if needed, e.g. project/location\\n", |
22 | | - "# bpd.options.bigquery.project = \"YOUR_PROJECT\"\\n", |
| 18 | + "import pandas as pd\n", |
| 19 | + "import bigframes.pandas as bpd\n", |
| 20 | + "\n", |
| 21 | + "# Set options if needed, e.g. project/location\n", |
| 22 | + "# bpd.options.bigquery.project = \"YOUR_PROJECT\"\n", |
23 | 23 | "# bpd.options.bigquery.location = \"US\"" |
24 | 24 | ] |
25 | 25 | }, |
|
36 | 36 | "metadata": {}, |
37 | 37 | "outputs": [], |
38 | 38 | "source": [ |
39 | | - "df = bpd.read_gbq(\"SELECT 1 as a, 2 as b\")\\n", |
| 39 | + "df = bpd.read_gbq(\"SELECT 1 as a, 2 as b\")\n", |
40 | 40 | "df.head()" |
41 | 41 | ] |
42 | 42 | }, |
|
53 | 53 | "metadata": {}, |
54 | 54 | "outputs": [], |
55 | 55 | "source": [ |
56 | | - "local_df = pd.DataFrame({'col1': [1, 2, 3], 'col2': ['a', 'b', 'c']})\\n", |
57 | | - "bf_df = bpd.read_pandas(local_df)\\n", |
| 56 | + "local_df = pd.DataFrame({'col1': [1, 2, 3], 'col2': ['a', 'b', 'c']})\n", |
| 57 | + "bf_df = bpd.read_pandas(local_df)\n", |
58 | 58 | "bf_df.head()" |
59 | 59 | ] |
60 | 60 | }, |
|
71 | 71 | "metadata": {}, |
72 | 72 | "outputs": [], |
73 | 73 | "source": [ |
74 | | - "# Perform a simple aggregation to trigger a computation\\n", |
75 | | - "agg_df = bf_df.groupby('col2').sum()\\n", |
| 74 | + "# Perform a simple aggregation to trigger a computation\n", |
| 75 | + "agg_df = bf_df.groupby('col2').sum()\n", |
76 | 76 | "agg_df.head()" |
77 | 77 | ] |
78 | 78 | }, |
|
89 | 89 | "metadata": {}, |
90 | 90 | "outputs": [], |
91 | 91 | "source": [ |
92 | | - "history = bpd.job_history()\\n", |
| 92 | + "history = bpd.job_history()\n", |
93 | 93 | "history" |
94 | 94 | ] |
95 | 95 | }, |
|
106 | 106 | "metadata": {}, |
107 | 107 | "outputs": [], |
108 | 108 | "source": [ |
109 | | - "# Display key columns to verify data population\\n", |
110 | | - "cols_to_check = [\\n", |
111 | | - " 'job_id', \\n", |
112 | | - " 'job_type', \\n", |
113 | | - " 'creation_time', \\n", |
114 | | - " 'duration_seconds', \\n", |
115 | | - " 'total_bytes_processed', \\n", |
116 | | - " 'query', \\n", |
117 | | - " 'input_files', # Should be populated for Load Job\\n", |
118 | | - " 'destination_table'\\n", |
119 | | - "]\\n", |
120 | | - "\\n", |
121 | | - "# Filter columns that exist in the history DataFrame\\n", |
122 | | - "existing_cols = [col for col in cols_to_check if col in history.columns]\\n", |
| 109 | + "# Display key columns to verify data population\n", |
| 110 | + "cols_to_check = [\n", |
| 111 | + " 'job_id',\n", |
| 112 | + " 'job_type',\n", |
| 113 | + " 'creation_time',\n", |
| 114 | + " 'duration_seconds',\n", |
| 115 | + " 'total_bytes_processed',\n", |
| 116 | + " 'query',\n", |
| 117 | + " 'input_files', # Should be populated for Load Job\n", |
| 118 | + " 'destination_table'\n", |
| 119 | + "]\n", |
| 120 | + "\n", |
| 121 | + "# Filter columns that exist in the history DataFrame\n", |
| 122 | + "existing_cols = [col for col in cols_to_check if col in history.columns]\n", |
123 | 123 | "history[existing_cols]" |
124 | 124 | ] |
125 | 125 | } |
|
0 commit comments