Skip to content
This repository was archived by the owner on Apr 1, 2026. It is now read-only.

Commit 29b4e12

Browse files
committed
fix: fix mypy error
1 parent 117cb17 commit 29b4e12

File tree

3 files changed

+35
-31
lines changed

3 files changed

+35
-31
lines changed

bigframes/session/loader.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,8 @@
4747
from google.cloud import bigquery_storage_v1
4848
import google.cloud.bigquery
4949
import google.cloud.bigquery as bigquery
50+
from google.cloud.bigquery.job.load import LoadJob
51+
from google.cloud.bigquery.job.query import QueryJob
5052
import google.cloud.bigquery.table
5153
from google.cloud.bigquery_storage_v1 import types as bq_storage_types
5254
import pandas
@@ -562,7 +564,7 @@ def _start_generic_job(self, job: formatting_helpers.GenericJob):
562564
else:
563565
job.result()
564566

565-
if self._metrics is not None and isinstance(job, google.cloud.bigquery.job.Job):
567+
if self._metrics is not None and isinstance(job, (QueryJob, LoadJob)):
566568
self._metrics.count_job_stats(query_job=job)
567569

568570
@overload

bigframes/session/metrics.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,9 +17,11 @@
1717
import dataclasses
1818
import datetime
1919
import os
20-
from typing import Any, Mapping, Optional, Tuple
20+
from typing import Any, Mapping, Optional, Tuple, Union
2121

2222
import google.cloud.bigquery as bigquery
23+
from google.cloud.bigquery.job.load import LoadJob
24+
from google.cloud.bigquery.job.query import QueryJob
2325
import google.cloud.bigquery.table as bq_table
2426

2527
LOGGING_NAME_ENV_VAR = "BIGFRAMES_PERFORMANCE_LOG_NAME"
@@ -62,7 +64,7 @@ class ExecutionMetrics:
6264

6365
def count_job_stats(
6466
self,
65-
query_job: Optional[bigquery.job.Job] = None,
67+
query_job: Optional[Union[QueryJob, LoadJob]] = None,
6668
row_iterator: Optional[bq_table.RowIterator] = None,
6769
):
6870
if query_job is None:
@@ -106,7 +108,7 @@ def count_job_stats(
106108
)
107109
)
108110

109-
elif query_job.configuration.dry_run:
111+
elif isinstance(query_job, QueryJob) and query_job.configuration.dry_run:
110112
query_char_count = len(getattr(query_job, "query", ""))
111113

112114
# TODO(tswast): Pass None after making benchmark publishing robust to missing data.

notebooks/dataframes/job_history.ipynb

Lines changed: 27 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -4,8 +4,8 @@
44
"cell_type": "markdown",
55
"metadata": {},
66
"source": [
7-
"# Job History Manual Test\\n",
8-
"\\n",
7+
"# Job History Manual Test\n",
8+
"\n",
99
"This notebook demonstrates and manually tests the `bigframes.pandas.job_history()` functionality."
1010
]
1111
},
@@ -15,11 +15,11 @@
1515
"metadata": {},
1616
"outputs": [],
1717
"source": [
18-
"import pandas as pd\\n",
19-
"import bigframes.pandas as bpd\\n",
20-
"\\n",
21-
"# Set options if needed, e.g. project/location\\n",
22-
"# bpd.options.bigquery.project = \"YOUR_PROJECT\"\\n",
18+
"import pandas as pd\n",
19+
"import bigframes.pandas as bpd\n",
20+
"\n",
21+
"# Set options if needed, e.g. project/location\n",
22+
"# bpd.options.bigquery.project = \"YOUR_PROJECT\"\n",
2323
"# bpd.options.bigquery.location = \"US\""
2424
]
2525
},
@@ -36,7 +36,7 @@
3636
"metadata": {},
3737
"outputs": [],
3838
"source": [
39-
"df = bpd.read_gbq(\"SELECT 1 as a, 2 as b\")\\n",
39+
"df = bpd.read_gbq(\"SELECT 1 as a, 2 as b\")\n",
4040
"df.head()"
4141
]
4242
},
@@ -53,8 +53,8 @@
5353
"metadata": {},
5454
"outputs": [],
5555
"source": [
56-
"local_df = pd.DataFrame({'col1': [1, 2, 3], 'col2': ['a', 'b', 'c']})\\n",
57-
"bf_df = bpd.read_pandas(local_df)\\n",
56+
"local_df = pd.DataFrame({'col1': [1, 2, 3], 'col2': ['a', 'b', 'c']})\n",
57+
"bf_df = bpd.read_pandas(local_df)\n",
5858
"bf_df.head()"
5959
]
6060
},
@@ -71,8 +71,8 @@
7171
"metadata": {},
7272
"outputs": [],
7373
"source": [
74-
"# Perform a simple aggregation to trigger a computation\\n",
75-
"agg_df = bf_df.groupby('col2').sum()\\n",
74+
"# Perform a simple aggregation to trigger a computation\n",
75+
"agg_df = bf_df.groupby('col2').sum()\n",
7676
"agg_df.head()"
7777
]
7878
},
@@ -89,7 +89,7 @@
8989
"metadata": {},
9090
"outputs": [],
9191
"source": [
92-
"history = bpd.job_history()\\n",
92+
"history = bpd.job_history()\n",
9393
"history"
9494
]
9595
},
@@ -106,20 +106,20 @@
106106
"metadata": {},
107107
"outputs": [],
108108
"source": [
109-
"# Display key columns to verify data population\\n",
110-
"cols_to_check = [\\n",
111-
" 'job_id', \\n",
112-
" 'job_type', \\n",
113-
" 'creation_time', \\n",
114-
" 'duration_seconds', \\n",
115-
" 'total_bytes_processed', \\n",
116-
" 'query', \\n",
117-
" 'input_files', # Should be populated for Load Job\\n",
118-
" 'destination_table'\\n",
119-
"]\\n",
120-
"\\n",
121-
"# Filter columns that exist in the history DataFrame\\n",
122-
"existing_cols = [col for col in cols_to_check if col in history.columns]\\n",
109+
"# Display key columns to verify data population\n",
110+
"cols_to_check = [\n",
111+
" 'job_id',\n",
112+
" 'job_type',\n",
113+
" 'creation_time',\n",
114+
" 'duration_seconds',\n",
115+
" 'total_bytes_processed',\n",
116+
" 'query',\n",
117+
" 'input_files', # Should be populated for Load Job\n",
118+
" 'destination_table'\n",
119+
"]\n",
120+
"\n",
121+
"# Filter columns that exist in the history DataFrame\n",
122+
"existing_cols = [col for col in cols_to_check if col in history.columns]\n",
123123
"history[existing_cols]"
124124
]
125125
}

0 commit comments

Comments (0)