-
Notifications
You must be signed in to change notification settings - Fork 1.2k
Expand file tree
/
Copy pathtest_base.py
More file actions
2344 lines (1996 loc) · 93.1 KB
/
test_base.py
File metadata and controls
2344 lines (1996 loc) · 93.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# pyright: reportPrivateUsage=false
"""Unit-test suite for the `unstructured.chunking.base` module."""
from __future__ import annotations
import logging
from typing import Any, Sequence
import pytest
from lxml.html import fragment_fromstring
from unstructured.chunking.base import (
ChunkingOptions,
PreChunk,
PreChunkBuilder,
PreChunkCombiner,
PreChunker,
TokenCounter,
_CellAccumulator,
_Chunker,
_HtmlTableSplitter,
_PreChunkAccumulator,
_RowAccumulator,
_TableChunker,
_TextSplitter,
is_on_next_page,
is_title,
)
from unstructured.chunking.dispatch import reconstruct_table_from_chunks
from unstructured.common.html_table import HtmlCell, HtmlRow, HtmlTable
from unstructured.documents.elements import (
CheckBox,
CodeSnippet,
CompositeElement,
Element,
ElementMetadata,
Image,
PageBreak,
Table,
TableChunk,
Text,
Title,
)
# ================================================================================================
# CHUNKING OPTIONS
# ================================================================================================
class DescribeChunkingOptions:
    """Unit-test suite for `unstructured.chunking.base.ChunkingOptions` objects."""

    @pytest.mark.parametrize("max_characters", [0, -1, -42])
    def it_rejects_max_characters_not_greater_than_zero(self, max_characters: int):
        with pytest.raises(
            ValueError,
            match=f"'max_characters' argument must be > 0, got {max_characters}",
        ):
            ChunkingOptions(max_characters=max_characters)._validate()

    def it_does_not_complain_when_specifying_max_characters_by_itself(self):
        """Caller can specify `max_characters` arg without specifying any others.

        In particular, when `combine_text_under_n_chars` is not specified it defaults to the value
        of `max_characters`; it has no fixed default value that can be greater than
        `max_characters` and trigger an exception.
        """
        try:
            ChunkingOptions(max_characters=50)._validate()
        except ValueError:
            pytest.fail("did not accept `max_characters` as option by itself")

    @pytest.mark.parametrize(
        ("combine_text_under_n_chars", "expected_value"), [(None, 0), (42, 42)]
    )
    def it_accepts_combine_text_under_n_chars_in_constructor_but_defaults_to_no_combining(
        self, combine_text_under_n_chars: int | None, expected_value: int
    ):
        """Subclasses can store `combine_text_under_n_chars` but must validate and enable it.

        The `combine_text_under_n_chars` option is not used by all chunkers and its behavior can
        differ between subtypes. It is present in and stored by the constructor but it defaults to
        `0` (no pre-chunk combining) and must be overridden by subclasses to give it the desired
        behavior.
        """
        opts = ChunkingOptions(combine_text_under_n_chars=combine_text_under_n_chars)
        assert opts.combine_text_under_n_chars == expected_value

    @pytest.mark.parametrize(
        ("kwargs", "expected_value"),
        [
            ({"include_orig_elements": True}, True),
            ({"include_orig_elements": False}, False),
            # -- `None` and omission both fall back to the default of `True` --
            ({"include_orig_elements": None}, True),
            ({}, True),
        ],
    )
    def it_knows_whether_to_include_orig_elements_in_the_chunk_metadata(
        self, kwargs: dict[str, Any], expected_value: bool
    ):
        assert ChunkingOptions(**kwargs).include_orig_elements is expected_value

    @pytest.mark.parametrize("n_chars", [-1, -42])
    def it_rejects_new_after_n_chars_for_n_less_than_zero(self, n_chars: int):
        with pytest.raises(
            ValueError,
            match=f"'new_after_n_chars' argument must be >= 0, got {n_chars}",
        ):
            ChunkingOptions(new_after_n_chars=n_chars)._validate()

    def it_rejects_overlap_not_less_than_max_characters(self):
        with pytest.raises(
            ValueError,
            match="'overlap' argument must be less than `max_characters`, got 300 >= 200",
        ):
            ChunkingOptions(max_characters=200, overlap=300)._validate()

    def it_does_not_complain_when_specifying_new_after_n_chars_by_itself(self):
        """Caller can specify `new_after_n_chars` arg without specifying any other options."""
        opts = ChunkingOptions(new_after_n_chars=200)
        try:
            opts._validate()
        except ValueError:
            pytest.fail("did not accept `new_after_n_chars` as option by itself")
        assert opts.soft_max == 200

    def it_accepts_0_for_new_after_n_chars_to_put_each_element_into_its_own_chunk(self):
        """Specifying `new_after_n_chars=0` places each element into its own pre-chunk.

        This puts each element into its own chunk, although long chunks are still split.
        """
        opts = ChunkingOptions(new_after_n_chars=0)
        opts._validate()
        assert opts.soft_max == 0

    def it_silently_accepts_new_after_n_chars_greater_than_maxchars(self):
        """`new_after_n_chars` > `max_characters` doesn't affect chunking behavior.

        So rather than raising an exception or warning, we just cap that value at
        `max_characters` which is the behavioral equivalent.
        """
        opts = ChunkingOptions(max_characters=444, new_after_n_chars=555)
        try:
            opts._validate()
        except ValueError:
            pytest.fail("did not accept `new_after_n_chars` greater than `max_characters`")
        # -- soft_max is capped at max_characters, not the larger requested value --
        assert opts.soft_max == 444

    def it_knows_how_much_overlap_to_apply_to_split_chunks(self):
        assert ChunkingOptions(overlap=10).overlap == 10

    def and_it_uses_the_same_value_for_inter_chunk_overlap_when_asked_to_overlap_all_chunks(self):
        assert ChunkingOptions(overlap=10, overlap_all=True).inter_chunk_overlap == 10

    def but_it_does_not_overlap_pre_chunks_by_default(self):
        assert ChunkingOptions(overlap=10).inter_chunk_overlap == 0

    def it_knows_the_text_separator_string(self):
        assert ChunkingOptions().text_separator == "\n\n"

    # -- Token-based chunking tests --

    def it_rejects_max_tokens_and_max_characters_together(self):
        # -- the two limits are alternative sizing modes; only one may be specified --
        with pytest.raises(
            ValueError,
            match="'max_tokens' and 'max_characters' are mutually exclusive",
        ):
            ChunkingOptions(max_tokens=100, max_characters=500)._validate()

    def it_rejects_max_tokens_without_tokenizer(self):
        with pytest.raises(
            ValueError,
            match="'tokenizer' is required when using 'max_tokens'",
        ):
            ChunkingOptions(max_tokens=100)._validate()

    @pytest.mark.parametrize("max_tokens", [0, -1, -42])
    def it_rejects_max_tokens_not_greater_than_zero(self, max_tokens: int):
        with pytest.raises(
            ValueError,
            match=f"'max_tokens' argument must be > 0, got {max_tokens}",
        ):
            ChunkingOptions(max_tokens=max_tokens, tokenizer="cl100k_base")._validate()

    def it_rejects_new_after_n_tokens_without_max_tokens(self):
        with pytest.raises(
            ValueError,
            match="'new_after_n_tokens' requires 'max_tokens' to be specified",
        ):
            ChunkingOptions(new_after_n_tokens=50)._validate()

    @pytest.mark.parametrize("n_tokens", [-1, -42])
    def it_rejects_new_after_n_tokens_for_n_less_than_zero(self, n_tokens: int):
        with pytest.raises(
            ValueError,
            match=f"'new_after_n_tokens' argument must be >= 0, got {n_tokens}",
        ):
            ChunkingOptions(
                max_tokens=100, new_after_n_tokens=n_tokens, tokenizer="cl100k_base"
            )._validate()

    def it_knows_when_token_counting_is_enabled(self):
        opts_char = ChunkingOptions(max_characters=500)
        opts_token = ChunkingOptions(max_tokens=100, tokenizer="cl100k_base")
        assert opts_char.use_token_counting is False
        assert opts_token.use_token_counting is True

    def it_returns_hard_max_in_tokens_when_token_counting_is_enabled(self):
        # -- in token mode, `hard_max` is expressed in tokens, not characters --
        opts = ChunkingOptions(max_tokens=100, tokenizer="cl100k_base")
        assert opts.hard_max == 100

    def it_returns_soft_max_in_tokens_when_token_counting_is_enabled(self):
        opts = ChunkingOptions(max_tokens=100, new_after_n_tokens=80, tokenizer="cl100k_base")
        assert opts.soft_max == 80

    def it_defaults_soft_max_to_hard_max_for_token_counting(self):
        opts = ChunkingOptions(max_tokens=100, tokenizer="cl100k_base")
        assert opts.soft_max == 100

    def it_creates_token_counter_when_tokenizer_is_specified(self):
        opts = ChunkingOptions(max_tokens=100, tokenizer="cl100k_base")
        assert opts.token_counter is not None

    def it_returns_no_token_counter_when_tokenizer_is_not_specified(self):
        opts = ChunkingOptions(max_characters=500)
        assert opts.token_counter is None

    def it_measures_text_in_characters_by_default(self):
        # -- without a tokenizer, `.measure()` is simple character length --
        opts = ChunkingOptions(max_characters=500)
        text = "Hello, World!"
        assert opts.measure(text) == len(text)
# ================================================================================================
# TOKEN COUNTER
# ================================================================================================
class DescribeTokenCounter:
    """Unit-test suite for `unstructured.chunking.base.TokenCounter` objects."""

    @pytest.fixture
    def _tiktoken_installed(self):
        """Skip test if tiktoken is not installed."""
        pytest.importorskip("tiktoken")

    def it_counts_tokens_using_encoding_name(self, _tiktoken_installed: None):
        # -- a TokenCounter can be constructed from a tiktoken encoding name like "cl100k_base" --
        token_count = TokenCounter("cl100k_base").count("Hello, World!")

        assert isinstance(token_count, int)
        assert token_count > 0

    def it_counts_tokens_using_model_name(self, _tiktoken_installed: None):
        # -- a model name like "gpt-4" also works as the tokenizer spec --
        token_count = TokenCounter("gpt-4").count("Hello, World!")

        assert isinstance(token_count, int)
        assert token_count > 0

    def it_lazily_imports_tiktoken(self, _tiktoken_installed: None):
        counter = TokenCounter("cl100k_base")

        # -- construction alone must not create the encoder --
        assert "_encoder" not in counter.__dict__
        counter.count("test")
        # -- the first `.count()` call creates and caches it on the instance --
        assert "_encoder" in counter.__dict__
# ================================================================================================
# TEXT SPLITTER (TOKEN MODE)
# ================================================================================================
class DescribeTextSplitterTokenMode:
    """Unit-test suite for `_TextSplitter` in token-based chunking mode."""

    @pytest.fixture
    def _tiktoken_installed(self):
        """Skip test if tiktoken is not installed."""
        pytest.importorskip("tiktoken")

    def it_returns_text_unchanged_when_under_token_limit(self, _tiktoken_installed: None):
        opts = ChunkingOptions(max_tokens=100, tokenizer="cl100k_base")
        split = _TextSplitter(opts)
        text = "Hello, World!"
        fragment, remainder = split(text)
        # -- text under the limit passes through whole, with nothing left over --
        assert fragment == text
        assert remainder == ""

    def it_splits_oversized_text_respecting_token_limit(self, _tiktoken_installed: None):
        opts = ChunkingOptions(max_tokens=10, tokenizer="cl100k_base")
        split = _TextSplitter(opts)
        # -- create text that exceeds 10 tokens --
        text = "The quick brown fox jumps over the lazy dog. " * 5
        fragment, remainder = split(text)
        # -- fragment should be non-empty and have no more tokens than the limit --
        assert len(fragment) > 0
        assert len(remainder) > 0
        assert opts.measure(fragment) <= 10

    def it_prefers_separator_boundaries_when_splitting(self, _tiktoken_installed: None):
        opts = ChunkingOptions(max_tokens=15, tokenizer="cl100k_base")
        split = _TextSplitter(opts)
        # -- text with clear sentence boundaries --
        text = "First sentence here. Second sentence here. Third sentence here."
        fragment, remainder = split(text)
        # -- should split on a sentence/word boundary, not mid-word --
        assert fragment.endswith(".") or fragment[-1].isalnum()
        assert not fragment.endswith(" ")

    def it_handles_text_with_no_good_split_points(self, _tiktoken_installed: None):
        opts = ChunkingOptions(max_tokens=5, tokenizer="cl100k_base")
        split = _TextSplitter(opts)
        # -- single long word repeated, so no sentence boundary falls within the limit --
        text = "Supercalifragilisticexpialidocious " * 10
        fragment, remainder = split(text)
        # -- should still produce a valid split --
        assert len(fragment) > 0
        assert opts.measure(fragment) <= 5

    def it_applies_token_based_overlap_not_character_based(self, _tiktoken_installed: None):
        """Overlap in token mode should be measured in tokens, not characters."""
        # -- 3 tokens of overlap --
        opts = ChunkingOptions(max_tokens=10, tokenizer="cl100k_base", overlap=3)
        split = _TextSplitter(opts)
        # -- text that will need to be split (14 tokens total) --
        text = "apple banana cherry date elderberry fig grape honeydew kiwi lemon"
        fragment, remainder = split(text)
        # -- verify exact fragment content (8 tokens, split at sentence boundary) --
        assert fragment == "apple banana cherry date elderberry fig grape"
        assert opts.measure(fragment) == 8
        # -- verify exact remainder content (overlap + remaining text) --
        # -- "fig grape" is the 3-token overlap from end of fragment --
        assert remainder == "fig grape honeydew kiwi lemon"
        # -- remainder starts with overlap words from fragment --
        assert remainder.startswith("fig grape")

    def it_computes_token_overlap_tail_correctly(self, _tiktoken_installed: None):
        """Test the _get_token_overlap_tail helper method."""
        import tiktoken

        enc = tiktoken.get_encoding("cl100k_base")
        opts = ChunkingOptions(max_tokens=100, tokenizer="cl100k_base")
        splitter = _TextSplitter(opts)
        text = "The quick brown fox jumps over the lazy dog."
        # -- request 3 tokens worth of tail --
        tail = splitter._get_token_overlap_tail(text, 3)
        # -- verify exact tail content: "lazy dog." is exactly 3 tokens --
        assert tail == "lazy dog."
        assert len(enc.encode(tail)) == 3

    def it_handles_overlap_when_text_has_fewer_tokens_than_target(self, _tiktoken_installed: None):
        """When text has fewer tokens than overlap target, return all text."""
        opts = ChunkingOptions(max_tokens=100, tokenizer="cl100k_base")
        splitter = _TextSplitter(opts)
        short_text = "Hello"  # Just 1 token
        tail = splitter._get_token_overlap_tail(short_text, 5)
        # -- should return the entire text (stripped) --
        assert tail == "Hello"

    def it_produces_correct_overlapping_splits(self, _tiktoken_installed: None):
        """Verify the complete split-with-overlap behavior works correctly."""
        opts = ChunkingOptions(max_tokens=8, tokenizer="cl100k_base", overlap=2)
        split = _TextSplitter(opts)
        # -- create text that will need multiple splits (12 tokens total) --
        text = "one two three four five six seven eight nine ten eleven twelve"
        # -- first split --
        fragment1, remainder1 = split(text)
        # -- verify exact first fragment (8 tokens) --
        assert fragment1 == "one two three four five six seven eight"
        assert opts.measure(fragment1) == 8
        # -- verify exact remainder with overlap --
        # -- "seven eight" is the 2-token overlap from end of fragment1 --
        assert remainder1 == "seven eight nine ten eleven twelve"
        assert remainder1.startswith("seven eight")
        # -- second split consumes remainder completely (6 tokens, under limit) --
        fragment2, remainder2 = split(remainder1)
        assert fragment2 == "seven eight nine ten eleven twelve"
        assert remainder2 == ""
# ================================================================================================
# PRE-CHUNKER
# ================================================================================================
class DescribePreChunker:
    """Unit-test suite for `unstructured.chunking.base.PreChunker` objects."""

    def it_gathers_elements_into_pre_chunks_respecting_the_specified_chunk_size(self):
        elements = [
            Title("Lorem Ipsum"),
            Text("Lorem ipsum dolor sit amet, consectetur adipiscing elit."),
            Text("Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua."),
            Title("Ut Enim"),
            Text("Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi."),
            Text("Ut aliquip ex ea commodo consequat."),
            CheckBox(),
        ]
        opts = ChunkingOptions(max_characters=150, new_after_n_chars=65)

        pre_chunks = list(PreChunker.iter_pre_chunks(elements, opts=opts))

        # -- each expected group fits within the soft-max; a Title starts a new pre-chunk --
        expected_element_groups = [
            [
                Title("Lorem Ipsum"),
                Text("Lorem ipsum dolor sit amet, consectetur adipiscing elit."),
            ],
            [Text("Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.")],
            [
                Title("Ut Enim"),
                Text("Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi."),
            ],
            [Text("Ut aliquip ex ea commodo consequat."), CheckBox()],
        ]
        # -- exactly four pre-chunks are produced, no more --
        assert len(pre_chunks) == len(expected_element_groups)
        for pre_chunk, expected_elements in zip(pre_chunks, expected_element_groups):
            assert isinstance(pre_chunk, PreChunk)
            assert pre_chunk._elements == expected_elements
class DescribePreChunkBuilder:
    """Unit-test suite for `unstructured.chunking.base.PreChunkBuilder`."""

    def it_is_empty_on_construction(self):
        builder = PreChunkBuilder(opts=ChunkingOptions(max_characters=50))
        assert builder._text_length == 0
        assert builder._remaining_space == 50

    def it_accumulates_elements_added_to_it(self):
        builder = PreChunkBuilder(opts=ChunkingOptions(max_characters=150))

        builder.add_element(Title("Introduction"))
        # -- "Introduction" is 12 chars; 150 - 12 - 2 (trailing separator) = 136 --
        assert builder._text_length == 12
        assert builder._remaining_space == 136

        builder.add_element(
            Text(
                "Lorem ipsum dolor sit amet consectetur adipiscing elit. In rhoncus ipsum sed"
                "lectus porta volutpat.",
            ),
        )
        # -- 12 + 2 (separator) + 98 = 112; 150 - 112 - 2 = 36 --
        assert builder._text_length == 112
        assert builder._remaining_space == 36

    def it_will_fit_when_element_has_none_as_text(self):
        builder = PreChunkBuilder(opts=ChunkingOptions())
        assert builder.will_fit(Image(None))

    def it_will_fit_an_oversized_element_when_empty(self):
        # -- an oversized element always fits an empty builder; splitting happens downstream --
        builder = PreChunkBuilder(opts=ChunkingOptions())
        assert builder.will_fit(Text("abcd " * 200))

    @pytest.mark.parametrize(
        ("existing_element", "next_element"),
        [
            (Text("abcd"), Text("abcd " * 200)),
            (Table("Heading\nCell text"), Text("abcd " * 200)),
        ],
    )
    def but_not_when_it_already_contains_an_element(
        self, existing_element: Element, next_element: Element
    ):
        builder = PreChunkBuilder(opts=ChunkingOptions())
        builder.add_element(existing_element)
        assert not builder.will_fit(next_element)

    @pytest.mark.parametrize("element", [Text("abcd"), Table("Fruits\nMango")])
    def it_will_accept_another_element_that_fits_when_it_already_contains_a_table(
        self, element: Element
    ):
        builder = PreChunkBuilder(opts=ChunkingOptions())
        builder.add_element(Table("Heading\nCell text"))
        assert builder.will_fit(element)

    def it_will_not_fit_an_element_when_it_already_exceeds_the_soft_maxlen(self):
        builder = PreChunkBuilder(opts=ChunkingOptions(max_characters=100, new_after_n_chars=50))
        builder.add_element(
            Text("Lorem ipsum dolor sit amet consectetur adipiscing elit.")  # 55-chars
        )
        # -- 55 chars already exceeds the soft-max of 50, so nothing more fits --
        assert not builder.will_fit(Text("In rhoncus ipsum."))

    def and_it_will_not_fit_an_element_when_that_would_cause_it_to_exceed_the_hard_maxlen(self):
        builder = PreChunkBuilder(opts=ChunkingOptions(max_characters=100))
        builder.add_element(
            Text("Lorem ipsum dolor sit amet consectetur adipiscing elit.")  # 55-chars
        )
        # -- 55 + 2 (separator) + 44 == 101 --
        assert not builder.will_fit(
            Text("In rhoncus ipsum sed lectus portos volutpat.")  # 44-chars
        )

    def but_it_will_fit_an_element_that_fits(self):
        builder = PreChunkBuilder(opts=ChunkingOptions(max_characters=100))
        builder.add_element(
            Text("Lorem ipsum dolor sit amet consectetur adipiscing elit.")  # 55-chars
        )
        # -- 55 + 2 (separator) + 43 == 100 --
        assert builder.will_fit(Text("In rhoncus ipsum sed lectus porto volutpat."))  # 43-chars

    def it_generates_a_PreChunk_when_flushed_and_resets_itself_to_empty(self):
        builder = PreChunkBuilder(opts=ChunkingOptions(max_characters=150))
        builder.add_element(Title("Introduction"))
        builder.add_element(
            Text(
                "Lorem ipsum dolor sit amet consectetur adipiscing elit. In rhoncus ipsum sed"
                "lectus porta volutpat.",
            ),
        )

        pre_chunk = next(builder.flush())

        # -- pre-chunk builder was reset before the yield, such that the iterator does not need to
        # -- be exhausted before clearing out the old elements and a new pre-chunk can be
        # -- accumulated immediately (first `next()` call is required however, to advance to the
        # -- yield statement).
        assert builder._text_length == 0
        assert builder._remaining_space == 150
        assert isinstance(pre_chunk, PreChunk)
        assert pre_chunk._elements == [
            Title("Introduction"),
            Text(
                "Lorem ipsum dolor sit amet consectetur adipiscing elit. In rhoncus ipsum sed"
                "lectus porta volutpat.",
            ),
        ]

    def but_it_does_not_generate_a_pre_chunk_on_flush_when_empty(self):
        builder = PreChunkBuilder(opts=ChunkingOptions(max_characters=150))
        pre_chunks = list(builder.flush())
        assert pre_chunks == []
        assert builder._text_length == 0
        assert builder._remaining_space == 150

    def it_computes_overlap_from_each_pre_chunk_and_applies_it_to_the_next(self):
        opts = ChunkingOptions(overlap=15, overlap_all=True)
        builder = PreChunkBuilder(opts=opts)

        # -- the first pre-chunk has no overlap prefix --
        builder.add_element(Text("Lorem ipsum dolor sit amet consectetur adipiscing elit."))
        pre_chunk = list(builder.flush())[0]

        assert isinstance(pre_chunk, PreChunk)
        assert pre_chunk._text == "Lorem ipsum dolor sit amet consectetur adipiscing elit."

        # -- each subsequent pre-chunk is prefixed with the last 15 chars of the prior one --
        builder.add_element(Table("In rhoncus ipsum sed lectus porta volutpat."))
        pre_chunk = list(builder.flush())[0]

        assert isinstance(pre_chunk, PreChunk)
        assert pre_chunk._text == "dipiscing elit.\n\nIn rhoncus ipsum sed lectus porta volutpat."

        builder.add_element(Text("Donec semper facilisis metus finibus."))
        pre_chunk = list(builder.flush())[0]

        assert isinstance(pre_chunk, PreChunk)
        assert pre_chunk._text == "porta volutpat.\n\nDonec semper facilisis metus finibus."

    def it_considers_separator_length_when_computing_text_length_and_remaining_space(self):
        builder = PreChunkBuilder(opts=ChunkingOptions(max_characters=50))
        builder.add_element(Text("abcde"))
        builder.add_element(Text("fghij"))

        # -- ._text_length includes a separator ("\n\n", len==2) between each text-segment,
        # -- so 5 + 2 + 5 = 12 here, not 5 + 5 = 10
        assert builder._text_length == 12
        # -- ._remaining_space is reduced by the length (2) of the trailing separator which would
        # -- go between the current text and that of the next element if one was added.
        # -- So 50 - 12 - 2 = 36 here, not 50 - 12 = 38
        assert builder._remaining_space == 36
# ================================================================================================
# PRE-CHUNK SUBTYPES
# ================================================================================================
class DescribePreChunk:
"""Unit-test suite for `unstructured.chunking.base.PreChunk` objects."""
@pytest.mark.parametrize(
("overlap_pfx", "texts", "other_overlap_pfx", "other_texts", "expected_value"),
[
# -- same elements, and overlap-prefix --
("foo", ["bar", "baz"], "foo", ["bar", "baz"], True),
# -- same elements, no overlap-prefix --
("", ["bar", "baz"], "", ["bar", "baz"], True),
# -- same elements, different overlap-prefix --
("foo", ["bar", "baz"], "fob", ["bar", "baz"], False),
# -- different elements, same overlap-prefix --
("foo", ["bar", "baz"], "foo", ["bah", "dah"], False),
# -- different elements, different overlap-prefix --
("", ["bar", "baz"], "foo", ["bah", "dah"], False),
],
)
def it_knows_when_it_is_equal_to_another_PreChunk_instance(
self,
overlap_pfx: str,
texts: list[str],
other_overlap_pfx: str,
other_texts: list[str],
expected_value: bool,
):
opts = ChunkingOptions()
pre_chunk = PreChunk([Text(t) for t in texts], overlap_prefix=overlap_pfx, opts=opts)
other_pre_chunk = PreChunk(
[Text(t) for t in other_texts], overlap_prefix=other_overlap_pfx, opts=opts
)
assert (pre_chunk == other_pre_chunk) is expected_value
def and_it_knows_it_is_NOT_equal_to_an_object_that_is_not_a_PreChunk(self):
pre_chunk = PreChunk([], overlap_prefix="", opts=ChunkingOptions())
assert pre_chunk != 42
def it_can_handle_element_with_none_as_text(self):
pre_chunk = PreChunk(
[Image(None), Text("hello")], overlap_prefix="", opts=ChunkingOptions()
)
assert pre_chunk._text == "hello"
def it_can_chunk_elements_with_none_text_without_error(self):
"""Regression test for AttributeError when Image elements have None text."""
pre_chunk = PreChunk(
[Image(None), Text("hello world"), Image(None)],
overlap_prefix="",
opts=ChunkingOptions(),
)
# Should not raise AttributeError when generating chunks
chunks = list(pre_chunk.iter_chunks())
assert len(chunks) == 1
assert chunks[0].text == "hello world"
@pytest.mark.parametrize(
("max_characters", "combine_text_under_n_chars", "expected_value"),
[
# Will exactly fit:
# - Prefix + separator + text = 20 + 2 + 50 = 72 < combine_text_under_n_chars
# - pre_chunk + separator + next_pre_chunk_text = 72 + 2 + 26 = 100 <= max_characters
(100, 73, True),
# -- already exceeds combine_text_under_n_chars threshold --
(100, 72, False),
# -- would exceeds hard-max chunking-window threshold --
(99, 73, False),
],
)
def it_knows_when_it_can_combine_itself_with_another_PreChunk_instance(
self, max_characters: int, combine_text_under_n_chars: int, expected_value: bool
):
"""This allows `PreChunkCombiner` to operate without knowing `PreChunk` internals."""
opts = ChunkingOptions(
max_characters=max_characters,
combine_text_under_n_chars=combine_text_under_n_chars,
overlap=20,
overlap_all=True,
)
pre_chunk = PreChunk(
[Text("Lorem ipsum dolor sit amet consectetur adipiscing.")], # len == 50
overlap_prefix="e feugiat efficitur.", # len == 20
opts=opts,
)
next_pre_chunk = PreChunk(
[Text("In rhoncus sum sed lectus.")], # len == 26
overlap_prefix="sectetur adipiscing.", # len == 20 but shouldn't come into computation
opts=opts,
)
assert pre_chunk.can_combine(next_pre_chunk) is expected_value
def it_can_combine_itself_with_another_PreChunk_instance(self):
""".combine() produces a new pre-chunk by appending the elements of `other_pre-chunk`.
Note that neither the original or other pre_chunk are mutated.
"""
opts = ChunkingOptions()
pre_chunk = PreChunk(
[
Text("Lorem ipsum dolor sit amet consectetur adipiscing elit."),
Text("In rhoncus ipsum sed lectus porta volutpat."),
],
overlap_prefix="feugiat efficitur.",
opts=opts,
)
other_pre_chunk = PreChunk(
[
Text("Donec semper facilisis metus finibus malesuada."),
Text("Vivamus magna nibh, blandit eu dui congue, feugiat efficitur velit."),
],
overlap_prefix="porta volupat.",
opts=opts,
)
new_pre_chunk = pre_chunk.combine(other_pre_chunk)
# -- Combined pre-chunk contains all elements from both, in order. It gets the
# -- overlap-prefix from the existing pre-chunk and the other overlap-prefix is discarded
# -- (although it's still in there at the end of the first pre-chunk since that's where it
# -- came from originally).
assert new_pre_chunk == PreChunk(
[
Text("Lorem ipsum dolor sit amet consectetur adipiscing elit."),
Text("In rhoncus ipsum sed lectus porta volutpat."),
Text("Donec semper facilisis metus finibus malesuada."),
Text("Vivamus magna nibh, blandit eu dui congue, feugiat efficitur velit."),
],
overlap_prefix="feugiat efficitur.",
opts=opts,
)
# -- Neither pre-chunk used for combining is mutated, so we don't have to worry about who
# -- else may have been given a reference to them.
assert pre_chunk == PreChunk(
[
Text("Lorem ipsum dolor sit amet consectetur adipiscing elit."),
Text("In rhoncus ipsum sed lectus porta volutpat."),
],
overlap_prefix="feugiat efficitur.",
opts=opts,
)
assert other_pre_chunk == PreChunk(
[
Text("Donec semper facilisis metus finibus malesuada."),
Text("Vivamus magna nibh, blandit eu dui congue, feugiat efficitur velit."),
],
overlap_prefix="porta volupat.",
opts=opts,
)
@pytest.mark.parametrize(
("text", "expected_value"),
[
# -- normally it splits exactly on overlap size |------- 20 -------|
("In rhoncus ipsum sed lectus porta volutpat.", "ctus porta volutpat."),
# -- but it strips leading and trailing whitespace when the tail includes it --
("In rhoncus ipsum sed lect us portas volutpat. ", "us portas volutpat."),
],
)
def it_computes_its_overlap_tail_for_use_in_inter_pre_chunk_overlap(
self, text: str, expected_value: str
):
pre_chunk = PreChunk(
[Text(text)], overlap_prefix="", opts=ChunkingOptions(overlap=20, overlap_all=True)
)
assert pre_chunk.overlap_tail == expected_value
@pytest.mark.parametrize(
("elements", "overlap_prefix", "expected_value"),
[
([Text("foo"), Text("bar")], "bah da bing.", "bah da bing.\n\nfoo\n\nbar"),
([Text("foo"), PageBreak(""), Text("bar")], "da bang.", "da bang.\n\nfoo\n\nbar"),
([PageBreak(""), Text("foo")], "bah da boom.", "bah da boom.\n\nfoo"),
([Text("foo"), Text("bar"), PageBreak("")], "", "foo\n\nbar"),
],
)
def it_knows_the_concatenated_text_of_the_pre_chunk_to_help(
self, elements: list[Text], overlap_prefix: str, expected_value: str
):
"""._text is the "joined" text of the pre-chunk elements.
The text-segment contributed by each element is separated from the next by a blank line
("\n\n"). An element that contributes no text does not give rise to a separator.
"""
pre_chunk = PreChunk(elements, overlap_prefix=overlap_prefix, opts=ChunkingOptions())
assert pre_chunk._text == expected_value
def it_preserves_whitespace_in_CodeSnippet_elements(self):
    """A `CodeSnippet`'s internal whitespace, newlines included, survives into `._text`.

    Formatting (indentation, line breaks) is semantically meaningful in code blocks, so it
    must not be collapsed the way ordinary prose whitespace is.
    """
    code_text = "def hello():\n print('Hello')\n return True"
    pre_chunk = PreChunk([CodeSnippet(code_text)], overlap_prefix="", opts=ChunkingOptions())

    # -- newlines survive rather than being collapsed to spaces --
    assert "\n" in pre_chunk._text
    assert pre_chunk._text == code_text
def it_preserves_whitespace_in_CodeSnippet_when_mixed_with_other_elements(self):
    """`CodeSnippet` whitespace survives even alongside regular `Text` elements."""
    code_text = "for i in range(10):\n print(i)"
    elements = [
        Text("Here is some code:"),
        CodeSnippet(code_text),
        Text("That was the code."),
    ]

    pre_chunk = PreChunk(elements, overlap_prefix="", opts=ChunkingOptions())

    # -- the code segment keeps its newline --
    assert "for i in range(10):\n print(i)" in pre_chunk._text
    # -- while ordinary text segments are still joined by the blank-line separator --
    assert "Here is some code:\n\n" in pre_chunk._text
# ================================================================================================
# CHUNKING HELPER/SPLITTERS
# ================================================================================================
class Describe_Chunker:
"""Unit-test suite for `unstructured.chunking.base._Chunker` objects."""
def it_generates_a_single_chunk_from_its_elements_if_they_together_fit_in_window(self):
    opts = ChunkingOptions(max_characters=200, include_orig_elements=True)
    elements = [
        Title("Introduction"),
        Text(
            "Lorem ipsum dolor sit amet consectetur adipiscing elit. In rhoncus ipsum sed"
            " lectus porta volutpat.",
        ),
    ]
    text = (
        "e feugiat efficitur.\n\nIntroduction\n\nLorem ipsum dolor sit amet consectetur"
        " adipiscing elit. In rhoncus ipsum sed lectus porta volutpat."
    )
    chunker = _Chunker(elements, text=text, opts=opts)

    chunks = list(chunker._iter_chunks())

    # -- everything fits in the window, so exactly one chunk is generated --
    assert len(chunks) == 1
    chunk = chunks[0]
    assert chunk == CompositeElement(
        "e feugiat efficitur.\n\nIntroduction\n\nLorem ipsum dolor sit amet consectetur"
        " adipiscing elit. In rhoncus ipsum sed lectus porta volutpat.",
    )
    assert chunk.metadata is chunker._consolidated_metadata
    assert chunk.metadata.orig_elements == elements
def but_it_generates_split_chunks_when_its_single_element_exceeds_window_size(self):
    # -- Chunk-splitting only occurs when a *single* element is too big to fit in the
    # -- window; the pre-chunker isolates such an element in a pre-chunk of its own.
    text = (
        "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor"
        " incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud"
        " exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat."
    )
    elements = [Text(text)]
    opts = ChunkingOptions(max_characters=200, include_orig_elements=True)
    chunker = _Chunker(elements, text=text, opts=opts)

    chunks = list(chunker._iter_chunks())

    # -- Note that `.metadata.orig_elements` repeats the same single original element for
    # -- each text-split chunk. That behavior emerges without explicit command from using
    # -- `._consolidated_metadata` (and `._continuation_metadata`, which extends it) for
    # -- every text-split chunk.
    assert len(chunks) == 2
    assert chunks[0] == CompositeElement(
        "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod"
        " tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim"
        " veniam, quis nostrud exercitation ullamco laboris nisi ut"
    )
    assert chunks[0].metadata is chunker._consolidated_metadata
    assert chunks[0].metadata.orig_elements == elements
    # --
    assert chunks[1] == CompositeElement("aliquip ex ea commodo consequat.")
    assert chunks[1].metadata is chunker._continuation_metadata
    assert chunks[1].metadata.orig_elements == elements
def and_it_adds_the_is_continuation_flag_for_second_and_later_split_chunks(self):
    # -- |--------------------- 48 ---------------------|
    text = "'Lorem ipsum dolor' means 'Thank you very much'."
    metadata = ElementMetadata(
        category_depth=0,
        filename="foo.docx",
        languages=["lat"],
        parent_id="f87731e0",
    )
    opts = ChunkingOptions(max_characters=20)

    chunk_iter = _Chunker.iter_chunks([Text(text, metadata=metadata)], text, opts=opts)

    # -- the first split carries no flag; every later split is marked a continuation --
    flags = [chunk.metadata.is_continuation for chunk in chunk_iter]
    assert flags == [None, True, True]
def but_it_generates_no_chunks_when_the_pre_chunk_contains_no_text(self):
    elements = [PageBreak(" ", metadata=ElementMetadata())]

    chunk_iter = _Chunker.iter_chunks(elements, text="", opts=ChunkingOptions())

    # -- a text-less pre-chunk yields no chunks at all --
    assert list(chunk_iter) == []
def it_extracts_all_populated_metadata_values_from_the_elements_to_help(self):
    title = Title(
        "Lorem Ipsum",
        metadata=ElementMetadata(
            category_depth=0,
            filename="foo.docx",
            languages=["lat"],
            parent_id="f87731e0",
        ),
    )
    narrative = Text(
        "'Lorem ipsum dolor' means 'Thank you very much' in Latin.",
        metadata=ElementMetadata(
            category_depth=1,
            filename="foo.docx",
            image_path="sprite.png",
            languages=["lat", "eng"],
        ),
    )
    text = "Lorem Ipsum\n\n'Lorem ipsum dolor' means 'Thank you very much' in Latin."

    chunker = _Chunker([title, narrative], text=text, opts=ChunkingOptions())

    assert chunker._all_metadata_values == {
        # -- scalar values accumulate in a list, in element order --
        "category_depth": [0, 1],
        # -- every occurrence is kept, duplicates included --
        "filename": ["foo.docx", "foo.docx"],
        # -- list-valued fields produce a list of lists --
        "languages": [["lat"], ["lat", "eng"]],
        # -- fields present on only some elements still appear --
        "image_path": ["sprite.png"],
        "parent_id": ["f87731e0"],
        # -- a `None` value never appears, nor does a field-name with an empty list --
    }
def but_it_discards_ad_hoc_metadata_fields_during_consolidation(self):
metadata = ElementMetadata(
category_depth=0,
filename="foo.docx",
languages=["lat"],
parent_id="f87731e0",
)
metadata.coefficient = 0.62
metadata_2 = ElementMetadata(
category_depth=1,
filename="foo.docx",
image_path="sprite.png",
languages=["lat", "eng"],
)
metadata_2.quotient = 1.74
elements = [
Title("Lorem Ipsum", metadata=metadata),
Text("'Lorem ipsum dolor' means 'Thank you very much'.", metadata=metadata_2),
]
text = "Lorem Ipsum\n\n'Lorem ipsum dolor' means 'Thank you very much' in Latin."
chunker = _Chunker(elements, text=text, opts=ChunkingOptions())