@@ -107,18 +107,25 @@ def test_empty_queue_flush(mock_send, elasticapm_client):
     transport.close()
 
 
-@mock.patch("elasticapm.transport.base.Transport.send")
+@mock.patch("elasticapm.transport.base.Transport._flush")
 @pytest.mark.parametrize("elasticapm_client", [{"api_request_time": "5s"}], indirect=True)
-def test_metadata_prepended(mock_send, elasticapm_client):
+def test_metadata_prepended(mock_flush, elasticapm_client):
     transport = Transport(client=elasticapm_client, compress_level=0)
     transport.start_thread()
     transport.queue("error", {}, flush=True)
     transport.close()
-    assert mock_send.call_count == 1
-    args, kwargs = mock_send.call_args
-    data = gzip.decompress(args[0])
+    assert mock_flush.call_count == 1
+    args, kwargs = mock_flush.call_args
+    buffer = args[0]
+    # this test used to mock send but after we fixed a leak for not releasing the memoryview containing
+    # the gzipped data we cannot read it anymore. So reimplement _flush and read the data ourselves
+    fileobj = buffer.fileobj
+    buffer.close()
+    compressed_data = fileobj.getbuffer()
+    data = gzip.decompress(compressed_data)
     data = data.decode("utf-8").split("\n")
     assert "metadata" in data[0]
+    compressed_data.release()
 
 
 @mock.patch("elasticapm.transport.base.Transport.send")
@@ -157,43 +164,43 @@ def test_api_request_time_dynamic(mock_send, caplog, elasticapm_client):
     assert mock_send.call_count == 0
 
 
-@pytest.mark.skipif(sys.version_info >= (3, 12), reason="Failing locally on 3.12.0rc1")  # TODO py3.12
+def _cleanup_flush_mock_buffers(mock_flush):
+    args, kwargs = mock_flush.call_args
+    buffer = args[0]
+    buffer.close()
+
+
 @mock.patch("elasticapm.transport.base.Transport._flush")
 def test_api_request_size_dynamic(mock_flush, caplog, elasticapm_client):
-    elasticapm_client.config.update(version="1", api_request_size="100b")
+    elasticapm_client.config.update(version="1", api_request_size="9b")
     transport = Transport(client=elasticapm_client, queue_chill_count=1)
     transport.start_thread()
     try:
         with caplog.at_level("DEBUG", "elasticapm.transport"):
-            # we need to add lots of uncompressible data to fill up the gzip-internal buffer
-            for i in range(12):
-                transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000)))
+            transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000)))
         transport._flushed.wait(timeout=0.1)
+        _cleanup_flush_mock_buffers(mock_flush)
         assert mock_flush.call_count == 1
         elasticapm_client.config.update(version="1", api_request_size="1mb")
         with caplog.at_level("DEBUG", "elasticapm.transport"):
-            # we need to add lots of uncompressible data to fill up the gzip-internal buffer
-            for i in range(12):
-                transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000)))
+            transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000)))
         transport._flushed.wait(timeout=0.1)
         # Should be unchanged because our buffer limit is much higher.
         assert mock_flush.call_count == 1
     finally:
         transport.close()
 
 
-@pytest.mark.skipif(sys.version_info >= (3, 12), reason="Failing locally on 3.12.0rc1")  # TODO py3.12
 @mock.patch("elasticapm.transport.base.Transport._flush")
-@pytest.mark.parametrize("elasticapm_client", [{"api_request_size": "100b"}], indirect=True)
+@pytest.mark.parametrize("elasticapm_client", [{"api_request_size": "9b"}], indirect=True)
 def test_flush_time_size(mock_flush, caplog, elasticapm_client):
     transport = Transport(client=elasticapm_client, queue_chill_count=1)
     transport.start_thread()
     try:
         with caplog.at_level("DEBUG", "elasticapm.transport"):
-            # we need to add lots of uncompressible data to fill up the gzip-internal buffer
-            for i in range(12):
-                transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000)))
+            transport.queue("error", "".join(random.choice(string.ascii_letters) for i in range(2000)))
         transport._flushed.wait(timeout=0.1)
+        _cleanup_flush_mock_buffers(mock_flush)
         assert mock_flush.call_count == 1
     finally:
         transport.close()