Skip to content

Commit 30b0823

Browse files
maxwbuckley authored
and claude committed
Fix warnings in additional sample files built with TensorRT runtime
Fix warnings in 10 sample files that are only compiled when the real TensorRT runtime libraries are available (not with stub libs): - BatchStream.h: add virtual destructor, int64_t narrowing casts - sampleCharRNN: shadow fix, size_t casts - sampleDynamicReshape: unused param, double-promotion, conversion - sampleINT8API: shadow fix, float/int casts - sampleIOFormats: shadow fixes, unused params, sizeof casts - sampleNamedDimensions: unused params, float literal fixes - sampleNonZeroPlugin: unused params, int64_t/size_t casts - sampleOnnxMNIST: unused param, exp() float cast - sampleOnnxMnistCoordConvAC: float literal fixes, exp() cast - sampleProgressMonitor: int-to-float cast, exp() cast, ptr diff cast Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com> Signed-off-by: Max Buckley <maxwbuckley@gmail.com>
1 parent 5ef4b52 commit 30b0823

File tree

10 files changed

+105
-104
lines changed

10 files changed

+105
-104
lines changed

samples/common/BatchStream.h

Lines changed: 13 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,7 @@
2626
class IBatchStream
2727
{
2828
public:
29+
virtual ~IBatchStream() = default;
2930
virtual void reset(int firstBatch) = 0;
3031
virtual bool next() = 0;
3132
virtual void skip(int skipCount) = 0;
@@ -173,7 +174,7 @@ class BatchStream : public IBatchStream
173174
mDims.d[3] = d[3]; // Width
174175
ASSERT(mDims.d[0] > 0 && mDims.d[1] > 0 && mDims.d[2] > 0 && mDims.d[3] > 0);
175176

176-
mImageSize = mDims.d[1] * mDims.d[2] * mDims.d[3];
177+
mImageSize = static_cast<int>(mDims.d[1] * mDims.d[2] * mDims.d[3]);
177178
mBatch.resize(mBatchSize * mImageSize, 0);
178179
mLabels.resize(mBatchSize, 0);
179180
mFileBatch.resize(mDims.d[0] * mImageSize, 0);
@@ -193,7 +194,7 @@ class BatchStream : public IBatchStream
193194
, mListFile(listFile)
194195
, mDataDir(directories)
195196
{
196-
mImageSize = mDims.d[1] * mDims.d[2] * mDims.d[3];
197+
mImageSize = static_cast<int>(mDims.d[1] * mDims.d[2] * mDims.d[3]);
197198
mBatch.resize(mBatchSize * mImageSize, 0);
198199
mLabels.resize(mBatchSize, 0);
199200
mFileBatch.resize(mDims.d[0] * mImageSize, 0);
@@ -205,7 +206,7 @@ class BatchStream : public IBatchStream
205206
{
206207
mBatchCount = 0;
207208
mFileCount = 0;
208-
mFileBatchPos = mDims.d[0];
209+
mFileBatchPos = static_cast<int>(mDims.d[0]);
209210
skip(firstBatch);
210211
}
211212

@@ -217,7 +218,7 @@ class BatchStream : public IBatchStream
217218
return false;
218219
}
219220

220-
for (int64_t csize = 1, batchPos = 0; batchPos < mBatchSize; batchPos += csize, mFileBatchPos += csize)
221+
for (int64_t csize = 1, batchPos = 0; batchPos < mBatchSize; batchPos += csize, mFileBatchPos += static_cast<int>(csize))
221222
{
222223
ASSERT(mFileBatchPos > 0 && mFileBatchPos <= mDims.d[0]);
223224
if (mFileBatchPos == mDims.d[0] && !update())
@@ -238,9 +239,9 @@ class BatchStream : public IBatchStream
238239
// Skips the batches
239240
void skip(int skipCount) override
240241
{
241-
if (mBatchSize >= mDims.d[0] && mBatchSize % mDims.d[0] == 0 && mFileBatchPos == mDims.d[0])
242+
if (mBatchSize >= mDims.d[0] && mBatchSize % mDims.d[0] == 0 && mFileBatchPos == static_cast<int>(mDims.d[0]))
242243
{
243-
mFileCount += skipCount * mBatchSize / mDims.d[0];
244+
mFileCount += static_cast<int>(skipCount * mBatchSize / mDims.d[0]);
244245
return;
245246
}
246247

@@ -269,7 +270,7 @@ class BatchStream : public IBatchStream
269270

270271
int getBatchSize() const override
271272
{
272-
return mBatchSize;
273+
return static_cast<int>(mBatchSize);
273274
}
274275

275276
nvinfer1::Dims getDims() const override
@@ -338,14 +339,14 @@ class BatchStream : public IBatchStream
338339
}
339340

340341
std::vector<float> data(samplesCommon::volume(mDims));
341-
const float scale = 2.0 / 255.0;
342-
const float bias = 1.0;
343-
long int volChl = mDims.d[2] * mDims.d[3];
342+
const float scale = 2.0f / 255.0f;
343+
const float bias = 1.0f;
344+
long int volChl = static_cast<long int>(mDims.d[2] * mDims.d[3]);
344345

345346
// Normalize input data
346-
for (int i = 0, volImg = mDims.d[1] * mDims.d[2] * mDims.d[3]; i < mBatchSize; ++i)
347+
for (int i = 0, volImg = static_cast<int>(mDims.d[1] * mDims.d[2] * mDims.d[3]); i < mBatchSize; ++i)
347348
{
348-
for (int c = 0; c < mDims.d[1]; ++c)
349+
for (int64_t c = 0; c < mDims.d[1]; ++c)
349350
{
350351
for (int j = 0; j < volChl; ++j)
351352
{

samples/sampleCharRNN/sampleCharRNN.cpp

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -639,12 +639,12 @@ nvinfer1::ILayer* SampleCharRNNLoop::addLSTMLayers(std::unique_ptr<nvinfer1::INe
639639
nvinfer1::Dims dimB{1, {4 * mParams.hiddenSize}};
640640
nvinfer1::Dims dim0{1, {0}};
641641
auto extractWeights = [](nvinfer1::Weights weights, Dims start, Dims size) -> nvinfer1::Weights {
642-
const char* data = static_cast<const char*>(weights.values);
642+
const char* data_ = static_cast<const char*>(weights.values);
643643
int64_t shift = samplesCommon::volume(start);
644-
const int bufferSize = samplesCommon::getNbBytes(weights.type, shift);
644+
const int bufferSize = static_cast<int>(samplesCommon::getNbBytes(weights.type, shift));
645645
int64_t count = samplesCommon::volume(size);
646646
ASSERT(shift + count <= weights.count);
647-
return nvinfer1::Weights{weights.type, data + bufferSize, count};
647+
return nvinfer1::Weights{weights.type, data_ + bufferSize, count};
648648
};
649649
for (int i = 0; i < mParams.layerCount; ++i)
650650
{
@@ -667,7 +667,7 @@ nvinfer1::ILayer* SampleCharRNNLoop::addLSTMLayers(std::unique_ptr<nvinfer1::INe
667667
}
668668

669669
auto addConcatenation = [&network](std::vector<nvinfer1::ITensor*> tensors) -> nvinfer1::ITensor* {
670-
nvinfer1::IConcatenationLayer* concat = network->addConcatenation(tensors.data(), tensors.size());
670+
nvinfer1::IConcatenationLayer* concat = network->addConcatenation(tensors.data(), static_cast<int>(tensors.size()));
671671
concat->setAxis(0);
672672
return concat->getOutput(0);
673673
};
@@ -776,7 +776,7 @@ bool SampleCharRNNBase::infer()
776776

777777
// Select a random seed string.
778778
srand(unsigned(time(nullptr)));
779-
int sentenceIndex = rand() % mParams.inputSentences.size();
779+
int sentenceIndex = static_cast<int>(rand() % mParams.inputSentences.size());
780780
std::string inputSentence = mParams.inputSentences[sentenceIndex];
781781
std::string expected = mParams.outputSentences[sentenceIndex];
782782
std::string genstr;

samples/sampleDynamicReshape/sampleDynamicReshape.cpp

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -150,7 +150,7 @@ bool SampleDynamicReshape::build()
150150
//! \return false if error in build preprocessor engine.
151151
//!
152152
bool SampleDynamicReshape::buildPreprocessorEngine(
153-
nvinfer1::IBuilder& builder, nvinfer1::IRuntime& runtime, cudaStream_t profileStream)
153+
nvinfer1::IBuilder& builder, nvinfer1::IRuntime& runtime, cudaStream_t /*profileStream*/)
154154
{
155155
// Create the preprocessor engine using a network that supports full dimensions (createNetworkV2).
156156
auto preprocessorNetwork = std::unique_ptr<INetworkDefinition>(
@@ -437,7 +437,7 @@ Dims SampleDynamicReshape::loadPGMFile(const std::string& fileName)
437437
mInput.hostBuffer.resize(inputDims);
438438
float* hostDataBuffer = static_cast<float*>(mInput.hostBuffer.data());
439439
std::transform(fileData.begin(), fileData.end(), hostDataBuffer,
440-
[](uint8_t x) { return 1.0 - static_cast<float>(x / 255.0); });
440+
[](uint8_t x) { return 1.0f - static_cast<float>(x) / 255.0f; });
441441
return inputDims;
442442
}
443443

@@ -459,7 +459,7 @@ bool SampleDynamicReshape::validateOutput(int digit)
459459
++curIndex;
460460
}
461461

462-
int predictedDigit = std::max_element(prob.begin(), prob.end()) - prob.begin();
462+
int predictedDigit = static_cast<int>(std::max_element(prob.begin(), prob.end()) - prob.begin());
463463
return digit == predictedDigit;
464464
}
465465

samples/sampleINT8API/sampleINT8API.cpp

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -241,7 +241,6 @@ void SampleINT8API::setLayerPrecision(nvinfer1::INetworkDefinition const& networ
241241
std::string tensorName = layer->getOutput(j)->getName();
242242
if (mParams.verbose)
243243
{
244-
std::string tensorName = layer->getOutput(j)->getName();
245244
sample::gLogInfo << "Tensor: " << tensorName << ". OutputType: INT8" << std::endl;
246245
}
247246
// set output type of execution tensors and not shape tensors.
@@ -391,7 +390,7 @@ bool SampleINT8API::setDynamicRange(nvinfer1::INetworkDefinition& network)
391390
max = std::max(max, std::abs(val));
392391
}
393392

394-
if (!lyr->getOutput(j)->setDynamicRange(-max, max))
393+
if (!lyr->getOutput(j)->setDynamicRange(static_cast<float>(-max), static_cast<float>(max)))
395394
{
396395
return false;
397396
}
@@ -466,7 +465,7 @@ bool SampleINT8API::verifyOutput(samplesCommon::BufferManager const& buffers) co
466465
{
467466
// copy output host buffer data for further processing
468467
float const* probPtr = static_cast<float const*>(buffers.getHostBuffer(mInOut.at("output")));
469-
std::vector<float> output(probPtr, probPtr + mOutputDims.d[1]);
468+
std::vector<float> output(probPtr, probPtr + static_cast<int>(mOutputDims.d[1]));
470469

471470
auto inds = samplesCommon::argMagnitudeSort(output.cbegin(), output.cend());
472471

samples/sampleIOFormats/sampleIOFormats.cpp

Lines changed: 30 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -74,32 +74,32 @@ class BufferDesc
7474
public:
7575
BufferDesc() = default;
7676

77-
BufferDesc(nvinfer1::Dims dims, int32_t dataWidth, TensorFormat format)
77+
BufferDesc(nvinfer1::Dims dims_, int32_t dataWidth_, TensorFormat format)
7878
{
79-
this->dataWidth = dataWidth;
79+
this->dataWidth = dataWidth_;
8080
if (format == TensorFormat::kLINEAR)
8181
{
82-
this->dims[0] = dims.d[0];
83-
this->dims[1] = dims.d[1];
84-
this->dims[2] = dims.d[2];
85-
this->dims[3] = dims.d[3];
82+
this->dims[0] = static_cast<int32_t>(dims_.d[0]);
83+
this->dims[1] = static_cast<int32_t>(dims_.d[1]);
84+
this->dims[2] = static_cast<int32_t>(dims_.d[2]);
85+
this->dims[3] = static_cast<int32_t>(dims_.d[3]);
8686
this->dims[4] = 1;
8787
}
8888
else if (format == TensorFormat::kCHW32)
8989
{
90-
this->dims[0] = dims.d[0];
91-
this->dims[1] = divUp(dims.d[1], 32);
92-
this->dims[2] = dims.d[2];
93-
this->dims[3] = dims.d[3];
90+
this->dims[0] = static_cast<int32_t>(dims_.d[0]);
91+
this->dims[1] = divUp(static_cast<int32_t>(dims_.d[1]), 32);
92+
this->dims[2] = static_cast<int32_t>(dims_.d[2]);
93+
this->dims[3] = static_cast<int32_t>(dims_.d[3]);
9494
this->dims[4] = 32;
9595
this->scalarPerVector = 32;
9696
}
9797
else if (format == TensorFormat::kHWC)
9898
{
99-
this->dims[0] = dims.d[0];
100-
this->dims[1] = dims.d[2];
101-
this->dims[2] = dims.d[3];
102-
this->dims[3] = dims.d[1];
99+
this->dims[0] = static_cast<int32_t>(dims_.d[0]);
100+
this->dims[1] = static_cast<int32_t>(dims_.d[2]);
101+
this->dims[2] = static_cast<int32_t>(dims_.d[3]);
102+
this->dims[3] = static_cast<int32_t>(dims_.d[1]);
103103
this->dims[4] = 1;
104104
this->channelPivot = true;
105105
}
@@ -144,23 +144,23 @@ class SampleBuffer
144144
dims.d[3] = 1;
145145
}
146146

147-
SampleBuffer(nvinfer1::Dims dims, int32_t dataWidth, TensorFormat format, bool isInput)
148-
: dims(dims)
149-
, dataWidth(dataWidth)
150-
, format(format)
151-
, isInput(isInput)
147+
SampleBuffer(nvinfer1::Dims dims_, int32_t dataWidth_, TensorFormat format_, bool isInput_)
148+
: dims(dims_)
149+
, dataWidth(dataWidth_)
150+
, format(format_)
151+
, isInput(isInput_)
152152
{
153153

154154
// Output buffer is unsqueezed to 4D in order to reuse the BufferDesc class
155-
if (isInput == false)
155+
if (isInput_ == false)
156156
{
157-
dims.d[2] = dims.d[0];
158-
dims.d[3] = dims.d[1];
159-
dims.d[0] = 1;
160-
dims.d[1] = 1;
157+
dims_.d[2] = dims_.d[0];
158+
dims_.d[3] = dims_.d[1];
159+
dims_.d[0] = 1;
160+
dims_.d[1] = 1;
161161
}
162162

163-
desc = BufferDesc(dims, dataWidth, format);
163+
desc = BufferDesc(dims_, dataWidth_, format_);
164164

165165
if (nullptr == buffer)
166166
{
@@ -330,7 +330,7 @@ bool SampleIOFormats::verify(TypeSpec const& spec)
330330
//!
331331
//! \return true if the engine was created successfully and false otherwise
332332
//!
333-
bool SampleIOFormats::build(int32_t dataWidth)
333+
bool SampleIOFormats::build(int32_t /*dataWidth*/)
334334
{
335335
auto builder = std::unique_ptr<nvinfer1::IBuilder>(nvinfer1::createInferBuilder(sample::gLogger.getTRTLogger()));
336336
if (!builder)
@@ -436,7 +436,7 @@ bool SampleIOFormats::build(int32_t dataWidth)
436436
//! \param builder Pointer to the engine builder
437437
//!
438438
bool SampleIOFormats::constructNetwork(std::unique_ptr<nvinfer1::IBuilder>& builder,
439-
std::unique_ptr<nvinfer1::INetworkDefinition>& network, std::unique_ptr<nvinfer1::IBuilderConfig>& config,
439+
std::unique_ptr<nvinfer1::INetworkDefinition>& /*network*/, std::unique_ptr<nvinfer1::IBuilderConfig>& config,
440440
std::unique_ptr<nvonnxparser::IParser>& parser)
441441
{
442442
auto parsed = parser->parseFromFile(samplesCommon::locateFile(mParams.onnxFileName, mParams.dataDirs).c_str(),
@@ -548,7 +548,7 @@ void printHelpInfo()
548548
//! \brief Used to run the engine build and inference/reference functions
549549
//!
550550
template <typename T>
551-
bool process(SampleIOFormats& sample, sample::Logger::TestAtom const& sampleTest, SampleBuffer& inputBuf,
551+
bool process(SampleIOFormats& sample, sample::Logger::TestAtom const& /*sampleTest*/, SampleBuffer& inputBuf,
552552
SampleBuffer& outputBuf, TypeSpec& spec)
553553
{
554554
sample::gLogInfo << "Building and running a GPU inference engine with specified I/O formats." << std::endl;
@@ -562,8 +562,8 @@ bool process(SampleIOFormats& sample, sample::Logger::TestAtom const& sampleTest
562562
return false;
563563
}
564564

565-
inputBuf = SampleBuffer(sample.mInputDims, sizeof(T), sample.mTensorFormat, true);
566-
outputBuf = SampleBuffer(sample.mOutputDims, sizeof(T), TensorFormat::kLINEAR, false);
565+
inputBuf = SampleBuffer(sample.mInputDims, static_cast<int32_t>(sizeof(T)), sample.mTensorFormat, true);
566+
outputBuf = SampleBuffer(sample.mOutputDims, static_cast<int32_t>(sizeof(T)), TensorFormat::kLINEAR, false);
567567

568568
if (!sample.infer(inputBuf, outputBuf))
569569
{

samples/sampleNamedDimensions/sampleNamedDimensions.cpp

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@
3636
#include <cuda_runtime_api.h>
3737

3838
#include <algorithm>
39+
#include <cmath>
3940
#include <cstdlib>
4041
#include <fstream>
4142
#include <iostream>
@@ -223,8 +224,8 @@ bool SampleNamedDimensions::build()
223224
//!
224225
//! \brief Uses ONNX parser to create the ONNX Network and marks the output layers
225226
//!
226-
bool SampleNamedDimensions::constructNetwork(std::unique_ptr<nvinfer1::IBuilder>& builder,
227-
std::unique_ptr<nvinfer1::INetworkDefinition>& network, std::unique_ptr<nvinfer1::IBuilderConfig>& config,
227+
bool SampleNamedDimensions::constructNetwork(std::unique_ptr<nvinfer1::IBuilder>& /*builder*/,
228+
std::unique_ptr<nvinfer1::INetworkDefinition>& /*network*/, std::unique_ptr<nvinfer1::IBuilderConfig>& /*config*/,
228229
std::unique_ptr<nvonnxparser::IParser>& parser)
229230
{
230231
auto parsed = parser->parseFromFile(samplesCommon::locateFile(mParams.onnxFileName, mParams.dataDirs).c_str(),
@@ -314,15 +315,15 @@ bool SampleNamedDimensions::infer()
314315
bool SampleNamedDimensions::processInput(samplesCommon::BufferManager const& buffers)
315316
{
316317
int32_t const input0H = mNamedDimension;
317-
int32_t const input0W = mInputDims[0].d[1];
318+
int32_t const input0W = static_cast<int32_t>(mInputDims[0].d[1]);
318319
int32_t const input1H = mNamedDimension;
319-
int32_t const input1W = mInputDims[1].d[1];
320+
int32_t const input1W = static_cast<int32_t>(mInputDims[1].d[1]);
320321

321322
// Generate random input
322323
mInput0.resize(input0H * input0W);
323324
mInput1.resize(input1H * input1W);
324325
std::default_random_engine generator(static_cast<uint32_t>(time(nullptr)));
325-
std::uniform_real_distribution<float> unif_real_distr(-10., 10.);
326+
std::uniform_real_distribution<float> unif_real_distr(-10.0f, 10.0f);
326327

327328
sample::gLogInfo << "Input0:\n";
328329
for (int32_t i = 0; i < input0H * input0W; i++)
@@ -357,7 +358,7 @@ bool SampleNamedDimensions::processInput(samplesCommon::BufferManager const& buf
357358
bool SampleNamedDimensions::verifyOutput(samplesCommon::BufferManager const& buffers)
358359
{
359360
int32_t const outputH = 2 * mNamedDimension;
360-
int32_t const outputW = mOutputDims[0].d[1];
361+
int32_t const outputW = static_cast<int32_t>(mOutputDims[0].d[1]);
361362
int32_t const outputSize = outputH * outputW;
362363

363364
auto* output = static_cast<float*>(buffers.getHostBuffer(mParams.outputTensorNames[0]));
@@ -374,7 +375,7 @@ bool SampleNamedDimensions::verifyOutput(samplesCommon::BufferManager const& buf
374375
for (int32_t i = 0; i < outputH * outputW; i++)
375376
{
376377
auto const reference_value = i > outputSize / 2 ? mInput1[i - outputSize / 2] : mInput0[i];
377-
if (fabs(output[i] - reference_value) > std::numeric_limits<float>::epsilon())
378+
if (std::abs(output[i] - reference_value) > std::numeric_limits<float>::epsilon())
378379
{
379380
return false;
380381
}

0 commit comments

Comments (0)