/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef TRT_SAMPLE_UTILS_H
#define TRT_SAMPLE_UTILS_H

#include <fstream>
#include <iostream>
#include <memory>
#include <numeric>
#include <random>
#include <string>
#include <unordered_map>
#include <vector>

#include <cuda.h>
#include <cuda_fp16.h>

#include "NvInfer.h"

#include "common.h"
#include "logger.h"

#define SMP_RETVAL_IF_FALSE(condition, msg, retval, err) \
    {                                                    \
        if ((condition) == false)                        \
        {                                                \
            (err) << (msg) << std::endl;                 \
            return retval;                               \
        }                                                \
    }

namespace sample
{

//! Return the size in bytes of one element of the given data type.
size_t dataTypeSize(nvinfer1::DataType dataType);

//! Round m up to the nearest multiple of n.
template <typename T>
inline T roundUp(T m, T n)
{
    return ((m + n - 1) / n) * n;
}

//! comps is the number of components in a vector. Ignored if vecDim < 0.
int64_t volume(nvinfer1::Dims const& dims, nvinfer1::Dims const& strides, int32_t vecDim, int32_t comps, int32_t batch);

using samplesCommon::volume;

//! Convert a vector of extents into an nvinfer1::Dims.
nvinfer1::Dims toDims(std::vector<int32_t> const& vec);

//! Fill the first volume elements of buffer with values in [min, max];
//! one overload for integral T and one for floating-point T.
template <typename T, typename std::enable_if<std::is_integral<T>::value, bool>::type = true>
void fillBuffer(void* buffer, int64_t volume, T min, T max);

template <typename T, typename std::enable_if<!std::is_integral<T>::value, int32_t>::type = 0>
void fillBuffer(void* buffer, int64_t volume, T min, T max);

//! Write the contents of buffer to os, one element at a time separated by separator,
//! honoring dims, strides, and vectorization (vectorDim, spv).
template <typename T>
void dumpBuffer(void const* buffer, std::string const& separator, std::ostream& os, nvinfer1::Dims const& dims,
    nvinfer1::Dims const& strides, int32_t vectorDim, int32_t spv);

//! Read size bytes from the file fileName into dst.
void loadFromFile(std::string const& fileName, char* dst, size_t size);

//! Split option into substrings at each occurrence of separator.
std::vector<std::string> splitToStringVec(std::string const& option, char separator);

bool broadcastIOFormats(std::vector<IOFormat> const& formats, size_t nbBindings, bool isInput = true);

int32_t getCudaDriverVersion();

int32_t getCudaRuntimeVersion();

void sparsify(nvinfer1::INetworkDefinition& network, std::vector<std::vector<int8_t>>& sparseWeights);
void sparsify(nvinfer1::Weights const& weights, int32_t k, int32_t rs, std::vector<int8_t>& sparseWeights);

// Walk the weights elements and overwrite (at most) 2 out of every 4 elements with 0.
template <typename T>
void sparsify(T const* values, int64_t count, int32_t k, int32_t rs, std::vector<int8_t>& sparseWeights);

template <typename L>
void setSparseWeights(L& l, int32_t k, int32_t rs, std::vector<int8_t>& sparseWeights);

// Sparsify the weights of Constant layers that are fed to MatMul via Shuffle layers.
// Performs a forward analysis on the API graph to determine which weights to sparsify.
void sparsifyMatMulKernelWeights(
    nvinfer1::INetworkDefinition& network, std::vector<std::vector<int8_t>>& sparseWeights);

//! Transpose the m x n matrix of T stored at src into dst.
template <typename T>
void transpose2DWeights(void* dst, void const* src, int32_t const m, int32_t const n);

} // namespace sample

#endif // TRT_SAMPLE_UTILS_H
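
// ---------------------------------------------------------------------------
// Illustrative sketches (not part of the upstream header). They only clarify
// the declarations above; the real implementations live in sampleUtils.cpp.
//
// SMP_RETVAL_IF_FALSE logs msg to err and returns retval when condition is
// false. For example (with a hypothetical engine pointer):
//
//     SMP_RETVAL_IF_FALSE(engine != nullptr, "Engine creation failed", false, std::cerr);
//
// The templated sparsify() above produces a 2:4 structured-sparsity pattern
// ("overwrite at most 2 out of every 4 elements with 0"). A minimal sketch of
// one way to generate such a pattern is shown below; the helper name
// zeroTwoOfFour and the magnitude-based selection are illustrative
// assumptions, not necessarily the sample's actual strategy.
//
//     #include <cmath>
//     #include <cstddef>
//     #include <utility>
//     #include <vector>
//
//     // For every group of 4 consecutive weights, zero the 2 entries with the
//     // smallest magnitude so that at most 2 of every 4 values are non-zero.
//     inline void zeroTwoOfFour(std::vector<float>& w)
//     {
//         for (size_t i = 0; i + 4 <= w.size(); i += 4)
//         {
//             size_t lo0 = i;     // index of the smallest-magnitude entry so far
//             size_t lo1 = i + 1; // index of the second-smallest-magnitude entry
//             if (std::abs(w[lo1]) < std::abs(w[lo0]))
//             {
//                 std::swap(lo0, lo1);
//             }
//             for (size_t j = i + 2; j < i + 4; ++j)
//             {
//                 if (std::abs(w[j]) < std::abs(w[lo0]))
//                 {
//                     lo1 = lo0;
//                     lo0 = j;
//                 }
//                 else if (std::abs(w[j]) < std::abs(w[lo1]))
//                 {
//                     lo1 = j;
//                 }
//             }
//             w[lo0] = 0.F;
//             w[lo1] = 0.F;
//         }
//     }
// ---------------------------------------------------------------------------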