diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 9577abcf5..9df773f77 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -29,11 +29,11 @@ variables: sudo apt-get -y update sudo apt-get -y install clang git libunwind8 curl libomp-dev libomp5 wget gpg wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | sudo tee /usr/share/keyrings/kitware-archive-keyring.gpg >/dev/null - echo 'deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ noble main' | sudo tee /etc/apt/sources.list.d/kitware.list + echo 'deb [signed-by=/usr/share/keyrings/kitware-archive-keyring.gpg] https://apt.kitware.com/ubuntu/ resolute main' | sudo tee /etc/apt/sources.list.d/kitware.list sudo apt-get -y update sudo apt-get -y install cmake cmake --version - wget https://packages.microsoft.com/config/ubuntu/24.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb + wget https://packages.microsoft.com/config/ubuntu/26.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb sudo dpkg --purge packages-microsoft-prod && sudo dpkg -i packages-microsoft-prod.deb sudo apt-get update; sudo apt-get install -y apt-transport-https && sudo apt-get update ldd --version && (/sbin/ldconfig -p | grep stdc++) && (strings /usr/lib/x86_64-linux-gnu/libstdc++.so.6 | grep LIBCXX) @@ -46,7 +46,7 @@ resources: # https://github.com/dotnet/versions/blob/main/build-info/docker/image-info.dotnet-dotnet-buildtools-prereqs-docker-main.json - container: UbuntuContainer - image: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-24.04 + image: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-26.04-amd64 jobs: - template: /build/ci/job-template.yml diff --git a/src/Native/LibTorchSharp/THSTensor.cpp b/src/Native/LibTorchSharp/THSTensor.cpp index 7b4a0e55e..a001045fc 100644 --- a/src/Native/LibTorchSharp/THSTensor.cpp +++ b/src/Native/LibTorchSharp/THSTensor.cpp @@ -753,13 +753,14 @@ Tensor 
THSTensor_isreal(const Tensor tensor) CATCH_TENSOR(torch::isreal(*tensor)); } -void completeTensorIndices(const int64_t* indexStarts, +std::vector<at::indexing::TensorIndex> completeTensorIndices(const int64_t* indexStarts, const int64_t* indexEnds, const int64_t* indexSteps, const Tensor* indexTensors, - at::indexing::TensorIndex* indicesArray, const int indicesLength) { + std::vector<at::indexing::TensorIndex> indices; + indices.reserve(indicesLength); // The indexStart encodes the kind of slice being performed for each dimension // range INT64_MIN..INT64_MIN+5 is for various singleton cases // range INT64_MIN+6 is for slice with absent start @@ -770,56 +771,41 @@ void completeTensorIndices(const int64_t* indexStarts, auto n = indexStarts[i]; if (n == INT64_MIN) // TensorIndex 'Null' { - at::indexing::TensorIndex idx(c10::nullopt); - // The '=' copy constructor for TensorIndex doesn't work - memcpy(&indicesArray[i], &idx, sizeof(at::indexing::TensorIndex)); + indices.emplace_back(c10::nullopt); } else if (n == INT64_MIN + 1) // TensorIndex 'False' { - at::indexing::TensorIndex idx(false); - // The '=' copy constructor for TensorIndex doesn't work - memcpy(&indicesArray[i], &idx, sizeof(at::indexing::TensorIndex)); + indices.emplace_back(false); } else if (n == INT64_MIN + 2) // TensorIndex 'True' { - at::indexing::TensorIndex idx(true); - // The '=' copy constructor for TensorIndex doesn't work - memcpy(&indicesArray[i], &idx, sizeof(at::indexing::TensorIndex)); + indices.emplace_back(true); } else if (n == INT64_MIN + 3) // TensorIndex '...'
{ - at::indexing::TensorIndex idx(at::indexing::Ellipsis); - // The '=' copy constructor for TensorIndex doesn't work - memcpy(&indicesArray[i], &idx, sizeof(at::indexing::TensorIndex)); + indices.emplace_back(at::indexing::Ellipsis); } else if (n == INT64_MIN + 4) // TensorIndex 'None' { - at::indexing::TensorIndex idx(at::indexing::None); - // The '=' copy constructor for TensorIndex doesn't work - memcpy(&indicesArray[i], &idx, sizeof(at::indexing::TensorIndex)); + indices.emplace_back(at::indexing::None); } else if (n == INT64_MIN + 5) // TensorIndex by tensor { - at::indexing::TensorIndex idx(*indexTensors[i]); - // The '=' copy constructor for TensorIndex doesn't work - memcpy(&indicesArray[i], &idx, sizeof(at::indexing::TensorIndex)); + indices.emplace_back(*indexTensors[i]); } else if (n > INT64_MIN / 4) // TensorIndex by integer { - at::indexing::TensorIndex idx(n); - // The '=' copy constructor for TensorIndex doesn't work - memcpy(&indicesArray[i], &idx, sizeof(at::indexing::TensorIndex)); + indices.emplace_back(n); } else // TensorIndex by Slice { - // slice auto start = (n == INT64_MIN + 6) ? c10::optional<c10::SymInt>() : c10::optional<c10::SymInt>(n - INT64_MIN / 2); auto end = (indexEnds == nullptr || indexEnds[i] == INT64_MIN) ? c10::optional<c10::SymInt>() : c10::optional<c10::SymInt>(indexEnds[i]); auto step = (indexSteps == nullptr || indexSteps[i] == INT64_MIN) ?
c10::optional<c10::SymInt>() : c10::optional<c10::SymInt>(indexSteps[i]); - at::indexing::TensorIndex idx(at::indexing::Slice(start, end, step)); - memcpy(&indicesArray[i], &idx, sizeof(at::indexing::TensorIndex)); + indices.emplace_back(at::indexing::Slice(start, end, step)); } } + return indices; } Tensor THSTensor_index(Tensor tensor, @@ -829,11 +815,8 @@ Tensor THSTensor_index(Tensor tensor, const Tensor* indexTensors, const int indicesLength) { - at::indexing::TensorIndex* indicesArray = (at::indexing::TensorIndex*)alloca(indicesLength * sizeof(at::indexing::TensorIndex)); - memset(indicesArray, 0, indicesLength * sizeof(at::indexing::TensorIndex)); - // The indexStart encodes the kind of slice being performed for each dimension - completeTensorIndices(indexStarts, indexEnds, indexSteps, indexTensors, indicesArray, indicesLength); - auto indices = at::ArrayRef<at::indexing::TensorIndex>(indicesArray, indicesLength); + auto indicesVec = completeTensorIndices(indexStarts, indexEnds, indexSteps, indexTensors, indicesLength); + auto indices = at::ArrayRef<at::indexing::TensorIndex>(indicesVec.data(), indicesVec.size()); CATCH_TENSOR(tensor->index(indices)); } @@ -845,10 +828,8 @@ void THSTensor_index_put_(Tensor tensor, const int indicesLength, const Tensor value) { - at::indexing::TensorIndex* indicesArray = (at::indexing::TensorIndex*)alloca(indicesLength * sizeof(at::indexing::TensorIndex)); - memset(indicesArray, 0, indicesLength * sizeof(at::indexing::TensorIndex)); - completeTensorIndices(indexStarts, indexEnds, indexSteps, indexTensors, indicesArray, indicesLength); - auto indices = at::ArrayRef<at::indexing::TensorIndex>(indicesArray, indicesLength); + auto indicesVec = completeTensorIndices(indexStarts, indexEnds, indexSteps, indexTensors, indicesLength); + auto indices = at::ArrayRef<at::indexing::TensorIndex>(indicesVec.data(), indicesVec.size()); CATCH(tensor->index_put_(indices, *value);); } @@ -861,10 +842,8 @@ void THSTensor_index_put_(Tensor tensor, const Tensor value, const bool accumulate) { - at::indexing::TensorIndex* indicesArray =
(at::indexing::TensorIndex*)alloca(indicesLength * sizeof(at::indexing::TensorIndex)); - memset(indicesArray, 0, indicesLength * sizeof(at::indexing::TensorIndex)); - completeTensorIndices(indexStarts, indexEnds, indexSteps, indexTensors, indicesArray, indicesLength); - auto indices = at::ArrayRef<at::indexing::TensorIndex>(indicesArray, indicesLength); + auto indicesVec = completeTensorIndices(indexStarts, indexEnds, indexSteps, indexTensors, indicesLength); + auto indices = at::ArrayRef<at::indexing::TensorIndex>(indicesVec.data(), indicesVec.size()); if (accumulate) { c10::List<c10::optional<at::Tensor>> indicesList = c10::List<c10::optional<at::Tensor>>(); for (int i = 0; i < indicesLength; i++) { @@ -885,10 +864,8 @@ void THSTensor_index_put_scalar_(Tensor tensor, const int indicesLength, const Scalar value) { - at::indexing::TensorIndex* indicesArray = (at::indexing::TensorIndex*)alloca(indicesLength * sizeof(at::indexing::TensorIndex)); - memset(indicesArray, 0, indicesLength * sizeof(at::indexing::TensorIndex)); - completeTensorIndices(indexStarts, indexEnds, indexSteps, indexTensors, indicesArray, indicesLength); - auto indices = at::ArrayRef<at::indexing::TensorIndex>(indicesArray, indicesLength); + auto indicesVec = completeTensorIndices(indexStarts, indexEnds, indexSteps, indexTensors, indicesLength); + auto indices = at::ArrayRef<at::indexing::TensorIndex>(indicesVec.data(), indicesVec.size()); CATCH(tensor->index_put_(indices, *value);); }