
Commit 6df838b

msbuild sdk extras update
1 parent 7d2e575 commit 6df838b

7 files changed: +34 -36 lines changed


csharp/src/Microsoft.ML.OnnxRuntime/FixedBufferOnnxValue.shared.cs

Lines changed: 5 additions & 5 deletions
@@ -4,7 +4,7 @@
 using Microsoft.ML.OnnxRuntime.Tensors;
 using System;

-#if NET8_0
+#if NET8_0_OR_GREATER
 using DotnetTensors = System.Numerics.Tensors;
 using TensorPrimitives = System.Numerics.Tensors.TensorPrimitives;
 #endif
@@ -14,7 +14,7 @@ namespace Microsoft.ML.OnnxRuntime
 /// <summary>
 /// This is a legacy class that is kept for backward compatibility.
 /// Use OrtValue based API.
-/// 
+///
 /// Represents an OrtValue with its underlying buffer pinned
 /// </summary>
 public class FixedBufferOnnxValue : IDisposable
@@ -44,7 +44,7 @@ public static FixedBufferOnnxValue CreateFromTensor<T>(Tensor<T> value)
     return new FixedBufferOnnxValue(ref ortValue, OnnxValueType.ONNX_TYPE_TENSOR, elementType);
 }

-#if NET8_0
+#if NET8_0_OR_GREATER
 #pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback
 /// <summary>
 /// Creates a <see cref="FixedBufferOnnxValue"/> object from the tensor and pins its underlying buffer.
@@ -83,7 +83,7 @@ public static FixedBufferOnnxValue CreateFromDotnetTensor<T>(DotnetTensors.Tenso
 /// Here is an example of using a 3rd party library class for processing float16/bfloat16.
 /// Currently, to pass tensor data and create a tensor one must copy data to Float16/BFloat16 structures
 /// so DenseTensor can recognize it.
-/// 
+///
 /// If you are using a library that has a class Half and it is blittable, that is its managed in memory representation
 /// matches native one and its size is 16-bits, you can use the following conceptual example
 /// to feed/fetch data for inference using Half array. This allows you to avoid copying data from your Half[] to Float16[]
@@ -94,7 +94,7 @@ public static FixedBufferOnnxValue CreateFromDotnetTensor<T>(DotnetTensors.Tenso
 /// var input_shape = new long[] {input.Length};
 /// Half[] output = new Half[40]; // Whatever the expected len/shape is must match
 /// var output_shape = new long[] {output.Length};
-/// 
+///
 /// var memInfo = OrtMemoryInfo.DefaultInstance; // CPU
 ///
 /// using(var fixedBufferInput = FixedBufferOnnxvalue.CreateFromMemory{Half}(memInfo,
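
The recurring #if change above swaps the exact-version symbol for the cumulative one. As a minimal sketch (illustrative C#, not part of the commit), the difference between the two preprocessor symbols is:

// For a project targeting net9.0, the SDK defines NET9_0 plus the
// cumulative symbols NET8_0_OR_GREATER, NET7_0_OR_GREATER, and so on,
// but it does NOT define NET8_0.
#if NET8_0
Console.WriteLine("Compiled only when the TargetFramework is exactly net8.0.");
#endif
#if NET8_0_OR_GREATER
Console.WriteLine("Compiled for net8.0 and every later target framework.");
#endif

Guarding the System.Numerics.Tensors code with NET8_0_OR_GREATER therefore keeps it compiled when the project is retargeted past .NET 8.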

csharp/src/Microsoft.ML.OnnxRuntime/ManagedProjections.shared.cs

Lines changed: 2 additions & 3 deletions
@@ -9,7 +9,7 @@
 using System.Reflection;


-#if NET8_0
+#if NET8_0_OR_GREATER
 using DotnetTensors = System.Numerics.Tensors;
 using TensorPrimitives = System.Numerics.Tensors.TensorPrimitives;
 #endif
@@ -173,7 +173,7 @@ private static OrtValue CreateMapProjection(NamedOnnxValue node, NodeMetadata el
 /// <exception cref="OnnxRuntimeException"></exception>
 private static OrtValue CreateTensorProjection(NamedOnnxValue node, NodeMetadata elementMeta)
 {
-#if NET8_0
+#if NET8_0_OR_GREATER
 #pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback
 if (node.Value is not TensorBase && node.Value.GetType().GetGenericTypeDefinition() != typeof(DotnetTensors.Tensor<>))
 {
@@ -226,4 +226,3 @@ private static OrtValue CreateTensorProjection(NamedOnnxValue node, NodeMetadata
 }
 }
 }
-
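
The guard in CreateTensorProjection calls GetGenericTypeDefinition(), which throws InvalidOperationException for non-generic types. A hedged, illustrative rewrite of that test (a sketch, not the library's code) would check IsGenericType first:

#pragma warning disable SYSLIB5001 // System.Numerics.Tensors preview API
// Sketch only: true when value is some Tensor<T> from
// System.Numerics.Tensors, without throwing on non-generic types.
static bool IsDotnetTensor(object value)
{
    var t = value.GetType();
    return t.IsGenericType
        && t.GetGenericTypeDefinition() == typeof(System.Numerics.Tensors.Tensor<>);
}
#pragma warning restore SYSLIB5001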

csharp/src/Microsoft.ML.OnnxRuntime/Microsoft.ML.OnnxRuntime.csproj

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-<Project Sdk="MSBuild.Sdk.Extras/3.0.22">
+<Project Sdk="MSBuild.Sdk.Extras/3.0.44">
 <PropertyGroup>
 <!--- packaging properties -->
 <OrtPackageId Condition="'$(OrtPackageId)' == ''">Microsoft.ML.OnnxRuntime</OrtPackageId>

csharp/src/Microsoft.ML.OnnxRuntime/NamedOnnxValue.shared.cs

Lines changed: 15 additions & 15 deletions
@@ -8,7 +8,7 @@
 using System.Diagnostics;
 using System.Linq;

-#if NET8_0
+#if NET8_0_OR_GREATER
 using DotnetTensors = System.Numerics.Tensors;
 using TensorPrimitives = System.Numerics.Tensors.TensorPrimitives;
 #endif
@@ -35,37 +35,37 @@ internal MapHelper(TensorBase keys, TensorBase values)
 /// <summary>
 /// This is a legacy class that is kept for backward compatibility.
 /// Use OrtValue based API.
-/// 
-/// The class associates a name with an Object. 
+///
+/// The class associates a name with an Object.
 /// The name of the class is a misnomer, it does not hold any Onnx values,
 /// just managed representation of them.
-/// 
+///
 /// The class is currently used as both inputs and outputs. Because it is non-
 /// disposable, it can not hold on to any native objects.
-/// 
+///
 /// When used as input, we temporarily create OrtValues that map managed inputs
 /// directly. Thus we are able to avoid copying of contiguous data.
-/// 
+///
 /// For outputs, tensor buffers works the same as input, providing it matches
 /// the expected output shape. For other types (maps and sequences) we create a copy of the data.
 /// This is because, the class is not Disposable and it is a public interface, thus it can not own
 /// the underlying OrtValues that must be destroyed before Run() returns.
-/// 
+///
 /// To avoid data copying on output, use DisposableNamedOnnxValue class that is returned from Run() methods.
 /// This provides access to the native memory tensors and avoids copying.
-/// 
+///
 /// It is a recursive structure that may contain Tensors (base case)
 /// Other sequences and maps. Although the OnnxValueType is exposed,
 /// the caller is supposed to know the actual data type contained.
-/// 
+///
 /// The convention is that for tensors, it would contain a DenseTensor{T} instance or
 /// anything derived from Tensor{T}.
-/// 
+///
 /// For sequences, it would contain a IList{T} where T is an instance of NamedOnnxValue that
 /// would contain a tensor or another type.
-/// 
+///
 /// For Maps, it would contain a IDictionary{K, V} where K,V are primitive types or strings.
-/// 
+///
 /// </summary>
 public class NamedOnnxValue
 {
@@ -145,7 +145,7 @@ public static NamedOnnxValue CreateFromTensor<T>(string name, Tensor<T> value)
     return new NamedOnnxValue(name, value, OnnxValueType.ONNX_TYPE_TENSOR);
 }

-#if NET8_0
+#if NET8_0_OR_GREATER
 #pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback
 /// <summary>
 /// This is a factory method that instantiates NamedOnnxValue
@@ -219,7 +219,7 @@ public Tensor<T> AsTensor<T>()
 }


-#if NET8_0
+#if NET8_0_OR_GREATER
 #pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback
 /// <summary>
 /// Try-get value as a Tensor&lt;T&gt;.
@@ -303,7 +303,7 @@ internal virtual IntPtr OutputToOrtValueHandle(NodeMetadata metadata, out IDispo
 }
 }

-throw new OnnxRuntimeException(ErrorCode.NotImplemented, 
+throw new OnnxRuntimeException(ErrorCode.NotImplemented,
 $"Can not create output OrtValue for NamedOnnxValue '{metadata.OnnxValueType}' type." +
 $" Only tensors can be pre-allocated for outputs " +
 $" Use Run() overloads that return DisposableNamedOnnxValue to get access to all Onnx value types that may be returned as output.");

csharp/src/Microsoft.ML.OnnxRuntime/OrtValue.shared.cs

Lines changed: 5 additions & 6 deletions
@@ -12,7 +12,7 @@
 using System.Runtime.InteropServices;
 using System.Text;

-#if NET8_0
+#if NET8_0_OR_GREATER
 using DotnetTensors = System.Numerics.Tensors;
 using TensorPrimitives = System.Numerics.Tensors.TensorPrimitives;
 #endif
@@ -213,7 +213,7 @@ public ReadOnlySpan<T> GetTensorDataAsSpan<T>() where T : unmanaged
     return MemoryMarshal.Cast<byte, T>(byteSpan);
 }

-#if NET8_0
+#if NET8_0_OR_GREATER
 #pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback
 /// <summary>
 /// Returns a ReadOnlyTensorSpan<typeparamref name="T"/> over tensor native buffer that
@@ -261,7 +261,7 @@ public Span<T> GetTensorMutableDataAsSpan<T>() where T : unmanaged
     return MemoryMarshal.Cast<byte, T>(byteSpan);
 }

-#if NET8_0
+#if NET8_0_OR_GREATER
 #pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so we can continue receiving API feedback
 /// <summary>
 /// Returns a TensorSpan<typeparamref name="T"/> over tensor native buffer.
@@ -297,7 +297,7 @@ public Span<byte> GetTensorMutableRawData()
     return GetTensorBufferRawData(typeof(byte));
 }

-#if NET8_0
+#if NET8_0_OR_GREATER
 #pragma warning disable SYSLIB5001 // Type is for evaluation purposes only and is subject to change or removal in future updates. Suppress this diagnostic to proceed.
 /// <summary>
 /// Provides mutable raw native buffer access.
@@ -686,7 +686,7 @@ public static OrtValue CreateTensorValueFromMemory<T>(T[] data, long[] shape) wh
     return OrtValue.CreateTensorValueFromMemory(OrtMemoryInfo.DefaultInstance, new Memory<T>(data), shape);
 }

-#if NET8_0
+#if NET8_0_OR_GREATER
 #pragma warning disable SYSLIB5001 // System.Numerics.Tensors is only in preview so it can continue receiving API feedback
 /// <summary>
 /// This is a factory method creates a native Onnxruntime OrtValue containing a tensor.
@@ -709,7 +709,6 @@ public static OrtValue CreateTensorValueFromDotnetTensorObject<T>(DotnetTensors.
 var field = tensor.GetType().GetFields(BindingFlags.Instance | BindingFlags.NonPublic).Where(x => x.Name == "_values").FirstOrDefault();
 var backingData = (T[])field.GetValue(tensor);
 GCHandle handle = GCHandle.Alloc(backingData, GCHandleType.Pinned);
-//GCHandle handle = GCHandle.Alloc(tensor.GetPinnableReference(), GCHandleType.Pinned);
 var memHandle = new MemoryHandle(Unsafe.AsPointer(ref tensor.GetPinnableReference()), handle);

 try
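
The last hunk pins the tensor's backing array before handing its address to native code. A minimal sketch of that pinning pattern (generic C#, not the library's exact code):

using System;
using System.Runtime.InteropServices;

float[] backing = { 1f, 2f, 3f };
// Pin the array so the GC cannot move it while native code holds a pointer.
GCHandle handle = GCHandle.Alloc(backing, GCHandleType.Pinned);
try
{
    IntPtr ptr = handle.AddrOfPinnedObject(); // stable address of backing[0]
    // ... pass ptr to native code for the lifetime of the pin ...
}
finally
{
    handle.Free(); // always release the pin
}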

csharp/test/Microsoft.ML.OnnxRuntime.Tests.NetCoreApp/InferenceTest.netcore.cs

Lines changed: 5 additions & 5 deletions
@@ -333,8 +333,8 @@ private void ThrowWrongOutputDimensionDotnetTensors()
 var inputData = tuple.Item2;
 var inputTensor = tuple.Item3;
 var inputs = new List<NamedOnnxValue> { NamedOnnxValue.CreateFromTensor<float>("data_0", inputTensor) };
-var outputTensor = new DenseTensor<float>((ReadOnlySpan<int>)new[] { 1, 1001, 1, 1 });
-var outputs = new List<NamedOnnxValue> { NamedOnnxValue.CreateFromTensor("softmaxout_1", outputTensor) };
+var outputTensor = DotnetTensors.Tensor.Create([ 1, 1001, 1, 1 ], [4]);
+var outputs = new List<NamedOnnxValue> { NamedOnnxValue.CreateFromDotnetTensor("softmaxout_1", outputTensor) };
 var ex = Assert.Throws<OnnxRuntimeException>(() => session.Run(inputs, outputs));
 // TODO: check exception message
 // InferenceSession::ValidateOutputs() does not check dims so far. Currently this will finally trigger an error in Softmax.
@@ -349,8 +349,8 @@ private void ThrowNoOutputDotnetTensors()
 var inputData = tuple.Item2;
 var inputTensor = tuple.Item3;
 var inputs = new List<NamedOnnxValue> { NamedOnnxValue.CreateFromTensor<float>("data_0", inputTensor) };
-var outputTensor = new DenseTensor<float>((ReadOnlySpan<int>)new[] { 1, 1000, 1, 1 });
-var outputs = new List<NamedOnnxValue> { NamedOnnxValue.CreateFromTensor("softmaxout_1", outputTensor) };
+var outputTensor = DotnetTensors.Tensor.Create([1, 1001, 1, 1], [4]);
+var outputs = new List<NamedOnnxValue> { NamedOnnxValue.CreateFromDotnetTensor("softmaxout_1", outputTensor) };
 var ex = Assert.Throws<OnnxRuntimeException>(() => session.Run(inputs, new NamedOnnxValue[0]));
 Assert.Contains("[ErrorCode:InvalidArgument] At least one output should be requested.", ex.Message);
 session.Dispose();
@@ -364,7 +364,7 @@ private void ThrowInconsistentPinnedOutputsDotnetTensors()
 var inputData = tuple.Item2;
 var inputTensor = tuple.Item3;
 var inputs = new List<NamedOnnxValue> { NamedOnnxValue.CreateFromTensor<float>("data_0", inputTensor) };
-var outputTensor = new DenseTensor<float>((ReadOnlySpan<int>)new[] { 1, 1000, 1, 1 });
+var outputTensor = DotnetTensors.Tensor.Create([1, 1001, 1, 1], [4]);

 using (var outputs = new DisposableListTest<FixedBufferOnnxValue>())
 {
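
The tests now build outputs with the preview System.Numerics.Tensors API. A hedged sketch of that construction (preview API, so exact overloads may shift between package versions; the output name mirrors the tests and is otherwise illustrative):

#if NET8_0_OR_GREATER
#pragma warning disable SYSLIB5001 // System.Numerics.Tensors is preview API
using DotnetTensors = System.Numerics.Tensors;

// Create a rank-4 float tensor (zero-initialized) and wrap it as a
// named output for session.Run().
var outputTensor = DotnetTensors.Tensor.Create<float>([1, 1001, 1, 1]);
var output = NamedOnnxValue.CreateFromDotnetTensor("softmaxout_1", outputTensor);
#pragma warning restore SYSLIB5001
#endif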

csharp/tools/linux_pack/LinuxPackNativeNuget.csproj

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@
 If you need a more sophisticated package for testing, you can run the production packaging pipeline against your
 branch and download the resulting nuget package from the build artifacts.
 -->
-<Project Sdk="MSBuild.Sdk.Extras/3.0.22">
+<Project Sdk="MSBuild.Sdk.Extras/3.0.44">
 <PropertyGroup>
 <TargetFrameworks>netstandard2.0</TargetFrameworks>
 <NuspecFile>$(OnnxRuntimeBuildDirectory)/NativeNuget.nuspec</NuspecFile>
