Skip to content

Commit 1bf4414

Browse files
authored
Merge pull request #8158 from Unity-Technologies/internal/6000.1/staging
Internal/6000.1/staging
2 parents 67fcb5b + db60212 commit 1bf4414

72 files changed

Lines changed: 9309 additions & 104 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
---
2+
uid: srp-core-api-index
3+
---
4+
5+
# Scriptable Render Pipeline Core scripting API
6+
7+
This is the documentation for the scripting APIs of the Scriptable Render Pipeline (SRP) Core package.
8+
9+
**Note**: URP and HDRP are built on the Scriptable Render Pipeline (SRP) Core package, but have their own class types. For more information, refer to the following:
10+
11+
- [Universal Render Pipeline (URP) Scripting API](https://docs.unity3d.com/Packages/com.unity.render-pipelines.universal@17.1/api/index.html)
12+
- [High Definition Render Pipeline (HDRP) Scripting API](https://docs.unity3d.com/Packages/com.unity.render-pipelines.high-definition@17.1/api/index.html)

Packages/com.unity.render-pipelines.core/Editor/Lighting/ProbeVolume/ProbeGIBaking.cs

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1419,11 +1419,14 @@ static void ApplyPostBakeOperations()
14191419
if (m_BakingSet.hasDilation)
14201420
{
14211421
// This subsequent block needs to happen AFTER we call WriteBakingCells.
1422-
// Otherwise in cases where we change the spacing between probes, we end up loading cells with a certain layout in ForceSHBand
1422+
// Otherwise, in cases where we change the spacing between probes, we end up loading cells with a certain layout in ForceSHBand
14231423
// And then we unload cells using the wrong layout in PerformDilation (after WriteBakingCells updates the baking set object) which leads to a broken internal state.
14241424

14251425
// Don't use Disk streaming to avoid having to wait for it when doing dilation.
14261426
probeRefVolume.ForceNoDiskStreaming(true);
1427+
// Increase the memory budget to make sure we can fit the current cell and all its neighbors when doing dilation.
1428+
var prevMemoryBudget = probeRefVolume.memoryBudget;
1429+
probeRefVolume.ForceMemoryBudget(ProbeVolumeTextureMemoryBudget.MemoryBudgetHigh);
14271430
// Force maximum sh bands to perform baking, we need to store what sh bands was selected from the settings as we need to restore it after.
14281431
var prevSHBands = probeRefVolume.shBands;
14291432
probeRefVolume.ForceSHBand(ProbeVolumeSHBands.SphericalHarmonicsL2);
@@ -1434,8 +1437,9 @@ static void ApplyPostBakeOperations()
14341437
using (new BakingCompleteProfiling(BakingCompleteProfiling.Stages.PerformDilation))
14351438
PerformDilation();
14361439

1437-
// Need to restore the original state
1440+
// Restore the original state.
14381441
probeRefVolume.ForceNoDiskStreaming(false);
1442+
probeRefVolume.ForceMemoryBudget(prevMemoryBudget);
14391443
probeRefVolume.ForceSHBand(prevSHBands);
14401444
}
14411445
else
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
area: Post-processing and UI Features
1+
area: Post-processing and Compositing

Packages/com.unity.render-pipelines.core/Editor/RenderGraph/RenderGraphViewer.SidePanel.cs

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,14 @@ public partial class RenderGraphViewer
1818
"Compute Pass"
1919
};
2020

21+
static readonly string[] k_PassTypeNamesNotMergedMessage =
22+
{
23+
"This is a Legacy Render Pass. Only Raster Render Passes can be merged.",
24+
"This is an Unsafe Render Pass. Only Raster Render Passes can be merged.",
25+
"Pass merging was disabled.",
26+
"This is a Compute Pass. Only Raster Render Passes can be merged."
27+
};
28+
2129
static partial class Names
2230
{
2331
public const string kPanelContainer = "panel-container";
@@ -370,8 +378,7 @@ void CreateTextElement(VisualElement parent, string text, string className = nul
370378
else
371379
{
372380
CreateTextElement(passItem, "Pass break reasoning", Classes.kSubHeaderText);
373-
var msg = $"This is a {k_PassTypeNames[(int) firstPassData.type]}. Only Raster Render Passes can be merged.";
374-
msg = msg.Replace("a Unsafe", "an Unsafe");
381+
string msg = k_PassTypeNamesNotMergedMessage[(int)firstPassData.type];
375382
CreateTextElement(passItem, msg);
376383
}
377384

Packages/com.unity.render-pipelines.core/Runtime/Common/DynamicArray.cs

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -685,8 +685,7 @@ public static class DynamicArrayExtensions
685685
}
686686
}
687687

688-
// C# SUCKS
689-
// Had to copy paste because it's apparently impossible to pass a sort delegate where T is Comparable<T>, otherwise some boxing happens and allocates...
688+
// A copy/paste because it's apparently impossible to pass a sort delegate where T is Comparable<T>, otherwise some boxing happens and allocates...
690689
// So two identical versions of the function, one with delegate but no Comparable and the other with just the comparable.
691690
static int Partition<T>(Span<T> data, int left, int right, DynamicArray<T>.SortComparer comparer) where T : new()
692691
{

Packages/com.unity.render-pipelines.core/Runtime/GPUDriven/GPUResidentBatcher.cs

Lines changed: 3 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -196,12 +196,7 @@ private void UpdateRendererInstancesAndBatches(in GPUDrivenRendererGroupData ren
196196

197197
Profiler.BeginSample("InstanceCullingBatcher.BuildBatch");
198198
{
199-
m_InstanceCullingBatcher.BuildBatch(
200-
instances,
201-
rendererData.materialID,
202-
rendererData.meshID,
203-
rendererData, true);
204-
199+
m_InstanceCullingBatcher.BuildBatch(instances, rendererData, true);
205200
}
206201
Profiler.EndSample();
207202

@@ -234,15 +229,11 @@ private void UpdateRendererBatches(in GPUDrivenRendererGroupData rendererData, I
234229

235230
Profiler.BeginSample("InstanceCullingBatcher.BuildBatch");
236231
{
237-
m_InstanceCullingBatcher.BuildBatch(
238-
instances.AsArray(),
239-
rendererData.materialID,
240-
rendererData.meshID,
241-
rendererData, false);
242-
instances.Dispose();
232+
m_InstanceCullingBatcher.BuildBatch(instances.AsArray(), rendererData, false);
243233
}
244234
Profiler.EndSample();
245235

236+
instances.Dispose();
246237
}
247238
Profiler.EndSample();
248239
}

Packages/com.unity.render-pipelines.core/Runtime/GPUDriven/InstanceCullingBatcher.cs

Lines changed: 92 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -8,11 +8,6 @@
88
using Unity.Burst;
99
using UnityEngine.Profiling;
1010

11-
[assembly: RegisterGenericJobType(typeof(UnityEngine.Rendering.RegisterNewInstancesJob<UnityEngine.Rendering.BatchMeshID>))]
12-
[assembly: RegisterGenericJobType(typeof(UnityEngine.Rendering.RegisterNewInstancesJob<UnityEngine.Rendering.BatchMaterialID>))]
13-
[assembly: RegisterGenericJobType(typeof(UnityEngine.Rendering.FindNonRegisteredInstancesJob<UnityEngine.Rendering.BatchMeshID>))]
14-
[assembly: RegisterGenericJobType(typeof(UnityEngine.Rendering.FindNonRegisteredInstancesJob<UnityEngine.Rendering.BatchMaterialID>))]
15-
1611
namespace UnityEngine.Rendering
1712
{
1813
internal delegate void OnCullingCompleteCallback(JobHandle jobHandle, in BatchCullingContext cullingContext, in BatchCullingOutput cullingOutput);
@@ -191,48 +186,109 @@ public void Execute(int startIndex, int count)
191186
}
192187

193188
[BurstCompile(DisableSafetyChecks = true, OptimizeFor = OptimizeFor.Performance)]
194-
internal struct FindNonRegisteredInstancesJob<T> : IJobParallelForBatch where T : unmanaged
189+
internal struct FindNonRegisteredMeshesJob : IJobParallelForBatch
195190
{
196191
public const int k_BatchSize = 128;
197192

198193
[ReadOnly] public NativeArray<int> instanceIDs;
199-
[ReadOnly] public NativeParallelHashMap<int, T> hashMap;
194+
[ReadOnly] public NativeParallelHashMap<int, BatchMeshID> hashMap;
200195

201196
[WriteOnly] public NativeList<int>.ParallelWriter outInstancesWriter;
202197

203198
public unsafe void Execute(int startIndex, int count)
204199
{
205-
int* notFoundinstanceIDs = stackalloc int[k_BatchSize];
206-
int length = 0;
200+
int* notFoundinstanceIDsPtr = stackalloc int[k_BatchSize];
201+
var notFoundinstanceIDs = new UnsafeList<int>(notFoundinstanceIDsPtr, k_BatchSize);
202+
203+
notFoundinstanceIDs.Length = 0;
207204

208205
for (int i = startIndex; i < startIndex + count; ++i)
209206
{
210207
int instanceID = instanceIDs[i];
211208

212209
if (!hashMap.ContainsKey(instanceID))
213-
notFoundinstanceIDs[length++] = instanceID;
210+
notFoundinstanceIDs.AddNoResize(instanceID);
214211
}
215212

216-
outInstancesWriter.AddRangeNoResize(notFoundinstanceIDs, length);
213+
outInstancesWriter.AddRangeNoResize(notFoundinstanceIDsPtr, notFoundinstanceIDs.Length);
217214
}
218215
}
219216

220217
[BurstCompile(DisableSafetyChecks = true, OptimizeFor = OptimizeFor.Performance)]
221-
internal struct RegisterNewInstancesJob<T> : IJobParallelFor where T : unmanaged
218+
internal struct FindNonRegisteredMaterialsJob : IJobParallelForBatch
222219
{
223220
public const int k_BatchSize = 128;
224221

225222
[ReadOnly] public NativeArray<int> instanceIDs;
226-
[ReadOnly] public NativeArray<T> batchIDs;
223+
[ReadOnly] public NativeArray<GPUDrivenPackedMaterialData> packedMaterialDatas;
224+
[ReadOnly] public NativeParallelHashMap<int, BatchMaterialID> hashMap;
227225

228-
[WriteOnly] public NativeParallelHashMap<int, T>.ParallelWriter hashMap;
226+
[WriteOnly] public NativeList<int>.ParallelWriter outInstancesWriter;
227+
[WriteOnly] public NativeList<GPUDrivenPackedMaterialData>.ParallelWriter outPackedMaterialDatasWriter;
229228

230-
public unsafe void Execute(int index)
229+
public unsafe void Execute(int startIndex, int count)
230+
{
231+
int* notFoundinstanceIDsPtr = stackalloc int[k_BatchSize];
232+
var notFoundinstanceIDs = new UnsafeList<int>(notFoundinstanceIDsPtr, k_BatchSize);
233+
234+
GPUDrivenPackedMaterialData* notFoundPackedMaterialDatasPtr = stackalloc GPUDrivenPackedMaterialData[k_BatchSize];
235+
var notFoundPackedMaterialDatas = new UnsafeList<GPUDrivenPackedMaterialData>(notFoundPackedMaterialDatasPtr, k_BatchSize);
236+
237+
notFoundinstanceIDs.Length = 0;
238+
notFoundPackedMaterialDatas.Length = 0;
239+
240+
for (int i = startIndex; i < startIndex + count; ++i)
241+
{
242+
int instanceID = instanceIDs[i];
243+
244+
if (!hashMap.ContainsKey(instanceID))
245+
{
246+
notFoundinstanceIDs.AddNoResize(instanceID);
247+
notFoundPackedMaterialDatas.AddNoResize(packedMaterialDatas[i]);
248+
}
249+
}
250+
251+
outInstancesWriter.AddRangeNoResize(notFoundinstanceIDsPtr, notFoundinstanceIDs.Length);
252+
outPackedMaterialDatasWriter.AddRangeNoResize(notFoundPackedMaterialDatasPtr, notFoundPackedMaterialDatas.Length);
253+
}
254+
}
255+
256+
[BurstCompile(DisableSafetyChecks = true, OptimizeFor = OptimizeFor.Performance)]
257+
internal struct RegisterNewMeshesJob : IJobParallelFor
258+
{
259+
public const int k_BatchSize = 128;
260+
261+
[ReadOnly] public NativeArray<int> instanceIDs;
262+
[ReadOnly] public NativeArray<BatchMeshID> batchIDs;
263+
264+
[WriteOnly] public NativeParallelHashMap<int, BatchMeshID>.ParallelWriter hashMap;
265+
266+
public void Execute(int index)
231267
{
232268
hashMap.TryAdd(instanceIDs[index], batchIDs[index]);
233269
}
234270
}
235271

272+
[BurstCompile(DisableSafetyChecks = true, OptimizeFor = OptimizeFor.Performance)]
273+
internal struct RegisterNewMaterialsJob : IJobParallelFor
274+
{
275+
public const int k_BatchSize = 128;
276+
277+
[ReadOnly] public NativeArray<int> instanceIDs;
278+
[ReadOnly] public NativeArray<GPUDrivenPackedMaterialData> packedMaterialDatas;
279+
[ReadOnly] public NativeArray<BatchMaterialID> batchIDs;
280+
281+
[WriteOnly] public NativeParallelHashMap<int, BatchMaterialID>.ParallelWriter batchMaterialHashMap;
282+
[WriteOnly] public NativeParallelHashMap<int, GPUDrivenPackedMaterialData>.ParallelWriter packedMaterialHashMap;
283+
284+
public void Execute(int index)
285+
{
286+
var instanceID = instanceIDs[index];
287+
batchMaterialHashMap.TryAdd(instanceID, batchIDs[index]);
288+
packedMaterialHashMap.TryAdd(instanceID, packedMaterialDatas[index]);
289+
}
290+
}
291+
236292
[BurstCompile(DisableSafetyChecks = true, OptimizeFor = OptimizeFor.Performance)]
237293
internal struct RemoveDrawInstanceIndicesJob : IJob
238294
{
@@ -468,7 +524,7 @@ public void ProcessRenderer(int i)
468524
{
469525
var materialID = rendererData.materialID[materialIndex];
470526
bool isFound = packedMaterialDataHash.TryGetValue(materialID, out packedMaterialData);
471-
Assert.IsTrue(isFound);
527+
Assert.IsTrue(isFound, "Packed material data not found.");
472528
}
473529
supportsIndirect &= packedMaterialData.isIndirectSupported;
474530

@@ -1011,41 +1067,45 @@ public void PostCullBeginCameraRendering(RenderRequestBatcherContext context)
10111067
private void RegisterBatchMeshes(NativeArray<int> meshIDs)
10121068
{
10131069
var newMeshIDs = new NativeList<int>(meshIDs.Length, Allocator.TempJob);
1014-
new FindNonRegisteredInstancesJob<BatchMeshID>
1070+
new FindNonRegisteredMeshesJob
10151071
{
10161072
instanceIDs = meshIDs,
10171073
hashMap = m_BatchMeshHash,
10181074
outInstancesWriter = newMeshIDs.AsParallelWriter()
10191075
}
1020-
.ScheduleBatch(meshIDs.Length, FindNonRegisteredInstancesJob<BatchMeshID>.k_BatchSize).Complete();
1076+
.ScheduleBatch(meshIDs.Length, FindNonRegisteredMeshesJob.k_BatchSize).Complete();
10211077
var newBatchMeshIDs = new NativeArray<BatchMeshID>(newMeshIDs.Length, Allocator.TempJob, NativeArrayOptions.UninitializedMemory);
10221078
m_BRG.RegisterMeshes(newMeshIDs.AsArray(), newBatchMeshIDs);
10231079

10241080
int totalMeshesNum = m_BatchMeshHash.Count() + newBatchMeshIDs.Length;
10251081
m_BatchMeshHash.Capacity = Math.Max(m_BatchMeshHash.Capacity, Mathf.CeilToInt(totalMeshesNum / 1023.0f) * 1024);
10261082

1027-
new RegisterNewInstancesJob<BatchMeshID>
1083+
new RegisterNewMeshesJob
10281084
{
10291085
instanceIDs = newMeshIDs.AsArray(),
10301086
batchIDs = newBatchMeshIDs,
10311087
hashMap = m_BatchMeshHash.AsParallelWriter()
10321088
}
1033-
.Schedule(newMeshIDs.Length, RegisterNewInstancesJob<BatchMeshID>.k_BatchSize).Complete();
1089+
.Schedule(newMeshIDs.Length, RegisterNewMeshesJob.k_BatchSize).Complete();
10341090

10351091
newMeshIDs.Dispose();
10361092
newBatchMeshIDs.Dispose();
10371093
}
10381094

1039-
private void RegisterBatchMaterials(in NativeArray<int> usedMaterialIDs)
1095+
private void RegisterBatchMaterials(in NativeArray<int> usedMaterialIDs, in NativeArray<GPUDrivenPackedMaterialData> usedPackedMaterialDatas)
10401096
{
1097+
Debug.Assert(usedMaterialIDs.Length == usedPackedMaterialDatas.Length, "Each material ID should correspond to one packed material data.");
10411098
var newMaterialIDs = new NativeList<int>(usedMaterialIDs.Length, Allocator.TempJob);
1042-
new FindNonRegisteredInstancesJob<BatchMaterialID>
1099+
var newPackedMaterialDatas = new NativeList<GPUDrivenPackedMaterialData>(usedMaterialIDs.Length, Allocator.TempJob);
1100+
new FindNonRegisteredMaterialsJob
10431101
{
10441102
instanceIDs = usedMaterialIDs,
1103+
packedMaterialDatas = usedPackedMaterialDatas,
10451104
hashMap = m_BatchMaterialHash,
1046-
outInstancesWriter = newMaterialIDs.AsParallelWriter()
1105+
outInstancesWriter = newMaterialIDs.AsParallelWriter(),
1106+
outPackedMaterialDatasWriter = newPackedMaterialDatas.AsParallelWriter()
10471107
}
1048-
.ScheduleBatch(usedMaterialIDs.Length, FindNonRegisteredInstancesJob<BatchMaterialID>.k_BatchSize).Complete();
1108+
.ScheduleBatch(usedMaterialIDs.Length, FindNonRegisteredMaterialsJob.k_BatchSize).Complete();
10491109

10501110
var newBatchMaterialIDs = new NativeArray<BatchMaterialID>(newMaterialIDs.Length, Allocator.TempJob, NativeArrayOptions.UninitializedMemory);
10511111
m_BRG.RegisterMaterials(newMaterialIDs.AsArray(), newBatchMaterialIDs);
@@ -1054,15 +1114,18 @@ private void RegisterBatchMaterials(in NativeArray<int> usedMaterialIDs)
10541114
m_BatchMaterialHash.Capacity = Math.Max(m_BatchMaterialHash.Capacity, Mathf.CeilToInt(totalMaterialsNum / 1023.0f) * 1024);
10551115
m_PackedMaterialHash.Capacity = m_BatchMaterialHash.Capacity;
10561116

1057-
new RegisterNewInstancesJob<BatchMaterialID>
1117+
new RegisterNewMaterialsJob
10581118
{
10591119
instanceIDs = newMaterialIDs.AsArray(),
1120+
packedMaterialDatas = newPackedMaterialDatas.AsArray(),
10601121
batchIDs = newBatchMaterialIDs,
1061-
hashMap = m_BatchMaterialHash.AsParallelWriter()
1122+
batchMaterialHashMap = m_BatchMaterialHash.AsParallelWriter(),
1123+
packedMaterialHashMap = m_PackedMaterialHash.AsParallelWriter()
10621124
}
1063-
.Schedule(newMaterialIDs.Length, RegisterNewInstancesJob<BatchMaterialID>.k_BatchSize).Complete();
1125+
.Schedule(newMaterialIDs.Length, RegisterNewMaterialsJob.k_BatchSize).Complete();
10641126

10651127
newMaterialIDs.Dispose();
1128+
newPackedMaterialDatas.Dispose();
10661129
newBatchMaterialIDs.Dispose();
10671130
}
10681131

@@ -1078,15 +1141,13 @@ public JobHandle SchedulePackedMaterialCacheUpdate(NativeArray<int> materialIDs,
10781141

10791142
public void BuildBatch(
10801143
NativeArray<InstanceHandle> instances,
1081-
NativeArray<int> usedMaterialIDs,
1082-
NativeArray<int> usedMeshIDs,
10831144
in GPUDrivenRendererGroupData rendererData,
10841145
bool registerMaterialsAndMeshes)
10851146
{
10861147
if (registerMaterialsAndMeshes)
10871148
{
1088-
RegisterBatchMaterials(usedMaterialIDs);
1089-
RegisterBatchMeshes(usedMeshIDs);
1149+
RegisterBatchMaterials(rendererData.materialID, rendererData.packedMaterialData);
1150+
RegisterBatchMeshes(rendererData.meshID);
10901151
}
10911152

10921153
new CreateDrawBatchesJob

Packages/com.unity.render-pipelines.core/Runtime/Lighting/ProbeVolume/ProbeBrickPool.cs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -281,6 +281,7 @@ internal bool Allocate(int numberOfBrickChunks, List<BrickChunkAlloc> outAllocat
281281
if (!ignoreErrorLog)
282282
Debug.LogError("Cannot allocate more brick chunks, probe volume brick pool is full.");
283283

284+
Deallocate(outAllocations);
284285
outAllocations.Clear();
285286
return false; // failure case, pool is full
286287
}

Packages/com.unity.render-pipelines.core/Runtime/Lighting/ProbeVolume/ProbeReferenceVolume.cs

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1078,6 +1078,11 @@ public void SetVertexSamplingEnabled(bool value)
10781078
m_VertexSampling = value;
10791079
}
10801080

1081+
internal void ForceMemoryBudget(ProbeVolumeTextureMemoryBudget budget)
1082+
{
1083+
m_MemoryBudget = budget;
1084+
}
1085+
10811086
// This is used for steps such as dilation that require the maximum order allowed to be loaded at all times. Should really never be used as a general purpose function.
10821087
internal void ForceSHBand(ProbeVolumeSHBands shBands)
10831088
{
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
area: Post-processing and UI Features
1+
area: Post-processing and Compositing

0 commit comments

Comments
 (0)