New shader cache implementation (#3194)

* New shader cache implementation

* Remove some debug code

* Take transform feedback varying count into account

* Create shader cache directory if it does not exist + fragment output map related fixes

* Remove debug code

* Only check texture descriptors if the constant buffer is bound

* Also check CPU VA on GetSpanMapped

* Remove more unused code and move cache related code

* XML docs + remove more unused methods

* Better codegen for TransformFeedbackDescriptor.AsSpan

* Support migration from old cache format, remove more unused code

Shader cache rebuild now also rewrites the shared toc and data files

* Fix migration error with BRX shaders

* Add a limit to the async translation queue

This avoids the async translation threads being unable to keep up and the queue growing very large

* Re-create specialization state on recompile

This might be required if a new version of the shader translator requires more or less state, or if there is a bug related to GPU state access

* Make shader cache more error resilient

* Add some missing XML docs and move GpuAccessor docs to the interface/use inheritdoc

* Address early PR feedback

* Fix rebase

* Remove IRenderer.CompileShader and IShader interface, replace with new ShaderSource struct passed to CreateProgram directly

* Handle some missing exceptions

* Make shader cache purge delete both old and new shader caches

* Register textures on new specialization state

* Translate and compile shaders in forward order (eliminates diffs due to different binding numbers)

* Limit in-flight shader compilation to the maximum number of compilation threads

* Replace ParallelDiskCacheLoader state changed event with a callback function

* Better handling for invalid constant buffer 1 data length

* Do not create the old cache directory structure if the old cache does not exist

* Constant buffer use should be per-stage. This change will invalidate existing new caches (file format version was incremented)

* Replace rectangle texture with just coordinate normalization

* Skip incompatible shaders that are missing texture information, instead of crashing

This is required if, for example, we add support for a new texture instruction in the shader translator that allows access to textures that were not accessed before. In this scenario, the old cache entry is no longer usable

* Fix coordinates normalization on cubemap textures

* Check if title ID is null before combining shader cache path

* More robust constant buffer address validation on spec state

* More robust constant buffer address validation on spec state (2)

* Regenerate shader cache with one stream, rather than one per shader.

* Only create shader cache directory during initialization

* Logging improvements

* Proper shader program disposal

* PR feedback, and add a comment on serialized structs

* XML docs for RegisterTexture

Co-authored-by: riperiperi <rhy3756547@hotmail.com>
gdkchan 2022-04-10 10:49:44 -03:00 committed by GitHub
parent 26a881176e
commit 43ebd7a9bb
81 changed files with 6421 additions and 2406 deletions

@@ -0,0 +1,138 @@
using Ryujinx.Common;
using Ryujinx.Common.Logging;
using System;
using System.IO;
namespace Ryujinx.Graphics.Gpu.Shader.DiskCache
{
/// <summary>
/// Represents a background disk cache writer.
/// </summary>
class BackgroundDiskCacheWriter : IDisposable
{
/// <summary>
/// Possible operation to do on the <see cref="_fileWriterWorkerQueue"/>.
/// </summary>
private enum CacheFileOperation
{
/// <summary>
/// Operation to add a shader to the cache.
/// </summary>
AddShader
}
/// <summary>
/// Represents an operation to perform on the <see cref="_fileWriterWorkerQueue"/>.
/// </summary>
private struct CacheFileOperationTask
{
/// <summary>
/// The type of operation to perform.
/// </summary>
public readonly CacheFileOperation Type;
/// <summary>
/// The data associated with this operation, or null.
/// </summary>
public readonly object Data;
public CacheFileOperationTask(CacheFileOperation type, object data)
{
Type = type;
Data = data;
}
}
/// <summary>
/// Background shader cache write information.
/// </summary>
private struct AddShaderData
{
/// <summary>
/// Cached shader program.
/// </summary>
public readonly CachedShaderProgram Program;
/// <summary>
/// Binary host code.
/// </summary>
public readonly byte[] HostCode;
/// <summary>
/// Creates a new background shader cache write information.
/// </summary>
/// <param name="program">Cached shader program</param>
/// <param name="hostCode">Binary host code</param>
public AddShaderData(CachedShaderProgram program, byte[] hostCode)
{
Program = program;
HostCode = hostCode;
}
}
private readonly GpuContext _context;
private readonly DiskCacheHostStorage _hostStorage;
private readonly AsyncWorkQueue<CacheFileOperationTask> _fileWriterWorkerQueue;
/// <summary>
/// Creates a new background disk cache writer.
/// </summary>
/// <param name="context">GPU context</param>
/// <param name="hostStorage">Disk cache host storage</param>
public BackgroundDiskCacheWriter(GpuContext context, DiskCacheHostStorage hostStorage)
{
_context = context;
_hostStorage = hostStorage;
_fileWriterWorkerQueue = new AsyncWorkQueue<CacheFileOperationTask>(ProcessTask, "Gpu.BackgroundDiskCacheWriter");
}
/// <summary>
/// Processes a shader cache background operation.
/// </summary>
/// <param name="task">Task to process</param>
private void ProcessTask(CacheFileOperationTask task)
{
switch (task.Type)
{
case CacheFileOperation.AddShader:
AddShaderData data = (AddShaderData)task.Data;
try
{
_hostStorage.AddShader(_context, data.Program, data.HostCode);
}
catch (DiskCacheLoadException diskCacheLoadException)
{
Logger.Error?.Print(LogClass.Gpu, $"Error writing shader to disk cache. {diskCacheLoadException.Message}");
}
catch (IOException ioException)
{
Logger.Error?.Print(LogClass.Gpu, $"Error writing shader to disk cache. {ioException.Message}");
}
break;
}
}
/// <summary>
/// Adds a shader program to be cached in the background.
/// </summary>
/// <param name="program">Shader program to cache</param>
/// <param name="hostCode">Host binary code of the program</param>
public void AddShader(CachedShaderProgram program, byte[] hostCode)
{
_fileWriterWorkerQueue.Add(new CacheFileOperationTask(CacheFileOperation.AddShader, new AddShaderData(program, hostCode)));
}
public void Dispose()
{
Dispose(true);
}
protected virtual void Dispose(bool disposing)
{
if (disposing)
{
_fileWriterWorkerQueue.Dispose();
}
}
}
}
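
As an aside, the class above is essentially a producer/consumer wrapper around AsyncWorkQueue: callers enqueue cheaply and a dedicated thread performs the slow file writes. The sketch below is not the real implementation, just a minimal stand-in using BlockingCollection (the names BackgroundWriterSketch and payload are made up) to show the shape of that pattern.

using System;
using System.Collections.Concurrent;
using System.Threading;

// Minimal sketch of the background-writer pattern: producers enqueue work items
// and a single worker thread drains the queue and performs the slow disk writes.
class BackgroundWriterSketch : IDisposable
{
    private readonly BlockingCollection<byte[]> _queue = new BlockingCollection<byte[]>();
    private readonly Thread _worker;

    public BackgroundWriterSketch()
    {
        _worker = new Thread(Process) { Name = "Sketch.BackgroundWriter", IsBackground = true };
        _worker.Start();
    }

    // Called from the producer side; returns immediately.
    public void Add(byte[] payload) => _queue.Add(payload);

    private void Process()
    {
        foreach (byte[] payload in _queue.GetConsumingEnumerable())
        {
            // The real implementation would write the shader to the TOC/data files here.
            Console.WriteLine($"Wrote {payload.Length} bytes");
        }
    }

    public void Dispose()
    {
        _queue.CompleteAdding();
        _worker.Join();
        _queue.Dispose();
    }
}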

@@ -0,0 +1,216 @@
using System;
using System.IO;
using System.IO.Compression;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
namespace Ryujinx.Graphics.Gpu.Shader.DiskCache
{
/// <summary>
/// Binary data serializer.
/// </summary>
struct BinarySerializer
{
private readonly Stream _stream;
private Stream _activeStream;
/// <summary>
/// Creates a new binary serializer.
/// </summary>
/// <param name="stream">Stream to read from or write into</param>
public BinarySerializer(Stream stream)
{
_stream = stream;
_activeStream = stream;
}
/// <summary>
/// Reads data from the stream.
/// </summary>
/// <typeparam name="T">Type of the data</typeparam>
/// <param name="data">Data read</param>
public void Read<T>(ref T data) where T : unmanaged
{
Span<byte> buffer = MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateSpan(ref data, 1));
for (int offset = 0; offset < buffer.Length;)
{
offset += _activeStream.Read(buffer.Slice(offset));
}
}
/// <summary>
/// Tries to read data from the stream.
/// </summary>
/// <typeparam name="T">Type of the data</typeparam>
/// <param name="data">Data read</param>
/// <returns>True if the read was successful, false otherwise</returns>
public bool TryRead<T>(ref T data) where T : unmanaged
{
// Length is unknown on compressed streams.
if (_activeStream == _stream)
{
int size = Unsafe.SizeOf<T>();
if (_activeStream.Length - _activeStream.Position < size)
{
return false;
}
}
Read(ref data);
return true;
}
/// <summary>
/// Reads data prefixed with a magic and size from the stream.
/// </summary>
/// <typeparam name="T">Type of the data</typeparam>
/// <param name="data">Data read</param>
/// <param name="magic">Expected magic value, for validation</param>
public void ReadWithMagicAndSize<T>(ref T data, uint magic) where T : unmanaged
{
uint actualMagic = 0;
int size = 0;
Read(ref actualMagic);
Read(ref size);
if (actualMagic != magic)
{
throw new DiskCacheLoadException(DiskCacheLoadResult.FileCorruptedInvalidMagic);
}
// Structs are expected to expand but not shrink between versions.
if (size > Unsafe.SizeOf<T>())
{
throw new DiskCacheLoadException(DiskCacheLoadResult.FileCorruptedInvalidLength);
}
Span<byte> buffer = MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateSpan(ref data, 1)).Slice(0, size);
for (int offset = 0; offset < buffer.Length;)
{
offset += _activeStream.Read(buffer.Slice(offset));
}
}
/// <summary>
/// Writes data into the stream.
/// </summary>
/// <typeparam name="T">Type of the data</typeparam>
/// <param name="data">Data to be written</param>
public void Write<T>(ref T data) where T : unmanaged
{
Span<byte> buffer = MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateSpan(ref data, 1));
_activeStream.Write(buffer);
}
/// <summary>
/// Writes data prefixed with a magic and size into the stream.
/// </summary>
/// <typeparam name="T">Type of the data</typeparam>
/// <param name="data">Data to write</param>
/// <param name="magic">Magic value to write</param>
public void WriteWithMagicAndSize<T>(ref T data, uint magic) where T : unmanaged
{
int size = Unsafe.SizeOf<T>();
Write(ref magic);
Write(ref size);
Span<byte> buffer = MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateSpan(ref data, 1));
_activeStream.Write(buffer);
}
/// <summary>
/// Indicates that all data that will be read from the stream has been compressed.
/// </summary>
public void BeginCompression()
{
CompressionAlgorithm algorithm = CompressionAlgorithm.None;
Read(ref algorithm);
if (algorithm == CompressionAlgorithm.Deflate)
{
_activeStream = new DeflateStream(_stream, CompressionMode.Decompress, true);
}
}
/// <summary>
/// Indicates that all data that will be written into the stream should be compressed.
/// </summary>
/// <param name="algorithm">Compression algorithm that should be used</param>
public void BeginCompression(CompressionAlgorithm algorithm)
{
Write(ref algorithm);
if (algorithm == CompressionAlgorithm.Deflate)
{
_activeStream = new DeflateStream(_stream, CompressionLevel.SmallestSize, true);
}
}
/// <summary>
/// Indicates the end of a compressed chunk.
/// </summary>
/// <remarks>
/// Any data written after this will not be compressed unless <see cref="BeginCompression(CompressionAlgorithm)"/> is called again.
/// Any data read after this will be assumed to be uncompressed unless <see cref="BeginCompression"/> is called again.
/// </remarks>
public void EndCompression()
{
if (_activeStream != _stream)
{
_activeStream.Dispose();
_activeStream = _stream;
}
}
/// <summary>
/// Reads compressed data from the stream.
/// </summary>
/// <remarks>
/// <paramref name="data"/> must have the exact length of the uncompressed data,
/// otherwise decompression will fail.
/// </remarks>
/// <param name="stream">Stream to read from</param>
/// <param name="data">Buffer to write the uncompressed data into</param>
public static void ReadCompressed(Stream stream, Span<byte> data)
{
CompressionAlgorithm algorithm = (CompressionAlgorithm)stream.ReadByte();
switch (algorithm)
{
case CompressionAlgorithm.None:
stream.Read(data);
break;
case CompressionAlgorithm.Deflate:
stream = new DeflateStream(stream, CompressionMode.Decompress, true);
for (int offset = 0; offset < data.Length;)
{
offset += stream.Read(data.Slice(offset));
}
stream.Dispose();
break;
}
}
/// <summary>
/// Compresses and writes the compressed data into the stream.
/// </summary>
/// <param name="stream">Stream to write into</param>
/// <param name="data">Data to compress</param>
/// <param name="algorithm">Compression algorithm to be used</param>
public static void WriteCompressed(Stream stream, ReadOnlySpan<byte> data, CompressionAlgorithm algorithm)
{
stream.WriteByte((byte)algorithm);
switch (algorithm)
{
case CompressionAlgorithm.None:
stream.Write(data);
break;
case CompressionAlgorithm.Deflate:
stream = new DeflateStream(stream, CompressionLevel.SmallestSize, true);
stream.Write(data);
stream.Dispose();
break;
}
}
}
}
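
To illustrate the magic-and-size framing used by ReadWithMagicAndSize/WriteWithMagicAndSize: each entry is prefixed with a magic value and the serialized size, so an older (smaller) on-disk struct can still be read into a newer (larger) in-memory struct, while a larger-than-expected entry is rejected. The sketch below is a self-contained illustration under that assumption; SampleEntry, MagicAndSizeSketch and the helper names are made up and the demo relies on MemoryStream filling the whole span in one Read call (the real code loops).

using System;
using System.IO;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;

static class MagicAndSizeSketch
{
    // Hypothetical serialized struct; the real entries are types like BufferDescriptor.
    struct SampleEntry { public int A; public int B; }

    const uint SampleMagic = (byte)'S' | ((byte)'M' << 8) | ((byte)'P' << 16) | ((byte)'L' << 24);

    static void Write<T>(Stream stream, ref T data, uint magic) where T : unmanaged
    {
        Span<byte> header = stackalloc byte[8];
        BitConverter.TryWriteBytes(header.Slice(0, 4), magic);
        BitConverter.TryWriteBytes(header.Slice(4, 4), Unsafe.SizeOf<T>());
        stream.Write(header);
        stream.Write(MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateSpan(ref data, 1)));
    }

    static void Read<T>(Stream stream, ref T data, uint magic) where T : unmanaged
    {
        Span<byte> header = stackalloc byte[8];
        stream.Read(header);

        uint actualMagic = BitConverter.ToUInt32(header.Slice(0, 4));
        int size = BitConverter.ToInt32(header.Slice(4, 4));

        if (actualMagic != magic) throw new InvalidDataException("Bad magic");
        if (size > Unsafe.SizeOf<T>()) throw new InvalidDataException("Entry larger than expected");

        // Only "size" bytes are read, so fields added after the file was written stay zeroed.
        Span<byte> buffer = MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateSpan(ref data, 1)).Slice(0, size);
        stream.Read(buffer);
    }

    static void Main()
    {
        using var ms = new MemoryStream();

        SampleEntry entry = new SampleEntry { A = 1, B = 2 };
        Write(ms, ref entry, SampleMagic);

        ms.Position = 0;
        SampleEntry roundTrip = default;
        Read(ms, ref roundTrip, SampleMagic);

        Console.WriteLine($"{roundTrip.A}, {roundTrip.B}"); // Prints: 1, 2
    }
}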

@@ -0,0 +1,18 @@
namespace Ryujinx.Graphics.Gpu.Shader.DiskCache
{
/// <summary>
/// Algorithm used to compress the cache.
/// </summary>
enum CompressionAlgorithm : byte
{
/// <summary>
/// No compression, the data is stored as-is.
/// </summary>
None,
/// <summary>
/// Deflate compression (RFC 1951).
/// </summary>
Deflate
}
}
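
This algorithm byte is what ReadCompressed/WriteCompressed (in BinarySerializer, above) place at the start of every compressed block: the first byte selects the algorithm, followed by either raw or Deflate-compressed data, and the reader must already know the uncompressed size from elsewhere (e.g. the TOC). Below is a minimal, standalone sketch of that framing; the WriteBlock/ReadBlock helpers and Algo enum are made up for illustration.

using System;
using System.IO;
using System.IO.Compression;

static class CompressionFramingSketch
{
    enum Algo : byte { None, Deflate }

    static void WriteBlock(Stream stream, ReadOnlySpan<byte> data, Algo algo)
    {
        stream.WriteByte((byte)algo);
        if (algo == Algo.None)
        {
            stream.Write(data);
        }
        else
        {
            using var deflate = new DeflateStream(stream, CompressionLevel.Optimal, leaveOpen: true);
            deflate.Write(data);
        }
    }

    static void ReadBlock(Stream stream, Span<byte> data)
    {
        Algo algo = (Algo)stream.ReadByte();
        Stream source = algo == Algo.Deflate
            ? new DeflateStream(stream, CompressionMode.Decompress, leaveOpen: true)
            : stream;

        // The caller must know the exact uncompressed size up front (stored separately).
        for (int offset = 0; offset < data.Length;)
        {
            offset += source.Read(data.Slice(offset));
        }

        if (source != stream) source.Dispose();
    }

    static void Main()
    {
        byte[] payload = new byte[256];
        new Random(0).NextBytes(payload);

        using var ms = new MemoryStream();
        WriteBlock(ms, payload, Algo.Deflate);

        ms.Position = 0;
        byte[] result = new byte[payload.Length];
        ReadBlock(ms, result);

        Console.WriteLine(payload.AsSpan().SequenceEqual(result)); // Prints: True
    }
}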

@@ -0,0 +1,57 @@
using Ryujinx.Common.Logging;
using System.IO;
namespace Ryujinx.Graphics.Gpu.Shader.DiskCache
{
/// <summary>
/// Common disk cache utility methods.
/// </summary>
static class DiskCacheCommon
{
/// <summary>
/// Opens a file for read or write.
/// </summary>
/// <param name="basePath">Base path of the file (should not include the file name)</param>
/// <param name="fileName">Name of the file</param>
/// <param name="writable">Indicates if the file will be read or written</param>
/// <returns>File stream</returns>
public static FileStream OpenFile(string basePath, string fileName, bool writable)
{
string fullPath = Path.Combine(basePath, fileName);
FileMode mode;
FileAccess access;
if (writable)
{
mode = FileMode.OpenOrCreate;
access = FileAccess.ReadWrite;
}
else
{
mode = FileMode.Open;
access = FileAccess.Read;
}
try
{
return new FileStream(fullPath, mode, access, FileShare.Read);
}
catch (IOException ioException)
{
Logger.Error?.Print(LogClass.Gpu, $"Could not access file \"{fullPath}\". {ioException.Message}");
throw new DiskCacheLoadException(DiskCacheLoadResult.NoAccess);
}
}
/// <summary>
/// Gets the compression algorithm that should be used when writing the disk cache.
/// </summary>
/// <returns>Compression algorithm</returns>
public static CompressionAlgorithm GetCompressionAlgorithm()
{
return CompressionAlgorithm.Deflate;
}
}
}

@@ -0,0 +1,202 @@
using Ryujinx.Common.Logging;
using Ryujinx.Graphics.Gpu.Image;
using Ryujinx.Graphics.Shader;
using System;
using System.Runtime.InteropServices;
namespace Ryujinx.Graphics.Gpu.Shader.DiskCache
{
/// <summary>
/// Represents a GPU state and memory accessor.
/// </summary>
class DiskCacheGpuAccessor : GpuAccessorBase, IGpuAccessor
{
private readonly ReadOnlyMemory<byte> _data;
private readonly ReadOnlyMemory<byte> _cb1Data;
private readonly ShaderSpecializationState _oldSpecState;
private readonly ShaderSpecializationState _newSpecState;
private readonly int _stageIndex;
private ResourceCounts _resourceCounts;
/// <summary>
/// Creates a new instance of the cached GPU state accessor for shader translation.
/// </summary>
/// <param name="context">GPU context</param>
/// <param name="data">The data of the shader</param>
/// <param name="cb1Data">The constant buffer 1 data of the shader</param>
/// <param name="oldSpecState">Shader specialization state of the cached shader</param>
/// <param name="newSpecState">Shader specialization state of the recompiled shader</param>
/// <param name="stageIndex">Shader stage index</param>
public DiskCacheGpuAccessor(
GpuContext context,
ReadOnlyMemory<byte> data,
ReadOnlyMemory<byte> cb1Data,
ShaderSpecializationState oldSpecState,
ShaderSpecializationState newSpecState,
ResourceCounts counts,
int stageIndex) : base(context)
{
_data = data;
_cb1Data = cb1Data;
_oldSpecState = oldSpecState;
_newSpecState = newSpecState;
_stageIndex = stageIndex;
_resourceCounts = counts;
}
/// <inheritdoc/>
public uint ConstantBuffer1Read(int offset)
{
if (offset + sizeof(uint) > _cb1Data.Length)
{
throw new DiskCacheLoadException(DiskCacheLoadResult.InvalidCb1DataLength);
}
return MemoryMarshal.Cast<byte, uint>(_cb1Data.Span.Slice(offset))[0];
}
/// <inheritdoc/>
public void Log(string message)
{
Logger.Warning?.Print(LogClass.Gpu, $"Shader translator: {message}");
}
/// <inheritdoc/>
public ReadOnlySpan<ulong> GetCode(ulong address, int minimumSize)
{
return MemoryMarshal.Cast<byte, ulong>(_data.Span.Slice((int)address));
}
/// <inheritdoc/>
public int QueryBindingConstantBuffer(int index)
{
return _resourceCounts.UniformBuffersCount++;
}
/// <inheritdoc/>
public int QueryBindingStorageBuffer(int index)
{
return _resourceCounts.StorageBuffersCount++;
}
/// <inheritdoc/>
public int QueryBindingTexture(int index)
{
return _resourceCounts.TexturesCount++;
}
/// <inheritdoc/>
public int QueryBindingImage(int index)
{
return _resourceCounts.ImagesCount++;
}
/// <inheritdoc/>
public int QueryComputeLocalSizeX() => _oldSpecState.ComputeState.LocalSizeX;
/// <inheritdoc/>
public int QueryComputeLocalSizeY() => _oldSpecState.ComputeState.LocalSizeY;
/// <inheritdoc/>
public int QueryComputeLocalSizeZ() => _oldSpecState.ComputeState.LocalSizeZ;
/// <inheritdoc/>
public int QueryComputeLocalMemorySize() => _oldSpecState.ComputeState.LocalMemorySize;
/// <inheritdoc/>
public int QueryComputeSharedMemorySize() => _oldSpecState.ComputeState.SharedMemorySize;
/// <inheritdoc/>
public uint QueryConstantBufferUse()
{
_newSpecState.RecordConstantBufferUse(_stageIndex, _oldSpecState.ConstantBufferUse[_stageIndex]);
return _oldSpecState.ConstantBufferUse[_stageIndex];
}
/// <inheritdoc/>
public InputTopology QueryPrimitiveTopology()
{
_newSpecState.RecordPrimitiveTopology();
return ConvertToInputTopology(_oldSpecState.GraphicsState.Topology, _oldSpecState.GraphicsState.TessellationMode);
}
/// <inheritdoc/>
public bool QueryTessCw()
{
return _oldSpecState.GraphicsState.TessellationMode.UnpackCw();
}
/// <inheritdoc/>
public TessPatchType QueryTessPatchType()
{
return _oldSpecState.GraphicsState.TessellationMode.UnpackPatchType();
}
/// <inheritdoc/>
public TessSpacing QueryTessSpacing()
{
return _oldSpecState.GraphicsState.TessellationMode.UnpackSpacing();
}
/// <inheritdoc/>
public TextureFormat QueryTextureFormat(int handle, int cbufSlot)
{
_newSpecState.RecordTextureFormat(_stageIndex, handle, cbufSlot);
(uint format, bool formatSrgb) = _oldSpecState.GetFormat(_stageIndex, handle, cbufSlot);
return ConvertToTextureFormat(format, formatSrgb);
}
/// <inheritdoc/>
public SamplerType QuerySamplerType(int handle, int cbufSlot)
{
_newSpecState.RecordTextureSamplerType(_stageIndex, handle, cbufSlot);
return _oldSpecState.GetTextureTarget(_stageIndex, handle, cbufSlot).ConvertSamplerType();
}
/// <inheritdoc/>
public bool QueryTextureCoordNormalized(int handle, int cbufSlot)
{
_newSpecState.RecordTextureCoordNormalized(_stageIndex, handle, cbufSlot);
return _oldSpecState.GetCoordNormalized(_stageIndex, handle, cbufSlot);
}
/// <inheritdoc/>
public bool QueryTransformFeedbackEnabled()
{
return _oldSpecState.TransformFeedbackDescriptors != null;
}
/// <inheritdoc/>
public ReadOnlySpan<byte> QueryTransformFeedbackVaryingLocations(int bufferIndex)
{
return _oldSpecState.TransformFeedbackDescriptors[bufferIndex].AsSpan();
}
/// <inheritdoc/>
public int QueryTransformFeedbackStride(int bufferIndex)
{
return _oldSpecState.TransformFeedbackDescriptors[bufferIndex].Stride;
}
/// <inheritdoc/>
public bool QueryEarlyZForce()
{
_newSpecState.RecordEarlyZForce();
return _oldSpecState.GraphicsState.EarlyZForce;
}
/// <inheritdoc/>
public void RegisterTexture(int handle, int cbufSlot)
{
if (!_oldSpecState.TextureRegistered(_stageIndex, handle, cbufSlot))
{
throw new DiskCacheLoadException(DiskCacheLoadResult.MissingTextureDescriptor);
}
(uint format, bool formatSrgb) = _oldSpecState.GetFormat(_stageIndex, handle, cbufSlot);
TextureTarget target = _oldSpecState.GetTextureTarget(_stageIndex, handle, cbufSlot);
bool coordNormalized = _oldSpecState.GetCoordNormalized(_stageIndex, handle, cbufSlot);
_newSpecState.RegisterTexture(_stageIndex, handle, cbufSlot, format, formatSrgb, target, coordNormalized);
}
}
}
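
One detail worth noting about the QueryBinding* methods above: binding numbers come from counters shared by all stages of a program, so replaying the stages in the same forward order reproduces the same binding numbers as the original compilation (which is what the "Translate and compile shaders in forward order" bullet refers to). A minimal sketch of that allocation pattern, using a made-up CountersSketch class rather than the real ResourceCounts:

using System;

// Hypothetical stand-in for the shared per-program resource counters.
class CountersSketch
{
    public int UniformBuffers;
    public int Textures;
}

class StageBindingAllocatorSketch
{
    private readonly CountersSketch _counts;

    public StageBindingAllocatorSketch(CountersSketch counts) => _counts = counts;

    // Each query reserves the next free binding; only the order of the calls matters.
    public int QueryBindingConstantBuffer(int index) => _counts.UniformBuffers++;
    public int QueryBindingTexture(int index) => _counts.Textures++;
}

static class BindingOrderDemo
{
    static void Main()
    {
        var counts = new CountersSketch();
        var vertex = new StageBindingAllocatorSketch(counts);
        var fragment = new StageBindingAllocatorSketch(counts);

        Console.WriteLine(vertex.QueryBindingConstantBuffer(0));   // 0
        Console.WriteLine(vertex.QueryBindingTexture(0));          // 0
        Console.WriteLine(fragment.QueryBindingConstantBuffer(0)); // 1
        Console.WriteLine(fragment.QueryBindingTexture(0));        // 1
    }
}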

@@ -0,0 +1,459 @@
using Ryujinx.Common;
using System;
using System.Collections.Generic;
using System.IO;
using System.Runtime.CompilerServices;
namespace Ryujinx.Graphics.Gpu.Shader.DiskCache
{
/// <summary>
/// On-disk shader cache storage for guest code.
/// </summary>
class DiskCacheGuestStorage
{
private const uint TocMagic = (byte)'T' | ((byte)'O' << 8) | ((byte)'C' << 16) | ((byte)'G' << 24);
private const ushort VersionMajor = 1;
private const ushort VersionMinor = 0;
private const uint VersionPacked = ((uint)VersionMajor << 16) | VersionMinor;
private const string TocFileName = "guest.toc";
private const string DataFileName = "guest.data";
private readonly string _basePath;
/// <summary>
/// TOC (Table of contents) file header.
/// </summary>
private struct TocHeader
{
/// <summary>
/// Magic value, for validation and identification purposes.
/// </summary>
public uint Magic;
/// <summary>
/// File format version.
/// </summary>
public uint Version;
/// <summary>
/// Header padding.
/// </summary>
public uint Padding;
/// <summary>
/// Number of modifications to the file; this also matches the shader count.
/// </summary>
public uint ModificationsCount;
/// <summary>
/// Reserved space, to be used in the future. Write as zero.
/// </summary>
public ulong Reserved;
/// <summary>
/// Reserved space, to be used in the future. Write as zero.
/// </summary>
public ulong Reserved2;
}
/// <summary>
/// TOC (Table of contents) file entry.
/// </summary>
private struct TocEntry
{
/// <summary>
/// Offset of the data on the data file.
/// </summary>
public uint Offset;
/// <summary>
/// Code size.
/// </summary>
public uint CodeSize;
/// <summary>
/// Constant buffer 1 data size.
/// </summary>
public uint Cb1DataSize;
/// <summary>
/// Hash of the code and constant buffer data.
/// </summary>
public uint Hash;
}
/// <summary>
/// TOC (Table of contents) memory cache entry.
/// </summary>
private struct TocMemoryEntry
{
/// <summary>
/// Offset of the data on the data file.
/// </summary>
public uint Offset;
/// <summary>
/// Code size.
/// </summary>
public uint CodeSize;
/// <summary>
/// Constant buffer 1 data size.
/// </summary>
public uint Cb1DataSize;
/// <summary>
/// Index of the shader on the cache.
/// </summary>
public readonly int Index;
/// <summary>
/// Creates a new TOC memory entry.
/// </summary>
/// <param name="offset">Offset of the data on the data file</param>
/// <param name="codeSize">Code size</param>
/// <param name="cb1DataSize">Constant buffer 1 data size</param>
/// <param name="index">Index of the shader on the cache</param>
public TocMemoryEntry(uint offset, uint codeSize, uint cb1DataSize, int index)
{
Offset = offset;
CodeSize = codeSize;
Cb1DataSize = cb1DataSize;
Index = index;
}
}
private Dictionary<uint, List<TocMemoryEntry>> _toc;
private uint _tocModificationsCount;
private (byte[], byte[])[] _cache;
/// <summary>
/// Creates a new disk cache guest storage.
/// </summary>
/// <param name="basePath">Base path of the disk shader cache</param>
public DiskCacheGuestStorage(string basePath)
{
_basePath = basePath;
}
/// <summary>
/// Checks if the TOC (table of contents) file for the guest cache exists.
/// </summary>
/// <returns>True if the file exists, false otherwise</returns>
public bool TocFileExists()
{
return File.Exists(Path.Combine(_basePath, TocFileName));
}
/// <summary>
/// Checks if the data file for the guest cache exists.
/// </summary>
/// <returns>True if the file exists, false otherwise</returns>
public bool DataFileExists()
{
return File.Exists(Path.Combine(_basePath, DataFileName));
}
/// <summary>
/// Opens the guest cache TOC (table of contents) file.
/// </summary>
/// <returns>File stream</returns>
public Stream OpenTocFileStream()
{
return DiskCacheCommon.OpenFile(_basePath, TocFileName, writable: false);
}
/// <summary>
/// Opens the guest cache data file.
/// </summary>
/// <returns>File stream</returns>
public Stream OpenDataFileStream()
{
return DiskCacheCommon.OpenFile(_basePath, DataFileName, writable: false);
}
/// <summary>
/// Clears all content from the guest cache files.
/// </summary>
public void ClearCache()
{
using var tocFileStream = DiskCacheCommon.OpenFile(_basePath, TocFileName, writable: true);
using var dataFileStream = DiskCacheCommon.OpenFile(_basePath, DataFileName, writable: true);
tocFileStream.SetLength(0);
dataFileStream.SetLength(0);
}
/// <summary>
/// Loads the guest cache from file or memory cache.
/// </summary>
/// <param name="tocFileStream">Guest TOC file stream</param>
/// <param name="dataFileStream">Guest data file stream</param>
/// <param name="index">Guest shader index</param>
/// <returns>Tuple with the guest code and constant buffer 1 data, respectively</returns>
public (byte[], byte[]) LoadShader(Stream tocFileStream, Stream dataFileStream, int index)
{
if (_cache == null || index >= _cache.Length)
{
_cache = new (byte[], byte[])[Math.Max(index + 1, GetShadersCountFromLength(tocFileStream.Length))];
}
(byte[] guestCode, byte[] cb1Data) = _cache[index];
if (guestCode == null || cb1Data == null)
{
BinarySerializer tocReader = new BinarySerializer(tocFileStream);
tocFileStream.Seek(Unsafe.SizeOf<TocHeader>() + index * Unsafe.SizeOf<TocEntry>(), SeekOrigin.Begin);
TocEntry entry = new TocEntry();
tocReader.Read(ref entry);
guestCode = new byte[entry.CodeSize];
cb1Data = new byte[entry.Cb1DataSize];
if (entry.Offset >= (ulong)dataFileStream.Length)
{
throw new DiskCacheLoadException(DiskCacheLoadResult.FileCorruptedGeneric);
}
dataFileStream.Seek((long)entry.Offset, SeekOrigin.Begin);
dataFileStream.Read(cb1Data);
BinarySerializer.ReadCompressed(dataFileStream, guestCode);
_cache[index] = (guestCode, cb1Data);
}
return (guestCode, cb1Data);
}
/// <summary>
/// Clears guest code memory cache, forcing future loads to be from file.
/// </summary>
public void ClearMemoryCache()
{
_cache = null;
}
/// <summary>
/// Calculates the guest shaders count from the TOC file length.
/// </summary>
/// <param name="length">TOC file length</param>
/// <returns>Shaders count</returns>
private static int GetShadersCountFromLength(long length)
{
return (int)((length - Unsafe.SizeOf<TocHeader>()) / Unsafe.SizeOf<TocEntry>());
}
/// <summary>
/// Adds a guest shader to the cache.
/// </summary>
/// <remarks>
/// If the shader is already on the cache, the existing index will be returned and nothing will be written.
/// </remarks>
/// <param name="data">Guest code</param>
/// <param name="cb1Data">Constant buffer 1 data accessed by the code</param>
/// <returns>Index of the shader on the cache</returns>
public int AddShader(ReadOnlySpan<byte> data, ReadOnlySpan<byte> cb1Data)
{
using var tocFileStream = DiskCacheCommon.OpenFile(_basePath, TocFileName, writable: true);
using var dataFileStream = DiskCacheCommon.OpenFile(_basePath, DataFileName, writable: true);
TocHeader header = new TocHeader();
LoadOrCreateToc(tocFileStream, ref header);
uint hash = CalcHash(data, cb1Data);
if (_toc.TryGetValue(hash, out var list))
{
foreach (var entry in list)
{
if (data.Length != entry.CodeSize || cb1Data.Length != entry.Cb1DataSize)
{
continue;
}
dataFileStream.Seek((long)entry.Offset, SeekOrigin.Begin);
byte[] cachedCode = new byte[entry.CodeSize];
byte[] cachedCb1Data = new byte[entry.Cb1DataSize];
dataFileStream.Read(cachedCb1Data);
BinarySerializer.ReadCompressed(dataFileStream, cachedCode);
if (data.SequenceEqual(cachedCode) && cb1Data.SequenceEqual(cachedCb1Data))
{
return entry.Index;
}
}
}
return WriteNewEntry(tocFileStream, dataFileStream, ref header, data, cb1Data, hash);
}
/// <summary>
/// Loads the guest cache TOC file, or creates a new one if not present.
/// </summary>
/// <param name="tocFileStream">Guest TOC file stream</param>
/// <param name="header">Set to the TOC file header</param>
private void LoadOrCreateToc(Stream tocFileStream, ref TocHeader header)
{
BinarySerializer reader = new BinarySerializer(tocFileStream);
if (!reader.TryRead(ref header) || header.Magic != TocMagic || header.Version != VersionPacked)
{
CreateToc(tocFileStream, ref header);
}
if (_toc == null || header.ModificationsCount != _tocModificationsCount)
{
if (!LoadTocEntries(tocFileStream, ref reader))
{
CreateToc(tocFileStream, ref header);
}
_tocModificationsCount = header.ModificationsCount;
}
}
/// <summary>
/// Creates a new guest cache TOC file.
/// </summary>
/// <param name="tocFileStream">Guest TOC file stream</param>
/// <param name="header">Set to the TOC header</param>
private void CreateToc(Stream tocFileStream, ref TocHeader header)
{
BinarySerializer writer = new BinarySerializer(tocFileStream);
header.Magic = TocMagic;
header.Version = VersionPacked;
header.Padding = 0;
header.ModificationsCount = 0;
header.Reserved = 0;
header.Reserved2 = 0;
if (tocFileStream.Length > 0)
{
tocFileStream.Seek(0, SeekOrigin.Begin);
tocFileStream.SetLength(0);
}
writer.Write(ref header);
}
/// <summary>
/// Reads all the entries on the guest TOC file.
/// </summary>
/// <param name="tocFileStream">Guest TOC file stream</param>
/// <param name="reader">TOC file reader</param>
/// <returns>True if the operation was successful, false otherwise</returns>
private bool LoadTocEntries(Stream tocFileStream, ref BinarySerializer reader)
{
_toc = new Dictionary<uint, List<TocMemoryEntry>>();
TocEntry entry = new TocEntry();
int index = 0;
while (tocFileStream.Position < tocFileStream.Length)
{
if (!reader.TryRead(ref entry))
{
return false;
}
AddTocMemoryEntry(entry.Offset, entry.CodeSize, entry.Cb1DataSize, entry.Hash, index++);
}
return true;
}
/// <summary>
/// Writes a new guest code entry into the file.
/// </summary>
/// <param name="tocFileStream">TOC file stream</param>
/// <param name="dataFileStream">Data file stream</param>
/// <param name="header">TOC header, to be updated with the new count</param>
/// <param name="data">Guest code</param>
/// <param name="cb1Data">Constant buffer 1 data accessed by the guest code</param>
/// <param name="hash">Code and constant buffer data hash</param>
/// <returns>Entry index</returns>
private int WriteNewEntry(
Stream tocFileStream,
Stream dataFileStream,
ref TocHeader header,
ReadOnlySpan<byte> data,
ReadOnlySpan<byte> cb1Data,
uint hash)
{
BinarySerializer tocWriter = new BinarySerializer(tocFileStream);
dataFileStream.Seek(0, SeekOrigin.End);
uint dataOffset = checked((uint)dataFileStream.Position);
uint codeSize = (uint)data.Length;
uint cb1DataSize = (uint)cb1Data.Length;
dataFileStream.Write(cb1Data);
BinarySerializer.WriteCompressed(dataFileStream, data, DiskCacheCommon.GetCompressionAlgorithm());
_tocModificationsCount = ++header.ModificationsCount;
tocFileStream.Seek(0, SeekOrigin.Begin);
tocWriter.Write(ref header);
TocEntry entry = new TocEntry()
{
Offset = dataOffset,
CodeSize = codeSize,
Cb1DataSize = cb1DataSize,
Hash = hash
};
tocFileStream.Seek(0, SeekOrigin.End);
int index = (int)((tocFileStream.Position - Unsafe.SizeOf<TocHeader>()) / Unsafe.SizeOf<TocEntry>());
tocWriter.Write(ref entry);
AddTocMemoryEntry(dataOffset, codeSize, cb1DataSize, hash, index);
return index;
}
/// <summary>
/// Adds an entry to the memory TOC cache. This can be used to avoid reading the TOC file all the time.
/// </summary>
/// <param name="dataOffset">Offset of the code and constant buffer data in the data file</param>
/// <param name="codeSize">Code size</param>
/// <param name="cb1DataSize">Constant buffer 1 data size</param>
/// <param name="hash">Code and constant buffer data hash</param>
/// <param name="index">Index of the data on the cache</param>
private void AddTocMemoryEntry(uint dataOffset, uint codeSize, uint cb1DataSize, uint hash, int index)
{
if (!_toc.TryGetValue(hash, out var list))
{
_toc.Add(hash, list = new List<TocMemoryEntry>());
}
list.Add(new TocMemoryEntry(dataOffset, codeSize, cb1DataSize, index));
}
/// <summary>
/// Calculates the hash for a data pair.
/// </summary>
/// <param name="data">Data 1</param>
/// <param name="data2">Data 2</param>
/// <returns>Hash of both data</returns>
private static uint CalcHash(ReadOnlySpan<byte> data, ReadOnlySpan<byte> data2)
{
return CalcHash(data2) * 23 ^ CalcHash(data);
}
/// <summary>
/// Calculates the hash for data.
/// </summary>
/// <param name="data">Data to be hashed</param>
/// <returns>Hash of the data</returns>
private static uint CalcHash(ReadOnlySpan<byte> data)
{
return (uint)XXHash128.ComputeHash(data).Low;
}
}
}
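
The AddShader deduplication logic above can be summarized as: bucket entries by a cheap hash, and only on a hash match compare the stored bytes before reusing the existing index. Below is a self-contained, in-memory sketch of that idea; the class name, placeholder FNV-1a hash, and storage in a List are illustrative assumptions, not the XXHash128-based, file-backed implementation used here.

using System;
using System.Collections.Generic;

class DedupStoreSketch
{
    private readonly Dictionary<uint, List<int>> _buckets = new Dictionary<uint, List<int>>();
    private readonly List<byte[]> _entries = new List<byte[]>();

    public int Add(ReadOnlySpan<byte> data)
    {
        uint hash = CheapHash(data);

        if (_buckets.TryGetValue(hash, out var list))
        {
            foreach (int index in list)
            {
                // Hash collisions are possible, so the content must still be compared.
                if (data.SequenceEqual(_entries[index]))
                {
                    return index;
                }
            }
        }
        else
        {
            _buckets.Add(hash, list = new List<int>());
        }

        _entries.Add(data.ToArray());
        int newIndex = _entries.Count - 1;
        list.Add(newIndex);
        return newIndex;
    }

    private static uint CheapHash(ReadOnlySpan<byte> data)
    {
        uint hash = 2166136261; // FNV-1a, just as a placeholder hash.
        foreach (byte b in data) hash = (hash ^ b) * 16777619;
        return hash;
    }
}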

@@ -0,0 +1,763 @@
using Ryujinx.Graphics.GAL;
using Ryujinx.Graphics.Shader;
using System;
using System.IO;
using System.Numerics;
using System.Runtime.CompilerServices;
namespace Ryujinx.Graphics.Gpu.Shader.DiskCache
{
/// <summary>
/// On-disk shader cache storage for host code.
/// </summary>
class DiskCacheHostStorage
{
private const uint TocsMagic = (byte)'T' | ((byte)'O' << 8) | ((byte)'C' << 16) | ((byte)'S' << 24);
private const uint TochMagic = (byte)'T' | ((byte)'O' << 8) | ((byte)'C' << 16) | ((byte)'H' << 24);
private const uint ShdiMagic = (byte)'S' | ((byte)'H' << 8) | ((byte)'D' << 16) | ((byte)'I' << 24);
private const uint BufdMagic = (byte)'B' | ((byte)'U' << 8) | ((byte)'F' << 16) | ((byte)'D' << 24);
private const uint TexdMagic = (byte)'T' | ((byte)'E' << 8) | ((byte)'X' << 16) | ((byte)'D' << 24);
private const ushort FileFormatVersionMajor = 1;
private const ushort FileFormatVersionMinor = 1;
private const uint FileFormatVersionPacked = ((uint)FileFormatVersionMajor << 16) | FileFormatVersionMinor;
private const uint CodeGenVersion = 0;
private const string SharedTocFileName = "shared.toc";
private const string SharedDataFileName = "shared.data";
private readonly string _basePath;
public bool CacheEnabled => !string.IsNullOrEmpty(_basePath);
/// <summary>
/// TOC (Table of contents) file header.
/// </summary>
private struct TocHeader
{
/// <summary>
/// Magic value, for validation and identification.
/// </summary>
public uint Magic;
/// <summary>
/// File format version.
/// </summary>
public uint FormatVersion;
/// <summary>
/// Generated shader code version.
/// </summary>
public uint CodeGenVersion;
/// <summary>
/// Header padding.
/// </summary>
public uint Padding;
/// <summary>
/// Reserved space, to be used in the future. Write as zero.
/// </summary>
public ulong Reserved;
/// <summary>
/// Reserved space, to be used in the future. Write as zero.
/// </summary>
public ulong Reserved2;
}
/// <summary>
/// Offset and size pair.
/// </summary>
private struct OffsetAndSize
{
/// <summary>
/// Offset.
/// </summary>
public ulong Offset;
/// <summary>
/// Size.
/// </summary>
public uint Size;
}
/// <summary>
/// Per-stage data entry.
/// </summary>
private struct DataEntryPerStage
{
/// <summary>
/// Index of the guest code on the guest code cache TOC file.
/// </summary>
public int GuestCodeIndex;
}
/// <summary>
/// Per-program data entry.
/// </summary>
private struct DataEntry
{
/// <summary>
/// Bit mask where each bit set is a used shader stage. Should be zero for compute shaders.
/// </summary>
public uint StagesBitMask;
}
/// <summary>
/// Per-stage shader information, returned by the translator.
/// </summary>
private struct DataShaderInfo
{
/// <summary>
/// Total constant buffers used.
/// </summary>
public ushort CBuffersCount;
/// <summary>
/// Total storage buffers used.
/// </summary>
public ushort SBuffersCount;
/// <summary>
/// Total textures used.
/// </summary>
public ushort TexturesCount;
/// <summary>
/// Total images used.
/// </summary>
public ushort ImagesCount;
/// <summary>
/// Shader stage.
/// </summary>
public ShaderStage Stage;
/// <summary>
/// Indicates if the shader accesses the Instance ID built-in variable.
/// </summary>
public bool UsesInstanceId;
/// <summary>
/// Indicates if the shader modifies the Layer built-in variable.
/// </summary>
public bool UsesRtLayer;
/// <summary>
/// Bit mask with the clip distances written on the vertex stage.
/// </summary>
public byte ClipDistancesWritten;
/// <summary>
/// Bit mask of the render target components written by the fragment stage.
/// </summary>
public int FragmentOutputMap;
}
private readonly DiskCacheGuestStorage _guestStorage;
/// <summary>
/// Creates a disk cache host storage.
/// </summary>
/// <param name="basePath">Base path of the shader cache</param>
public DiskCacheHostStorage(string basePath)
{
_basePath = basePath;
_guestStorage = new DiskCacheGuestStorage(basePath);
if (CacheEnabled)
{
Directory.CreateDirectory(basePath);
}
}
/// <summary>
/// Gets the total number of host programs on the cache.
/// </summary>
/// <returns>Host programs count</returns>
public int GetProgramCount()
{
string tocFilePath = Path.Combine(_basePath, SharedTocFileName);
if (!File.Exists(tocFilePath))
{
return 0;
}
return (int)((new FileInfo(tocFilePath).Length - Unsafe.SizeOf<TocHeader>()) / sizeof(ulong));
}
/// <summary>
/// Gets the name of the host program cache file, without extension.
/// </summary>
/// <param name="context">GPU context</param>
/// <returns>Name of the file, without extension</returns>
private static string GetHostFileName(GpuContext context)
{
string apiName = context.Capabilities.Api.ToString().ToLowerInvariant();
string vendorName = RemoveInvalidCharacters(context.Capabilities.VendorName.ToLowerInvariant());
return $"{apiName}_{vendorName}";
}
/// <summary>
/// Removes invalid path characters and spaces from a file name.
/// </summary>
/// <param name="fileName">File name</param>
/// <returns>Filtered file name</returns>
private static string RemoveInvalidCharacters(string fileName)
{
int indexOfSpace = fileName.IndexOf(' ');
if (indexOfSpace >= 0)
{
fileName = fileName.Substring(0, indexOfSpace);
}
return string.Concat(fileName.Split(Path.GetInvalidFileNameChars(), StringSplitOptions.RemoveEmptyEntries));
}
/// <summary>
/// Gets the name of the TOC host file.
/// </summary>
/// <param name="context">GPU context</param>
/// <returns>File name</returns>
private static string GetHostTocFileName(GpuContext context)
{
return GetHostFileName(context) + ".toc";
}
/// <summary>
/// Gets the name of the data host file.
/// </summary>
/// <param name="context">GPU context</param>
/// <returns>File name</returns>
private static string GetHostDataFileName(GpuContext context)
{
return GetHostFileName(context) + ".data";
}
/// <summary>
/// Checks if a disk cache exists for the current application.
/// </summary>
/// <returns>True if a disk cache exists, false otherwise</returns>
public bool CacheExists()
{
string tocFilePath = Path.Combine(_basePath, SharedTocFileName);
string dataFilePath = Path.Combine(_basePath, SharedDataFileName);
if (!File.Exists(tocFilePath) || !File.Exists(dataFilePath) || !_guestStorage.TocFileExists() || !_guestStorage.DataFileExists())
{
return false;
}
return true;
}
/// <summary>
/// Loads all shaders from the cache.
/// </summary>
/// <param name="context">GPU context</param>
/// <param name="loader">Parallel disk cache loader</param>
public void LoadShaders(GpuContext context, ParallelDiskCacheLoader loader)
{
if (!CacheExists())
{
return;
}
Stream hostTocFileStream = null;
Stream hostDataFileStream = null;
try
{
using var tocFileStream = DiskCacheCommon.OpenFile(_basePath, SharedTocFileName, writable: false);
using var dataFileStream = DiskCacheCommon.OpenFile(_basePath, SharedDataFileName, writable: false);
using var guestTocFileStream = _guestStorage.OpenTocFileStream();
using var guestDataFileStream = _guestStorage.OpenDataFileStream();
BinarySerializer tocReader = new BinarySerializer(tocFileStream);
BinarySerializer dataReader = new BinarySerializer(dataFileStream);
TocHeader header = new TocHeader();
if (!tocReader.TryRead(ref header) || header.Magic != TocsMagic)
{
throw new DiskCacheLoadException(DiskCacheLoadResult.FileCorruptedGeneric);
}
if (header.FormatVersion != FileFormatVersionPacked)
{
throw new DiskCacheLoadException(DiskCacheLoadResult.IncompatibleVersion);
}
bool loadHostCache = header.CodeGenVersion == CodeGenVersion;
int programIndex = 0;
DataEntry entry = new DataEntry();
while (tocFileStream.Position < tocFileStream.Length && loader.Active)
{
ulong dataOffset = 0;
tocReader.Read(ref dataOffset);
if ((ulong)dataOffset >= (ulong)dataFileStream.Length)
{
throw new DiskCacheLoadException(DiskCacheLoadResult.FileCorruptedGeneric);
}
dataFileStream.Seek((long)dataOffset, SeekOrigin.Begin);
dataReader.BeginCompression();
dataReader.Read(ref entry);
uint stagesBitMask = entry.StagesBitMask;
if ((stagesBitMask & ~0x3fu) != 0)
{
throw new DiskCacheLoadException(DiskCacheLoadResult.FileCorruptedGeneric);
}
bool isCompute = stagesBitMask == 0;
if (isCompute)
{
stagesBitMask = 1;
}
CachedShaderStage[] shaders = new CachedShaderStage[isCompute ? 1 : Constants.ShaderStages + 1];
DataEntryPerStage stageEntry = new DataEntryPerStage();
while (stagesBitMask != 0)
{
int stageIndex = BitOperations.TrailingZeroCount(stagesBitMask);
dataReader.Read(ref stageEntry);
ShaderProgramInfo info = stageIndex != 0 || isCompute ? ReadShaderProgramInfo(ref dataReader) : null;
(byte[] guestCode, byte[] cb1Data) = _guestStorage.LoadShader(
guestTocFileStream,
guestDataFileStream,
stageEntry.GuestCodeIndex);
shaders[stageIndex] = new CachedShaderStage(info, guestCode, cb1Data);
stagesBitMask &= ~(1u << stageIndex);
}
ShaderSpecializationState specState = ShaderSpecializationState.Read(ref dataReader);
dataReader.EndCompression();
if (loadHostCache)
{
byte[] hostCode = ReadHostCode(context, ref hostTocFileStream, ref hostDataFileStream, programIndex);
if (hostCode != null)
{
bool hasFragmentShader = shaders.Length > 5 && shaders[5] != null;
int fragmentOutputMap = hasFragmentShader ? shaders[5].Info.FragmentOutputMap : -1;
IProgram hostProgram = context.Renderer.LoadProgramBinary(hostCode, hasFragmentShader, new ShaderInfo(fragmentOutputMap));
CachedShaderProgram program = new CachedShaderProgram(hostProgram, specState, shaders);
loader.QueueHostProgram(program, hostProgram, programIndex, isCompute);
}
else
{
loadHostCache = false;
}
}
if (!loadHostCache)
{
loader.QueueGuestProgram(shaders, specState, programIndex, isCompute);
}
loader.CheckCompilation();
programIndex++;
}
}
finally
{
_guestStorage.ClearMemoryCache();
hostTocFileStream?.Dispose();
hostDataFileStream?.Dispose();
}
}
/// <summary>
/// Reads the host code for a given shader, if existent.
/// </summary>
/// <param name="context">GPU context</param>
/// <param name="tocFileStream">Host TOC file stream, intialized if needed</param>
/// <param name="dataFileStream">Host data file stream, initialized if needed</param>
/// <param name="programIndex">Index of the program on the cache</param>
/// <returns>Host binary code, or null if not found</returns>
private byte[] ReadHostCode(GpuContext context, ref Stream tocFileStream, ref Stream dataFileStream, int programIndex)
{
if (tocFileStream == null && dataFileStream == null)
{
string tocFilePath = Path.Combine(_basePath, GetHostTocFileName(context));
string dataFilePath = Path.Combine(_basePath, GetHostDataFileName(context));
if (!File.Exists(tocFilePath) || !File.Exists(dataFilePath))
{
return null;
}
tocFileStream = DiskCacheCommon.OpenFile(_basePath, GetHostTocFileName(context), writable: false);
dataFileStream = DiskCacheCommon.OpenFile(_basePath, GetHostDataFileName(context), writable: false);
}
int offset = Unsafe.SizeOf<TocHeader>() + programIndex * Unsafe.SizeOf<OffsetAndSize>();
if (offset + Unsafe.SizeOf<OffsetAndSize>() > tocFileStream.Length)
{
return null;
}
if ((ulong)offset >= (ulong)dataFileStream.Length)
{
throw new DiskCacheLoadException(DiskCacheLoadResult.FileCorruptedGeneric);
}
tocFileStream.Seek(offset, SeekOrigin.Begin);
BinarySerializer tocReader = new BinarySerializer(tocFileStream);
OffsetAndSize offsetAndSize = new OffsetAndSize();
tocReader.Read(ref offsetAndSize);
if (offsetAndSize.Offset >= (ulong)dataFileStream.Length)
{
throw new DiskCacheLoadException(DiskCacheLoadResult.FileCorruptedGeneric);
}
dataFileStream.Seek((long)offsetAndSize.Offset, SeekOrigin.Begin);
byte[] hostCode = new byte[offsetAndSize.Size];
BinarySerializer.ReadCompressed(dataFileStream, hostCode);
return hostCode;
}
/// <summary>
/// Gets output streams for the disk cache, for faster batch writing.
/// </summary>
/// <param name="context">The GPU context, used to determine the host disk cache</param>
/// <returns>A collection of disk cache output streams</returns>
public DiskCacheOutputStreams GetOutputStreams(GpuContext context)
{
var tocFileStream = DiskCacheCommon.OpenFile(_basePath, SharedTocFileName, writable: true);
var dataFileStream = DiskCacheCommon.OpenFile(_basePath, SharedDataFileName, writable: true);
var hostTocFileStream = DiskCacheCommon.OpenFile(_basePath, GetHostTocFileName(context), writable: true);
var hostDataFileStream = DiskCacheCommon.OpenFile(_basePath, GetHostDataFileName(context), writable: true);
return new DiskCacheOutputStreams(tocFileStream, dataFileStream, hostTocFileStream, hostDataFileStream);
}
/// <summary>
/// Adds a shader to the cache.
/// </summary>
/// <param name="context">GPU context</param>
/// <param name="program">Cached program</param>
/// <param name="hostCode">Optional host binary code</param>
/// <param name="streams">Output streams to use</param>
public void AddShader(GpuContext context, CachedShaderProgram program, ReadOnlySpan<byte> hostCode, DiskCacheOutputStreams streams = null)
{
uint stagesBitMask = 0;
for (int index = 0; index < program.Shaders.Length; index++)
{
var shader = program.Shaders[index];
if (shader == null || (shader.Info != null && shader.Info.Stage == ShaderStage.Compute))
{
continue;
}
stagesBitMask |= 1u << index;
}
var tocFileStream = streams != null ? streams.TocFileStream : DiskCacheCommon.OpenFile(_basePath, SharedTocFileName, writable: true);
var dataFileStream = streams != null ? streams.DataFileStream : DiskCacheCommon.OpenFile(_basePath, SharedDataFileName, writable: true);
if (tocFileStream.Length == 0)
{
TocHeader header = new TocHeader();
CreateToc(tocFileStream, ref header, TocsMagic, CodeGenVersion);
}
tocFileStream.Seek(0, SeekOrigin.End);
dataFileStream.Seek(0, SeekOrigin.End);
BinarySerializer tocWriter = new BinarySerializer(tocFileStream);
BinarySerializer dataWriter = new BinarySerializer(dataFileStream);
ulong dataOffset = (ulong)dataFileStream.Position;
tocWriter.Write(ref dataOffset);
DataEntry entry = new DataEntry();
entry.StagesBitMask = stagesBitMask;
dataWriter.BeginCompression(DiskCacheCommon.GetCompressionAlgorithm());
dataWriter.Write(ref entry);
DataEntryPerStage stageEntry = new DataEntryPerStage();
for (int index = 0; index < program.Shaders.Length; index++)
{
var shader = program.Shaders[index];
if (shader == null)
{
continue;
}
stageEntry.GuestCodeIndex = _guestStorage.AddShader(shader.Code, shader.Cb1Data);
dataWriter.Write(ref stageEntry);
WriteShaderProgramInfo(ref dataWriter, shader.Info);
}
program.SpecializationState.Write(ref dataWriter);
dataWriter.EndCompression();
if (streams == null)
{
tocFileStream.Dispose();
dataFileStream.Dispose();
}
if (hostCode.IsEmpty)
{
return;
}
WriteHostCode(context, hostCode, -1, streams);
}
/// <summary>
/// Clears all content from the guest cache files.
/// </summary>
public void ClearGuestCache()
{
_guestStorage.ClearCache();
}
/// <summary>
/// Clears all content from the shared cache files.
/// </summary>
/// <param name="context">GPU context</param>
public void ClearSharedCache()
{
using var tocFileStream = DiskCacheCommon.OpenFile(_basePath, SharedTocFileName, writable: true);
using var dataFileStream = DiskCacheCommon.OpenFile(_basePath, SharedDataFileName, writable: true);
tocFileStream.SetLength(0);
dataFileStream.SetLength(0);
}
/// <summary>
/// Deletes all content from the host cache files.
/// </summary>
/// <param name="context">GPU context</param>
public void ClearHostCache(GpuContext context)
{
using var tocFileStream = DiskCacheCommon.OpenFile(_basePath, GetHostTocFileName(context), writable: true);
using var dataFileStream = DiskCacheCommon.OpenFile(_basePath, GetHostDataFileName(context), writable: true);
tocFileStream.SetLength(0);
dataFileStream.SetLength(0);
}
/// <summary>
/// Adds a host binary shader to the host cache.
/// </summary>
/// <remarks>
/// This only modifies the host cache. The shader must already exist in the other caches.
/// This method should only be used for rebuilding the host cache after a clear.
/// </remarks>
/// <param name="context">GPU context</param>
/// <param name="hostCode">Host binary code</param>
/// <param name="programIndex">Index of the program in the cache</param>
public void AddHostShader(GpuContext context, ReadOnlySpan<byte> hostCode, int programIndex)
{
WriteHostCode(context, hostCode, programIndex);
}
/// <summary>
/// Writes the host binary code on the host cache.
/// </summary>
/// <param name="context">GPU context</param>
/// <param name="hostCode">Host binary code</param>
/// <param name="programIndex">Index of the program in the cache</param>
/// <param name="streams">Output streams to use</param>
private void WriteHostCode(GpuContext context, ReadOnlySpan<byte> hostCode, int programIndex, DiskCacheOutputStreams streams = null)
{
var tocFileStream = streams != null ? streams.HostTocFileStream : DiskCacheCommon.OpenFile(_basePath, GetHostTocFileName(context), writable: true);
var dataFileStream = streams != null ? streams.HostDataFileStream : DiskCacheCommon.OpenFile(_basePath, GetHostDataFileName(context), writable: true);
if (tocFileStream.Length == 0)
{
TocHeader header = new TocHeader();
CreateToc(tocFileStream, ref header, TochMagic, 0);
}
if (programIndex == -1)
{
tocFileStream.Seek(0, SeekOrigin.End);
}
else
{
tocFileStream.Seek(Unsafe.SizeOf<TocHeader>() + (programIndex * Unsafe.SizeOf<OffsetAndSize>()), SeekOrigin.Begin);
}
dataFileStream.Seek(0, SeekOrigin.End);
BinarySerializer tocWriter = new BinarySerializer(tocFileStream);
OffsetAndSize offsetAndSize = new OffsetAndSize();
offsetAndSize.Offset = (ulong)dataFileStream.Position;
offsetAndSize.Size = (uint)hostCode.Length;
tocWriter.Write(ref offsetAndSize);
BinarySerializer.WriteCompressed(dataFileStream, hostCode, DiskCacheCommon.GetCompressionAlgorithm());
if (streams == null)
{
tocFileStream.Dispose();
dataFileStream.Dispose();
}
}
/// <summary>
/// Creates a TOC file for the host or shared cache.
/// </summary>
/// <param name="tocFileStream">TOC file stream</param>
/// <param name="header">Set to the TOC file header</param>
/// <param name="magic">Magic value to be written</param>
/// <param name="codegenVersion">Shader codegen version, only valid for the host file</param>
private void CreateToc(Stream tocFileStream, ref TocHeader header, uint magic, uint codegenVersion)
{
BinarySerializer writer = new BinarySerializer(tocFileStream);
header.Magic = magic;
header.FormatVersion = FileFormatVersionPacked;
header.CodeGenVersion = codegenVersion;
header.Padding = 0;
header.Reserved = 0;
header.Reserved2 = 0;
if (tocFileStream.Length > 0)
{
tocFileStream.Seek(0, SeekOrigin.Begin);
tocFileStream.SetLength(0);
}
writer.Write(ref header);
}
/// <summary>
/// Reads the shader program info from the cache.
/// </summary>
/// <param name="dataReader">Cache data reader</param>
/// <returns>Shader program info</returns>
private static ShaderProgramInfo ReadShaderProgramInfo(ref BinarySerializer dataReader)
{
DataShaderInfo dataInfo = new DataShaderInfo();
dataReader.ReadWithMagicAndSize(ref dataInfo, ShdiMagic);
BufferDescriptor[] cBuffers = new BufferDescriptor[dataInfo.CBuffersCount];
BufferDescriptor[] sBuffers = new BufferDescriptor[dataInfo.SBuffersCount];
TextureDescriptor[] textures = new TextureDescriptor[dataInfo.TexturesCount];
TextureDescriptor[] images = new TextureDescriptor[dataInfo.ImagesCount];
for (int index = 0; index < dataInfo.CBuffersCount; index++)
{
dataReader.ReadWithMagicAndSize(ref cBuffers[index], BufdMagic);
}
for (int index = 0; index < dataInfo.SBuffersCount; index++)
{
dataReader.ReadWithMagicAndSize(ref sBuffers[index], BufdMagic);
}
for (int index = 0; index < dataInfo.TexturesCount; index++)
{
dataReader.ReadWithMagicAndSize(ref textures[index], TexdMagic);
}
for (int index = 0; index < dataInfo.ImagesCount; index++)
{
dataReader.ReadWithMagicAndSize(ref images[index], TexdMagic);
}
return new ShaderProgramInfo(
cBuffers,
sBuffers,
textures,
images,
dataInfo.Stage,
dataInfo.UsesInstanceId,
dataInfo.UsesRtLayer,
dataInfo.ClipDistancesWritten,
dataInfo.FragmentOutputMap);
}
/// <summary>
/// Writes the shader program info into the cache.
/// </summary>
/// <param name="dataWriter">Cache data writer</param>
/// <param name="info">Program info</param>
private static void WriteShaderProgramInfo(ref BinarySerializer dataWriter, ShaderProgramInfo info)
{
if (info == null)
{
return;
}
DataShaderInfo dataInfo = new DataShaderInfo();
dataInfo.CBuffersCount = (ushort)info.CBuffers.Count;
dataInfo.SBuffersCount = (ushort)info.SBuffers.Count;
dataInfo.TexturesCount = (ushort)info.Textures.Count;
dataInfo.ImagesCount = (ushort)info.Images.Count;
dataInfo.Stage = info.Stage;
dataInfo.UsesInstanceId = info.UsesInstanceId;
dataInfo.UsesRtLayer = info.UsesRtLayer;
dataInfo.ClipDistancesWritten = info.ClipDistancesWritten;
dataInfo.FragmentOutputMap = info.FragmentOutputMap;
dataWriter.WriteWithMagicAndSize(ref dataInfo, ShdiMagic);
for (int index = 0; index < info.CBuffers.Count; index++)
{
var entry = info.CBuffers[index];
dataWriter.WriteWithMagicAndSize(ref entry, BufdMagic);
}
for (int index = 0; index < info.SBuffers.Count; index++)
{
var entry = info.SBuffers[index];
dataWriter.WriteWithMagicAndSize(ref entry, BufdMagic);
}
for (int index = 0; index < info.Textures.Count; index++)
{
var entry = info.Textures[index];
dataWriter.WriteWithMagicAndSize(ref entry, TexdMagic);
}
for (int index = 0; index < info.Images.Count; index++)
{
var entry = info.Images[index];
dataWriter.WriteWithMagicAndSize(ref entry, TexdMagic);
}
}
}
}
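
A small point about how LoadShaders walks the per-program stage mask above: BitOperations.TrailingZeroCount visits each set bit exactly once, and the bit is cleared after its per-stage entry has been read. A standalone sketch of that loop (the mask value here is arbitrary):

using System;
using System.Numerics;

static class StageMaskSketch
{
    static void Main()
    {
        // Bit N set => stage N is present in the cached program.
        uint stagesBitMask = 0b101010;

        while (stagesBitMask != 0)
        {
            int stageIndex = BitOperations.TrailingZeroCount(stagesBitMask);
            Console.WriteLine($"Reading per-stage entry for stage {stageIndex}");
            stagesBitMask &= ~(1u << stageIndex); // Clear the bit that was just handled.
        }
    }
}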

@@ -0,0 +1,48 @@
using System;
namespace Ryujinx.Graphics.Gpu.Shader.DiskCache
{
/// <summary>
/// Disk cache load exception.
/// </summary>
class DiskCacheLoadException : Exception
{
/// <summary>
/// Result of the cache load operation.
/// </summary>
public DiskCacheLoadResult Result { get; }
/// <summary>
/// Creates a new instance of the disk cache load exception.
/// </summary>
public DiskCacheLoadException()
{
}
/// <summary>
/// Creates a new instance of the disk cache load exception.
/// </summary>
/// <param name="message">Exception message</param>
public DiskCacheLoadException(string message) : base(message)
{
}
/// <summary>
/// Creates a new instance of the disk cache load exception.
/// </summary>
/// <param name="message">Exception message</param>
/// <param name="inner">Inner exception</param>
public DiskCacheLoadException(string message, Exception inner) : base(message, inner)
{
}
/// <summary>
/// Creates a new instance of the disk cache load exception.
/// </summary>
/// <param name="result">Result code</param>
public DiskCacheLoadException(DiskCacheLoadResult result) : base(result.GetMessage())
{
Result = result;
}
}
}

@@ -0,0 +1,72 @@
namespace Ryujinx.Graphics.Gpu.Shader.DiskCache
{
/// <summary>
/// Result of a shader cache load operation.
/// </summary>
enum DiskCacheLoadResult
{
/// <summary>
/// No error.
/// </summary>
Success,
/// <summary>
/// File can't be accessed.
/// </summary>
NoAccess,
/// <summary>
/// The constant buffer 1 data length is too low for the translation of the guest shader.
/// </summary>
InvalidCb1DataLength,
/// <summary>
/// The cache is missing the descriptor of a texture used by the shader.
/// </summary>
MissingTextureDescriptor,
/// <summary>
/// File is corrupted.
/// </summary>
FileCorruptedGeneric,
/// <summary>
/// File is corrupted, detected by magic value check.
/// </summary>
FileCorruptedInvalidMagic,
/// <summary>
/// File is corrupted, detected by length check.
/// </summary>
FileCorruptedInvalidLength,
/// <summary>
/// File might be valid, but is incompatible with the current emulator version.
/// </summary>
IncompatibleVersion
}
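/// <summary>
/// Extension methods for the disk cache load result.
/// </summary>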
static class DiskCacheLoadResultExtensions
{
/// <summary>
/// Gets an error message from a result code.
/// </summary>
/// <param name="result">Result code</param>
/// <returns>Error message</returns>
public static string GetMessage(this DiskCacheLoadResult result)
{
return result switch
{
DiskCacheLoadResult.Success => "No error.",
DiskCacheLoadResult.NoAccess => "Could not access the cache file.",
DiskCacheLoadResult.InvalidCb1DataLength => "Constant buffer 1 data length is too low.",
DiskCacheLoadResult.MissingTextureDescriptor => "Texture descriptor missing from the cache file.",
DiskCacheLoadResult.FileCorruptedGeneric => "The cache file is corrupted.",
DiskCacheLoadResult.FileCorruptedInvalidMagic => "Magic check failed, the cache file is corrupted.",
DiskCacheLoadResult.FileCorruptedInvalidLength => "Length check failed, the cache file is corrupted.",
DiskCacheLoadResult.IncompatibleVersion => "The version of the disk cache is not compatible with this version of the emulator.",
_ => "Unknown error."
};
}
}
}
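A brief usage sketch tying the result codes to the rebuild decision the loader makes later in this diff; ShouldRebuild is a hypothetical helper, not part of the changeset, and simply mirrors the NoAccess special case in ParallelDiskCacheLoader.LoadShaders.

namespace Ryujinx.Graphics.Gpu.Shader.DiskCache
{
    static class DiskCacheLoadResultUsageSketch
    {
        /// <summary>
        /// Decides whether a failed load should trigger a cache rebuild. If the file cannot
        /// even be accessed, a rebuild would fail for the same reason, so it is skipped.
        /// </summary>
        public static bool ShouldRebuild(DiskCacheLoadResult result)
        {
            return result != DiskCacheLoadResult.Success && result != DiskCacheLoadResult.NoAccess;
        }
    }
}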

View file

@ -0,0 +1,57 @@
using System;
using System.IO;
namespace Ryujinx.Graphics.Gpu.Shader.DiskCache
{
/// <summary>
/// Output streams for the disk shader cache.
/// </summary>
class DiskCacheOutputStreams : IDisposable
{
/// <summary>
/// Shared table of contents (TOC) file stream.
/// </summary>
public readonly FileStream TocFileStream;
/// <summary>
/// Shared data file stream.
/// </summary>
public readonly FileStream DataFileStream;
/// <summary>
/// Host table of contents (TOC) file stream.
/// </summary>
public readonly FileStream HostTocFileStream;
/// <summary>
/// Host data file stream.
/// </summary>
public readonly FileStream HostDataFileStream;
/// <summary>
/// Creates a new instance of a disk cache output stream container.
/// </summary>
/// <param name="tocFileStream">Stream for the shared table of contents file</param>
/// <param name="dataFileStream">Stream for the shared data file</param>
/// <param name="hostTocFileStream">Stream for the host table of contents file</param>
/// <param name="hostDataFileStream">Stream for the host data file</param>
public DiskCacheOutputStreams(FileStream tocFileStream, FileStream dataFileStream, FileStream hostTocFileStream, FileStream hostDataFileStream)
{
TocFileStream = tocFileStream;
DataFileStream = dataFileStream;
HostTocFileStream = hostTocFileStream;
HostDataFileStream = hostDataFileStream;
}
/// <summary>
/// Disposes the output file streams.
/// </summary>
public void Dispose()
{
TocFileStream.Dispose();
DataFileStream.Dispose();
HostTocFileStream.Dispose();
HostDataFileStream.Dispose();
}
}
}
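The naming above suggests the usual table-of-contents pattern: the data file holds the payloads, and the TOC holds small fixed-size records pointing into it. The sketch below assumes a minimal (offset, size) record layout purely for illustration; the real entry format is defined by the host storage code, not by this container.

using System.IO;
using System.Text;

static class TocDataPairSketch
{
    // Appends a payload blob to the data stream and records where it landed in the
    // table of contents, so it can later be located without parsing the data file.
    // The (offset, size) record layout is an assumption for illustration only.
    public static void Append(Stream tocStream, Stream dataStream, byte[] payload)
    {
        long offset = dataStream.Seek(0, SeekOrigin.End);
        dataStream.Write(payload, 0, payload.Length);

        tocStream.Seek(0, SeekOrigin.End);
        using var tocWriter = new BinaryWriter(tocStream, Encoding.UTF8, leaveOpen: true);
        tocWriter.Write(offset);
        tocWriter.Write(payload.Length);
    }
}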

View file

@ -0,0 +1,672 @@
using Ryujinx.Common.Logging;
using Ryujinx.Graphics.GAL;
using Ryujinx.Graphics.Shader;
using Ryujinx.Graphics.Shader.Translation;
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.IO;
using System.Threading;
using static Ryujinx.Graphics.Gpu.Shader.ShaderCache;
namespace Ryujinx.Graphics.Gpu.Shader.DiskCache
{
class ParallelDiskCacheLoader
{
private const int ThreadCount = 8;
private readonly GpuContext _context;
private readonly ShaderCacheHashTable _graphicsCache;
private readonly ComputeShaderCacheHashTable _computeCache;
private readonly DiskCacheHostStorage _hostStorage;
private readonly CancellationToken _cancellationToken;
private readonly Action<ShaderCacheState, int, int> _stateChangeCallback;
/// <summary>
/// Indicates if the cache load is still active, that is, it has not been cancelled.
/// </summary>
public bool Active => !_cancellationToken.IsCancellationRequested;
private bool _needsHostRegen;
/// <summary>
/// Number of shaders that failed to compile from the cache.
/// </summary>
public int ErrorCount { get; private set; }
/// <summary>
/// Program validation entry.
/// </summary>
private struct ProgramEntry
{
/// <summary>
/// Cached shader program.
/// </summary>
public readonly CachedShaderProgram CachedProgram;
/// <summary>
/// Host program.
/// </summary>
public readonly IProgram HostProgram;
/// <summary>
/// Program index.
/// </summary>
public readonly int ProgramIndex;
/// <summary>
/// Indicates if the program is a compute shader.
/// </summary>
public readonly bool IsCompute;
/// <summary>
/// Indicates if the program is a host binary shader.
/// </summary>
public readonly bool IsBinary;
/// <summary>
/// Creates a new program validation entry.
/// </summary>
/// <param name="cachedProgram">Cached shader program</param>
/// <param name="hostProgram">Host program</param>
/// <param name="programIndex">Program index</param>
/// <param name="isCompute">Indicates if the program is a compute shader</param>
/// <param name="isBinary">Indicates if the program is a host binary shader</param>
public ProgramEntry(
CachedShaderProgram cachedProgram,
IProgram hostProgram,
int programIndex,
bool isCompute,
bool isBinary)
{
CachedProgram = cachedProgram;
HostProgram = hostProgram;
ProgramIndex = programIndex;
IsCompute = isCompute;
IsBinary = isBinary;
}
}
/// <summary>
/// Translated shader compilation entry.
/// </summary>
private struct ProgramCompilation
{
/// <summary>
/// Translated shader stages.
/// </summary>
public readonly ShaderProgram[] TranslatedStages;
/// <summary>
/// Cached shaders.
/// </summary>
public readonly CachedShaderStage[] Shaders;
/// <summary>
/// Specialization state.
/// </summary>
public readonly ShaderSpecializationState SpecializationState;
/// <summary>
/// Program index.
/// </summary>
public readonly int ProgramIndex;
/// <summary>
/// Indicates if the program is a compute shader.
/// </summary>
public readonly bool IsCompute;
/// <summary>
/// Creates a new translated shader compilation entry.
/// </summary>
/// <param name="translatedStages">Translated shader stages</param>
/// <param name="shaders">Cached shaders</param>
/// <param name="specState">Specialization state</param>
/// <param name="programIndex">Program index</param>
/// <param name="isCompute">Indicates if the program is a compute shader</param>
public ProgramCompilation(
ShaderProgram[] translatedStages,
CachedShaderStage[] shaders,
ShaderSpecializationState specState,
int programIndex,
bool isCompute)
{
TranslatedStages = translatedStages;
Shaders = shaders;
SpecializationState = specState;
ProgramIndex = programIndex;
IsCompute = isCompute;
}
}
/// <summary>
/// Program translation entry.
/// </summary>
private struct AsyncProgramTranslation
{
/// <summary>
/// Cached shader stages.
/// </summary>
public readonly CachedShaderStage[] Shaders;
/// <summary>
/// Specialization state.
/// </summary>
public readonly ShaderSpecializationState SpecializationState;
/// <summary>
/// Program index.
/// </summary>
public readonly int ProgramIndex;
/// <summary>
/// Indicates if the program is a compute shader.
/// </summary>
public readonly bool IsCompute;
/// <summary>
/// Creates a new program translation entry.
/// </summary>
/// <param name="shaders">Cached shader stages</param>
/// <param name="specState">Specialization state</param>
/// <param name="programIndex">Program index</param>
/// <param name="isCompute">Indicates if the program is a compute shader</param>
public AsyncProgramTranslation(
CachedShaderStage[] shaders,
ShaderSpecializationState specState,
int programIndex,
bool isCompute)
{
Shaders = shaders;
SpecializationState = specState;
ProgramIndex = programIndex;
IsCompute = isCompute;
}
}
private readonly Queue<ProgramEntry> _validationQueue;
private readonly ConcurrentQueue<ProgramCompilation> _compilationQueue;
private readonly BlockingCollection<AsyncProgramTranslation> _asyncTranslationQueue;
private readonly SortedList<int, CachedShaderProgram> _programList;
private int _backendParallelCompileThreads;
private int _compiledCount;
private int _totalCount;
/// <summary>
/// Creates a new parallel disk cache loader.
/// </summary>
/// <param name="context">GPU context</param>
/// <param name="graphicsCache">Graphics shader cache</param>
/// <param name="computeCache">Compute shader cache</param>
/// <param name="hostStorage">Disk cache host storage</param>
/// <param name="cancellationToken">Cancellation token</param>
/// <param name="stateChangeCallback">Function to be called when there is a state change, reporting state, compiled and total shaders count</param>
public ParallelDiskCacheLoader(
GpuContext context,
ShaderCacheHashTable graphicsCache,
ComputeShaderCacheHashTable computeCache,
DiskCacheHostStorage hostStorage,
CancellationToken cancellationToken,
Action<ShaderCacheState, int, int> stateChangeCallback)
{
_context = context;
_graphicsCache = graphicsCache;
_computeCache = computeCache;
_hostStorage = hostStorage;
_cancellationToken = cancellationToken;
_stateChangeCallback = stateChangeCallback;
_validationQueue = new Queue<ProgramEntry>();
_compilationQueue = new ConcurrentQueue<ProgramCompilation>();
_asyncTranslationQueue = new BlockingCollection<AsyncProgramTranslation>(ThreadCount);
_programList = new SortedList<int, CachedShaderProgram>();
_backendParallelCompileThreads = Math.Min(Environment.ProcessorCount, 8); // Must be kept in sync with the backend code.
}
/// <summary>
/// Loads all shaders from the cache.
/// </summary>
public void LoadShaders()
{
Thread[] workThreads = new Thread[ThreadCount];
for (int index = 0; index < ThreadCount; index++)
{
workThreads[index] = new Thread(ProcessAsyncQueue)
{
Name = $"Gpu.AsyncTranslationThread.{index}"
};
}
int programCount = _hostStorage.GetProgramCount();
_compiledCount = 0;
_totalCount = programCount;
_stateChangeCallback(ShaderCacheState.Start, 0, programCount);
Logger.Info?.Print(LogClass.Gpu, $"Loading {programCount} shaders from the cache...");
for (int index = 0; index < ThreadCount; index++)
{
workThreads[index].Start(_cancellationToken);
}
try
{
_hostStorage.LoadShaders(_context, this);
}
catch (DiskCacheLoadException diskCacheLoadException)
{
Logger.Warning?.Print(LogClass.Gpu, $"Error loading the shader cache. {diskCacheLoadException.Message}");
// If we can't even access the file, then we also can't rebuild.
if (diskCacheLoadException.Result != DiskCacheLoadResult.NoAccess)
{
_needsHostRegen = true;
}
}
catch (InvalidDataException invalidDataException)
{
Logger.Warning?.Print(LogClass.Gpu, $"Error decompressing the shader cache file. {invalidDataException.Message}");
_needsHostRegen = true;
}
catch (IOException ioException)
{
Logger.Warning?.Print(LogClass.Gpu, $"Error reading the shader cache file. {ioException.Message}");
_needsHostRegen = true;
}
_asyncTranslationQueue.CompleteAdding();
for (int index = 0; index < ThreadCount; index++)
{
workThreads[index].Join();
}
CheckCompilationBlocking();
if (_needsHostRegen)
{
// Rebuild both shared and host cache files.
// Rebuilding shared is required because the shader information returned by the translator
// might have changed, and so we have to reconstruct the file with the new information.
try
{
_hostStorage.ClearSharedCache();
_hostStorage.ClearHostCache(_context);
if (_programList.Count != 0)
{
Logger.Info?.Print(LogClass.Gpu, $"Rebuilding {_programList.Count} shaders...");
using var streams = _hostStorage.GetOutputStreams(_context);
foreach (var kv in _programList)
{
if (!Active)
{
break;
}
CachedShaderProgram program = kv.Value;
_hostStorage.AddShader(_context, program, program.HostProgram.GetBinary(), streams);
}
Logger.Info?.Print(LogClass.Gpu, $"Rebuilt {_programList.Count} shaders successfully.");
}
else
{
_hostStorage.ClearGuestCache();
Logger.Info?.Print(LogClass.Gpu, "Shader cache deleted due to corruption.");
}
}
catch (DiskCacheLoadException diskCacheLoadException)
{
Logger.Warning?.Print(LogClass.Gpu, $"Error deleting the shader cache. {diskCacheLoadException.Message}");
}
catch (IOException ioException)
{
Logger.Warning?.Print(LogClass.Gpu, $"Error deleting the shader cache file. {ioException.Message}");
}
}
Logger.Info?.Print(LogClass.Gpu, "Shader cache loaded.");
_stateChangeCallback(ShaderCacheState.Loaded, programCount, programCount);
}
/// <summary>
/// Enqueues a host program for compilation.
/// </summary>
/// <param name="cachedProgram">Cached program</param>
/// <param name="hostProgram">Host program to be compiled</param>
/// <param name="programIndex">Program index</param>
/// <param name="isCompute">Indicates if the program is a compute shader</param>
public void QueueHostProgram(CachedShaderProgram cachedProgram, IProgram hostProgram, int programIndex, bool isCompute)
{
EnqueueForValidation(new ProgramEntry(cachedProgram, hostProgram, programIndex, isCompute, isBinary: true));
}
/// <summary>
/// Enqueues a guest program for compilation.
/// </summary>
/// <param name="shaders">Cached shader stages</param>
/// <param name="specState">Specialization state</param>
/// <param name="programIndex">Program index</param>
/// <param name="isCompute">Indicates if the program is a compute shader</param>
public void QueueGuestProgram(CachedShaderStage[] shaders, ShaderSpecializationState specState, int programIndex, bool isCompute)
{
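// The translation queue has a bounded capacity (set in the constructor), so this call
// blocks while the queue is full.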
_asyncTranslationQueue.Add(new AsyncProgramTranslation(shaders, specState, programIndex, isCompute));
}
/// <summary>
/// Checks the state of programs that have already been compiled,
/// and adds them to the cache if the compilation was successful.
/// </summary>
public void CheckCompilation()
{
ProcessCompilationQueue();
// Process programs that already finished compiling.
// If not yet compiled, do nothing. This avoids blocking to wait for shader compilation.
while (_validationQueue.TryPeek(out ProgramEntry entry))
{
ProgramLinkStatus result = entry.HostProgram.CheckProgramLink(false);
if (result != ProgramLinkStatus.Incomplete)
{
ProcessCompiledProgram(ref entry, result);
_validationQueue.Dequeue();
}
else
{
break;
}
}
}
/// <summary>
/// Waits until all programs finish compiling, then adds the ones
/// with successful compilation to the cache.
/// </summary>
private void CheckCompilationBlocking()
{
ProcessCompilationQueue();
while (_validationQueue.TryDequeue(out ProgramEntry entry) && Active)
{
ProcessCompiledProgram(ref entry, entry.HostProgram.CheckProgramLink(true), asyncCompile: false);
}
}
/// <summary>
/// Process a compiled program result.
/// </summary>
/// <param name="entry">Compiled program entry</param>
/// <param name="result">Compilation result</param>
/// <param name="asyncCompile">For failed host compilations, indicates if a guest compilation should be done asynchronously</param>
private void ProcessCompiledProgram(ref ProgramEntry entry, ProgramLinkStatus result, bool asyncCompile = true)
{
if (result == ProgramLinkStatus.Success)
{
// Compilation successful, add to memory cache.
if (entry.IsCompute)
{
_computeCache.Add(entry.CachedProgram);
}
else
{
_graphicsCache.Add(entry.CachedProgram);
}
if (!entry.IsBinary)
{
_needsHostRegen = true;
}
_programList.Add(entry.ProgramIndex, entry.CachedProgram);
SignalCompiled();
}
else if (entry.IsBinary)
{
// If this is a host binary and compilation failed,
// we still have a chance to recompile from the guest binary.
CachedShaderProgram program = entry.CachedProgram;
if (asyncCompile)
{
QueueGuestProgram(program.Shaders, program.SpecializationState, entry.ProgramIndex, entry.IsCompute);
}
else
{
RecompileFromGuestCode(program.Shaders, program.SpecializationState, entry.ProgramIndex, entry.IsCompute);
ProcessCompilationQueue();
}
}
else
{
// Failed to compile from both host and guest binary.
ErrorCount++;
SignalCompiled();
}
}
/// <summary>
/// Processes the queue of translated guest programs that should be compiled on the host.
/// </summary>
private void ProcessCompilationQueue()
{
while (_compilationQueue.TryDequeue(out ProgramCompilation compilation) && Active)
{
ShaderSource[] shaderSources = new ShaderSource[compilation.TranslatedStages.Length];
int fragmentOutputMap = -1;
for (int index = 0; index < compilation.TranslatedStages.Length; index++)
{
ShaderProgram shader = compilation.TranslatedStages[index];
shaderSources[index] = CreateShaderSource(shader);
if (shader.Info.Stage == ShaderStage.Fragment)
{
fragmentOutputMap = shader.Info.FragmentOutputMap;
}
}
IProgram hostProgram = _context.Renderer.CreateProgram(shaderSources, new ShaderInfo(fragmentOutputMap));
CachedShaderProgram program = new CachedShaderProgram(hostProgram, compilation.SpecializationState, compilation.Shaders);
EnqueueForValidation(new ProgramEntry(program, hostProgram, compilation.ProgramIndex, compilation.IsCompute, isBinary: false));
}
}
/// <summary>
/// Enqueues a program for validation, which will check if the program was compiled successfully.
/// </summary>
/// <param name="newEntry">Program entry to be validated</param>
private void EnqueueForValidation(ProgramEntry newEntry)
{
_validationQueue.Enqueue(newEntry);
// Do not allow more than N shader compilations in-flight, where N is the maximum number of threads
// the driver will be using for parallel compilation.
// Submitting more seems to cause the NVIDIA OpenGL driver to crash.
if (_validationQueue.Count >= _backendParallelCompileThreads && _validationQueue.TryDequeue(out ProgramEntry entry))
{
ProcessCompiledProgram(ref entry, entry.HostProgram.CheckProgramLink(true), asyncCompile: false);
}
}
/// <summary>
/// Processes the queue of programs that should be translated from guest code.
/// </summary>
/// <param name="state">Cancellation token</param>
private void ProcessAsyncQueue(object state)
{
CancellationToken ct = (CancellationToken)state;
try
{
foreach (AsyncProgramTranslation asyncCompilation in _asyncTranslationQueue.GetConsumingEnumerable(ct))
{
RecompileFromGuestCode(
asyncCompilation.Shaders,
asyncCompilation.SpecializationState,
asyncCompilation.ProgramIndex,
asyncCompilation.IsCompute);
}
}
catch (OperationCanceledException)
{
}
}
/// <summary>
/// Recompiles a program from guest code.
/// </summary>
/// <param name="shaders">Shader stages</param>
/// <param name="specState">Specialization state</param>
/// <param name="programIndex">Program index</param>
/// <param name="isCompute">Indicates if the program is a compute shader</param>
private void RecompileFromGuestCode(CachedShaderStage[] shaders, ShaderSpecializationState specState, int programIndex, bool isCompute)
{
try
{
if (isCompute)
{
RecompileComputeFromGuestCode(shaders, specState, programIndex);
}
else
{
RecompileGraphicsFromGuestCode(shaders, specState, programIndex);
}
}
catch (DiskCacheLoadException diskCacheLoadException)
{
Logger.Error?.Print(LogClass.Gpu, $"Error translating guest shader. {diskCacheLoadException.Message}");
ErrorCount++;
SignalCompiled();
}
}
/// <summary>
/// Recompiles a graphics program from guest code.
/// </summary>
/// <param name="shaders">Shader stages</param>
/// <param name="specState">Specialization state</param>
/// <param name="programIndex">Program index</param>
private void RecompileGraphicsFromGuestCode(CachedShaderStage[] shaders, ShaderSpecializationState specState, int programIndex)
{
ShaderSpecializationState newSpecState = new ShaderSpecializationState(specState.GraphicsState, specState.TransformFeedbackDescriptors);
ResourceCounts counts = new ResourceCounts();
TranslatorContext[] translatorContexts = new TranslatorContext[Constants.ShaderStages + 1];
TranslatorContext nextStage = null;
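// Walk the stages from last to first so that each decoded stage can be linked, via
// SetNextStage, to the stage that consumes its outputs before any translation happens.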
for (int stageIndex = Constants.ShaderStages - 1; stageIndex >= 0; stageIndex--)
{
CachedShaderStage shader = shaders[stageIndex + 1];
if (shader != null)
{
byte[] guestCode = shader.Code;
byte[] cb1Data = shader.Cb1Data;
DiskCacheGpuAccessor gpuAccessor = new DiskCacheGpuAccessor(_context, guestCode, cb1Data, specState, newSpecState, counts, stageIndex);
TranslatorContext currentStage = DecodeGraphicsShader(gpuAccessor, DefaultFlags, 0);
if (nextStage != null)
{
currentStage.SetNextStage(nextStage);
}
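// Slot 0 optionally holds a second vertex program, decoded with the VertexA flag and
// merged with the main vertex shader (slot 1) when it is translated.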
if (stageIndex == 0 && shaders[0] != null)
{
byte[] guestCodeA = shaders[0].Code;
byte[] cb1DataA = shaders[0].Cb1Data;
DiskCacheGpuAccessor gpuAccessorA = new DiskCacheGpuAccessor(_context, guestCodeA, cb1DataA, specState, newSpecState, counts, 0);
translatorContexts[0] = DecodeGraphicsShader(gpuAccessorA, DefaultFlags | TranslationFlags.VertexA, 0);
}
translatorContexts[stageIndex + 1] = currentStage;
nextStage = currentStage;
}
}
List<ShaderProgram> translatedStages = new List<ShaderProgram>();
for (int stageIndex = 0; stageIndex < Constants.ShaderStages; stageIndex++)
{
TranslatorContext currentStage = translatorContexts[stageIndex + 1];
if (currentStage != null)
{
ShaderProgram program;
byte[] guestCode = shaders[stageIndex + 1].Code;
byte[] cb1Data = shaders[stageIndex + 1].Cb1Data;
if (stageIndex == 0 && shaders[0] != null)
{
program = currentStage.Translate(translatorContexts[0]);
byte[] guestCodeA = shaders[0].Code;
byte[] cb1DataA = shaders[0].Cb1Data;
shaders[0] = new CachedShaderStage(null, guestCodeA, cb1DataA);
shaders[1] = new CachedShaderStage(program.Info, guestCode, cb1Data);
}
else
{
program = currentStage.Translate();
shaders[stageIndex + 1] = new CachedShaderStage(program.Info, guestCode, cb1Data);
}
if (program != null)
{
translatedStages.Add(program);
}
}
}
_compilationQueue.Enqueue(new ProgramCompilation(translatedStages.ToArray(), shaders, newSpecState, programIndex, isCompute: false));
}
/// <summary>
/// Recompiles a compute program from guest code.
/// </summary>
/// <param name="shaders">Shader stages</param>
/// <param name="specState">Specialization state</param>
/// <param name="programIndex">Program index</param>
private void RecompileComputeFromGuestCode(CachedShaderStage[] shaders, ShaderSpecializationState specState, int programIndex)
{
CachedShaderStage shader = shaders[0];
ResourceCounts counts = new ResourceCounts();
ShaderSpecializationState newSpecState = new ShaderSpecializationState(specState.ComputeState);
DiskCacheGpuAccessor gpuAccessor = new DiskCacheGpuAccessor(_context, shader.Code, shader.Cb1Data, specState, newSpecState, counts, 0);
TranslatorContext translatorContext = DecodeComputeShader(gpuAccessor, 0);
ShaderProgram program = translatorContext.Translate();
shaders[0] = new CachedShaderStage(program.Info, shader.Code, shader.Cb1Data);
_compilationQueue.Enqueue(new ProgramCompilation(new[] { program }, shaders, newSpecState, programIndex, isCompute: true));
}
/// <summary>
/// Signals that compilation of a program has finished successfully,
/// or that it failed and guest recompilation has also been attempted.
/// </summary>
private void SignalCompiled()
{
_stateChangeCallback(ShaderCacheState.Loading, ++_compiledCount, _totalCount);
}
}
}
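Stepping back, ParallelDiskCacheLoader combines a bounded BlockingCollection, a fixed pool of translation threads and a cancellation token into a small pipeline. The stripped-down sketch below shows that shape with placeholder work items; the type names and the integer "work" are illustrative only, not part of the changeset.

using System;
using System.Collections.Concurrent;
using System.Threading;

static class BoundedTranslationPipelineSketch
{
    private const int ThreadCount = 8;

    public static void Run(CancellationToken cancellationToken)
    {
        // Producers block once ThreadCount items are pending, keeping the backlog small
        // when the worker threads cannot drain it fast enough.
        using var queue = new BlockingCollection<int>(ThreadCount);

        Thread[] workers = new Thread[ThreadCount];

        for (int index = 0; index < ThreadCount; index++)
        {
            workers[index] = new Thread(() =>
            {
                try
                {
                    foreach (int item in queue.GetConsumingEnumerable(cancellationToken))
                    {
                        Console.WriteLine($"Translating item {item}"); // Stand-in for guest shader translation.
                    }
                }
                catch (OperationCanceledException)
                {
                    // Cancellation just stops the worker, as in ProcessAsyncQueue.
                }
            })
            { Name = $"Sketch.TranslationThread.{index}" };

            workers[index].Start();
        }

        try
        {
            for (int item = 0; item < 32; item++)
            {
                queue.Add(item, cancellationToken); // Blocks while ThreadCount items are already pending.
            }
        }
        catch (OperationCanceledException)
        {
        }

        queue.CompleteAdding();

        foreach (Thread worker in workers)
        {
            worker.Join();
        }
    }
}

With a bounded capacity, Add applies backpressure to the producer once the limit is reached, which is the same behaviour QueueGuestProgram relies on when it hands work to the translation threads.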