NVDRV: Implement sessions and initial implementation of SMMU

parent 2f0418c101
commit 7a9d1ad2f8
@@ -19,6 +19,8 @@
 #include "core/hle/ipc.h"
 #include "core/hle/kernel/k_handle_table.h"
 #include "core/hle/kernel/svc_common.h"
+#include "core/hle/kernel/k_auto_object.h"
+#include "core/hle/kernel/k_handle_table.h"

 union Result;

@@ -41,6 +43,8 @@ class KernelCore;
 class KHandleTable;
 class KProcess;
 class KServerSession;
+template <typename T>
+class KScopedAutoObject;
 class KThread;
 } // namespace Kernel

@@ -373,6 +377,10 @@ public:
         return nullptr;
     }

+    Kernel::KScopedAutoObject<Kernel::KAutoObject> GetObjectFromHandle(u32 handle) {
+        return GetClientHandleTable().GetObjectForIpc(handle, thread);
+    }
+
     [[nodiscard]] std::shared_ptr<SessionRequestManager> GetManager() const {
         return manager.lock();
     }
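Note: the new GetObjectFromHandle helper above lets an HLE request handler turn a copy handle received over IPC into a kernel object. A minimal sketch of how a handler might use it; the service and method names here are illustrative, only GetCopyHandle and DynamicCast are taken from this commit:

    // Sketch: resolving a process copy handle inside an HLE request handler.
    void SomeService::SomeRequest(HLERequestContext& ctx) {
        const auto process_handle{ctx.GetCopyHandle(0)};
        // GetObjectFromHandle yields a KScopedAutoObject<KAutoObject>; DynamicCast
        // narrows it to the concrete type and returns nullptr on a mismatch.
        auto object = ctx.GetObjectFromHandle(process_handle);
        auto* process = object->DynamicCast<Kernel::KProcess*>();
        if (process == nullptr) {
            // Unexpected object type; bail out here.
        }
    }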
@@ -2,19 +2,30 @@
 // SPDX-FileCopyrightText: 2022 Skyline Team and Contributors
 // SPDX-License-Identifier: GPL-3.0-or-later

+#include <atomic>
+#include <deque>
+#include <mutex>
+
+#include "core/hle/kernel/k_process.h"
 #include "core/hle/service/nvdrv/core/container.h"
 #include "core/hle/service/nvdrv/core/nvmap.h"
 #include "core/hle/service/nvdrv/core/syncpoint_manager.h"
+#include "core/memory.h"
 #include "video_core/host1x/host1x.h"

 namespace Service::Nvidia::NvCore {

 struct ContainerImpl {
     explicit ContainerImpl(Tegra::Host1x::Host1x& host1x_)
-        : file{host1x_}, manager{host1x_}, device_file_data{} {}
+        : host1x{host1x_}, file{host1x_}, manager{host1x_}, device_file_data{} {}
+    Tegra::Host1x::Host1x& host1x;
     NvMap file;
     SyncpointManager manager;
     Container::Host1xDeviceFileData device_file_data;
+    std::deque<Session> sessions;
+    size_t new_ids{};
+    std::deque<size_t> id_pool;
+    std::mutex session_guard;
 };

 Container::Container(Tegra::Host1x::Host1x& host1x_) {

@@ -23,6 +34,37 @@ Container::Container(Tegra::Host1x::Host1x& host1x_) {

 Container::~Container() = default;

+size_t Container::OpenSession(Kernel::KProcess* process) {
+    std::scoped_lock lk(impl->session_guard);
+    size_t new_id{};
+    auto* memory_interface = &process->GetMemory();
+    auto& smmu = impl->host1x.MemoryManager();
+    auto smmu_id = smmu.RegisterProcess(memory_interface);
+    if (!impl->id_pool.empty()) {
+        new_id = impl->id_pool.front();
+        impl->id_pool.pop_front();
+        impl->sessions[new_id] = Session{new_id, process, smmu_id};
+    } else {
+        impl->sessions.emplace_back(new_id, process, smmu_id);
+        new_id = impl->new_ids++;
+    }
+    LOG_CRITICAL(Debug, "Created Session {}", new_id);
+    return new_id;
+}
+
+void Container::CloseSession(size_t id) {
+    std::scoped_lock lk(impl->session_guard);
+    auto& smmu = impl->host1x.MemoryManager();
+    smmu.UnregisterProcess(impl->sessions[id].smmu_id);
+    impl->id_pool.emplace_front(id);
+    LOG_CRITICAL(Debug, "Closed Session {}", id);
+}
+
+Session* Container::GetSession(size_t id) {
+    std::atomic_thread_fence(std::memory_order_acquire);
+    return &impl->sessions[id];
+}
+
 NvMap& Container::GetNvMapFile() {
     return impl->file;
 }
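Note: a session ties a guest KProcess to an SMMU registration and hands back a reusable id (ids are recycled through id_pool). A rough sketch of the intended lifecycle from a caller's point of view, assuming a Container and a process pointer are already available; error handling is omitted:

    // Sketch: how a service-side caller is expected to drive the session API.
    size_t OpenAndUseSession(Service::Nvidia::NvCore::Container& container,
                             Kernel::KProcess* process) {
        // Registers the process memory with the Host1x SMMU and reserves an id.
        const size_t session_id = container.OpenSession(process);

        // Devices later look the session up to reach the owning process.
        Service::Nvidia::NvCore::Session* session = container.GetSession(session_id);
        (void)session->process; // e.g. used for page table lock/unlock

        return session_id;      // CloseSession(session_id) unregisters the process
    }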
@@ -10,6 +10,10 @@
 #include "core/hle/service/nvdrv/nvdata.h"

+namespace Kernel {
+class KProcess;
+}
+
 namespace Tegra::Host1x {
 class Host1x;
 } // namespace Tegra::Host1x

@@ -21,11 +25,22 @@ class SyncpointManager;

 struct ContainerImpl;

+struct Session {
+    size_t id;
+    Kernel::KProcess* process;
+    size_t smmu_id;
+};
+
 class Container {
 public:
     explicit Container(Tegra::Host1x::Host1x& host1x);
     ~Container();

+    size_t OpenSession(Kernel::KProcess* process);
+    void CloseSession(size_t id);
+
+    Session* GetSession(size_t id);
+
     NvMap& GetNvMapFile();

     const NvMap& GetNvMapFile() const;
@@ -18,8 +18,6 @@ NvMap::Handle::Handle(u64 size_, Id id_)
 }

 NvResult NvMap::Handle::Alloc(Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress) {
-    std::scoped_lock lock(mutex);
-
     // Handles cannot be allocated twice
     if (allocated) {
         return NvResult::AccessDenied;

@@ -79,10 +77,11 @@ void NvMap::UnmapHandle(Handle& handle_description) {
     }

     // Free and unmap the handle from the SMMU
-    host1x.MemoryManager().Unmap(static_cast<GPUVAddr>(handle_description.pin_virt_address),
+    auto& smmu = host1x.MemoryManager();
+    smmu.Unmap(static_cast<DAddr>(handle_description.pin_virt_address),
                handle_description.aligned_size);
-    host1x.Allocator().Free(handle_description.pin_virt_address,
-                            static_cast<u32>(handle_description.aligned_size));
+    smmu.Free(handle_description.pin_virt_address,
+              static_cast<size_t>(handle_description.aligned_size));
     handle_description.pin_virt_address = 0;
 }

@@ -133,7 +132,32 @@ VAddr NvMap::GetHandleAddress(Handle::Id handle) {
     }
 }

-u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
+NvResult NvMap::AllocateHandle(Handle::Id handle, Handle::Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, size_t session_id) {
+    auto handle_description{GetHandle(handle)};
+    if (!handle_description) [[unlikely]] {
+        return NvResult::BadParameter;
+    }
+
+    if (handle_description->allocated) [[unlikely]] {
+        return NvResult::InsufficientMemory;
+    }
+
+    std::scoped_lock lock(handle_description->mutex);
+    NvResult result = handle_description->Alloc(pFlags, pAlign, pKind, pAddress);
+    if (result != NvResult::Success) {
+        return result;
+    }
+    auto& smmu = host1x.MemoryManager();
+    size_t total_size = static_cast<size_t>(handle_description->aligned_size);
+    handle_description->d_address = smmu.Allocate(total_size);
+    if (handle_description->d_address == 0) {
+        return NvResult::InsufficientMemory;
+    }
+    smmu.Map(handle_description->d_address, handle_description->address, total_size, session_id);
+    return NvResult::Success;
+}
+
+u32 NvMap::PinHandle(NvMap::Handle::Id handle, size_t session_id) {
     auto handle_description{GetHandle(handle)};
     if (!handle_description) [[unlikely]] {
         return 0;

@@ -157,11 +181,10 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
     }

     // If not then allocate some space and map it
-    u32 address{};
-    auto& smmu_allocator = host1x.Allocator();
-    auto& smmu_memory_manager = host1x.MemoryManager();
-    while ((address = smmu_allocator.Allocate(
-                static_cast<u32>(handle_description->aligned_size))) == 0) {
+    DAddr address{};
+    auto& smmu = host1x.MemoryManager();
+    while ((address = smmu.AllocatePinned(
+                static_cast<size_t>(handle_description->aligned_size))) == 0) {
         // Free handles until the allocation succeeds
         std::scoped_lock queueLock(unmap_queue_lock);
         if (auto freeHandleDesc{unmap_queue.front()}) {

@@ -175,9 +198,9 @@ u32 NvMap::PinHandle(NvMap::Handle::Id handle) {
            }
        }

-        smmu_memory_manager.Map(static_cast<GPUVAddr>(address), handle_description->address,
-                                handle_description->aligned_size);
-        handle_description->pin_virt_address = address;
+        smmu.Map(address, handle_description->address, handle_description->aligned_size,
+                 session_id);
+        handle_description->pin_virt_address = static_cast<u32>(address);
     }

     handle_description->pins++;

@@ -236,6 +259,11 @@ std::optional<NvMap::FreeInfo> NvMap::FreeHandle(Handle::Id handle, bool interna
             std::scoped_lock queueLock(unmap_queue_lock);
             UnmapHandle(*handle_description);
         }
+        if (handle_description->allocated) {
+            auto& smmu = host1x.MemoryManager();
+            smmu.Free(handle_description->d_address, handle_description->aligned_size);
+            smmu.Unmap(handle_description->d_address, handle_description->aligned_size);
+        }

         handle_description->pins = 0;
     }
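Note: nvmap allocation now also reserves a device address from the SMMU and maps the guest pages into it under the caller's session; pinning likewise takes a session id so the buffer lands in the right address space. A sketch of the flow a device file is expected to follow, with the handle id, alignment and flag values chosen only for illustration:

    // Sketch: create/pin flow against the NvCore::NvMap introduced above.
    // `file` is the NvCore::NvMap instance, `session_id` comes from OnOpen.
    NvResult CreateAndPin(Service::Nvidia::NvCore::NvMap& file, u32 handle_id,
                          u64 guest_address, size_t session_id) {
        using Service::Nvidia::NvResult;
        // Allocate backing: picks a DAddr via smmu.Allocate() and maps the guest
        // pages into it with smmu.Map(..., session_id).
        const NvResult result =
            file.AllocateHandle(handle_id, {}, 0x1000, 0, guest_address, session_id);
        if (result != NvResult::Success) {
            return result;
        }
        // Pinning returns the SMMU address that Host1x engines (nvdec/vic) consume.
        const u32 smmu_address = file.PinHandle(handle_id, session_id);
        return smmu_address != 0 ? NvResult::Success : NvResult::InsufficientMemory;
    }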
@@ -61,7 +61,9 @@ public:
         } flags{};
         static_assert(sizeof(Flags) == sizeof(u32));

-        u64 address{};   //!< The memory location in the guest's AS that this handle corresponds to,
+        VAddr address{}; //!< The memory location in the guest's AS that this handle corresponds to,
+                         //!< this can also be in the nvdrv tmem
+        DAddr d_address{}; //!< The memory location in the device's AS that this handle corresponds to,
                          //!< this can also be in the nvdrv tmem
         bool is_shared_mem_mapped{}; //!< If this nvmap has been mapped with the MapSharedMem IPC
                                      //!< call

@@ -125,7 +127,15 @@ public:
      * number of calls to `UnpinHandle`
      * @return The SMMU virtual address that the handle has been mapped to
      */
-    u32 PinHandle(Handle::Id handle);
+    u32 PinHandle(Handle::Id handle, size_t session_id);
+
+    /**
+     * @brief Maps a handle into the SMMU address space
+     * @note This operation is refcounted, the number of calls to this must eventually match the
+     * number of calls to `UnpinHandle`
+     * @return The SMMU virtual address that the handle has been mapped to
+     */
+    NvResult AllocateHandle(Handle::Id handle, Handle::Flags pFlags, u32 pAlign, u8 pKind, u64 pAddress, size_t session_id);

     /**
      * @brief When this has been called an equal number of times to `PinHandle` for the supplied
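Note: the handle now carries two addresses: `address` is the guest (CPU) virtual address backing the allocation, while `d_address` is where the SMMU maps it in the device address space. A one-liner illustrating the relationship after a successful AllocateHandle, as a sketch only:

    // Sketch: after AllocateHandle succeeds, both addresses are populated.
    // handle.address   : guest VAddr supplied by the application via IocAlloc
    // handle.d_address : DAddr chosen by smmu.Allocate() and mapped to address
    bool IsBackedByDeviceMapping(const Service::Nvidia::NvCore::NvMap::Handle& handle) {
        return handle.allocated && handle.d_address != 0;
    }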
@@ -62,7 +62,7 @@ public:
      * Called once a device is opened
      * @param fd The device fd
      */
-    virtual void OnOpen(DeviceFD fd) = 0;
+    virtual void OnOpen(size_t session_id, DeviceFD fd) = 0;

     /**
      * Called once a device is closed

@@ -35,7 +35,7 @@ NvResult nvdisp_disp0::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
     return NvResult::NotImplemented;
 }

-void nvdisp_disp0::OnOpen(DeviceFD fd) {}
+void nvdisp_disp0::OnOpen(size_t session_id, DeviceFD fd) {}
 void nvdisp_disp0::OnClose(DeviceFD fd) {}

 void nvdisp_disp0::flip(u32 buffer_handle, u32 offset, android::PixelFormat format, u32 width,

@@ -32,7 +32,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;

-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(size_t session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;

     /// Performs a screen flip, drawing the buffer pointed to by the handle.

@@ -86,7 +86,7 @@ NvResult nvhost_as_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> i
     return NvResult::NotImplemented;
 }

-void nvhost_as_gpu::OnOpen(DeviceFD fd) {}
+void nvhost_as_gpu::OnOpen(size_t session_id, DeviceFD fd) {}
 void nvhost_as_gpu::OnClose(DeviceFD fd) {}

 NvResult nvhost_as_gpu::AllocAsEx(IoctlAllocAsEx& params) {

@@ -55,7 +55,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;

-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(size_t session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;

     Kernel::KEvent* QueryEvent(u32 event_id) override;

@@ -76,7 +76,7 @@ NvResult nvhost_ctrl::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inp
     return NvResult::NotImplemented;
 }

-void nvhost_ctrl::OnOpen(DeviceFD fd) {}
+void nvhost_ctrl::OnOpen(size_t session_id, DeviceFD fd) {}

 void nvhost_ctrl::OnClose(DeviceFD fd) {}

@@ -32,7 +32,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;

-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(size_t session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;

     Kernel::KEvent* QueryEvent(u32 event_id) override;

@@ -82,7 +82,7 @@ NvResult nvhost_ctrl_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8>
     return NvResult::NotImplemented;
 }

-void nvhost_ctrl_gpu::OnOpen(DeviceFD fd) {}
+void nvhost_ctrl_gpu::OnOpen(size_t session_id, DeviceFD fd) {}
 void nvhost_ctrl_gpu::OnClose(DeviceFD fd) {}

 NvResult nvhost_ctrl_gpu::GetCharacteristics1(IoctlCharacteristics& params) {

@@ -28,7 +28,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;

-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(size_t session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;

     Kernel::KEvent* QueryEvent(u32 event_id) override;

@@ -120,7 +120,7 @@ NvResult nvhost_gpu::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
     return NvResult::NotImplemented;
 }

-void nvhost_gpu::OnOpen(DeviceFD fd) {}
+void nvhost_gpu::OnOpen(size_t session_id, DeviceFD fd) {}
 void nvhost_gpu::OnClose(DeviceFD fd) {}

 NvResult nvhost_gpu::SetNVMAPfd(IoctlSetNvmapFD& params) {

@@ -47,7 +47,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;

-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(size_t session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;

     Kernel::KEvent* QueryEvent(u32 event_id) override;
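Note: every nvdrv device now receives the session id when it is opened. Devices that only forward ioctls ignore it, while the stateful devices that follow (nvhost_nvdec, nvhost_vic, nvmap) remember it per fd so later ioctls can hand it back to nvmap. A minimal sketch of that pattern; the device name is illustrative:

    // Sketch: per-fd session bookkeeping used by the stateful devices.
    // `sessions` is the std::unordered_map<DeviceFD, size_t> member added below.
    void some_device::OnOpen(size_t session_id, DeviceFD fd) {
        sessions[fd] = session_id; // remember which session opened this fd
    }

    void some_device::OnClose(DeviceFD fd) {
        if (auto it = sessions.find(fd); it != sessions.end()) {
            sessions.erase(it);    // drop the mapping once the fd goes away
        }
    }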
@@ -35,7 +35,7 @@ NvResult nvhost_nvdec::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> in
         case 0x7:
             return WrapFixed(this, &nvhost_nvdec::SetSubmitTimeout, input, output);
         case 0x9:
-            return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output);
+            return WrapFixedVariable(this, &nvhost_nvdec::MapBuffer, input, output, fd);
         case 0xa:
             return WrapFixedVariable(this, &nvhost_nvdec::UnmapBuffer, input, output);
         default:

@@ -68,9 +68,10 @@ NvResult nvhost_nvdec::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
     return NvResult::NotImplemented;
 }

-void nvhost_nvdec::OnOpen(DeviceFD fd) {
+void nvhost_nvdec::OnOpen(size_t session_id, DeviceFD fd) {
     LOG_INFO(Service_NVDRV, "NVDEC video stream started");
     system.SetNVDECActive(true);
+    sessions[fd] = session_id;
 }

 void nvhost_nvdec::OnClose(DeviceFD fd) {

@@ -81,6 +82,10 @@ void nvhost_nvdec::OnClose(DeviceFD fd) {
         system.GPU().ClearCdmaInstance(iter->second);
     }
     system.SetNVDECActive(false);
+    auto it = sessions.find(fd);
+    if (it != sessions.end()) {
+        sessions.erase(it);
+    }
 }

 } // namespace Service::Nvidia::Devices

@@ -20,7 +20,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;

-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(size_t session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 };

@@ -133,10 +133,10 @@ NvResult nvhost_nvdec_common::GetWaitbase(IoctlGetWaitbase& params) {
     return NvResult::Success;
 }

-NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries) {
+NvResult nvhost_nvdec_common::MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries, DeviceFD fd) {
     const size_t num_entries = std::min(params.num_entries, static_cast<u32>(entries.size()));
     for (size_t i = 0; i < num_entries; i++) {
-        entries[i].map_address = nvmap.PinHandle(entries[i].map_handle);
+        entries[i].map_address = nvmap.PinHandle(entries[i].map_handle, sessions[fd]);
     }

     return NvResult::Success;
@@ -5,6 +5,8 @@

 #include <deque>
 #include <vector>
+#include <unordered_map>
+
 #include "common/common_types.h"
 #include "common/swap.h"
 #include "core/hle/service/nvdrv/core/syncpoint_manager.h"

@@ -111,7 +113,7 @@ protected:
     NvResult Submit(IoctlSubmit& params, std::span<u8> input, DeviceFD fd);
     NvResult GetSyncpoint(IoctlGetSyncpoint& params);
     NvResult GetWaitbase(IoctlGetWaitbase& params);
-    NvResult MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries);
+    NvResult MapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries, DeviceFD fd);
     NvResult UnmapBuffer(IoctlMapBuffer& params, std::span<MapBufferEntry> entries);
     NvResult SetSubmitTimeout(u32 timeout);

@@ -125,6 +127,7 @@ protected:
     NvCore::NvMap& nvmap;
     NvCore::ChannelType channel_type;
     std::array<u32, MaxSyncPoints> device_syncpoints{};
+    std::unordered_map<DeviceFD, size_t> sessions;
 };
 }; // namespace Devices
 } // namespace Service::Nvidia

@@ -44,7 +44,7 @@ NvResult nvhost_nvjpg::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> in
     return NvResult::NotImplemented;
 }

-void nvhost_nvjpg::OnOpen(DeviceFD fd) {}
+void nvhost_nvjpg::OnOpen(size_t session_id, DeviceFD fd) {}
 void nvhost_nvjpg::OnClose(DeviceFD fd) {}

 NvResult nvhost_nvjpg::SetNVMAPfd(IoctlSetNvmapFD& params) {

@@ -22,7 +22,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;

-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(size_t session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;

 private:
@@ -33,7 +33,7 @@ NvResult nvhost_vic::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> inpu
         case 0x3:
             return WrapFixed(this, &nvhost_vic::GetWaitbase, input, output);
         case 0x9:
-            return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output);
+            return WrapFixedVariable(this, &nvhost_vic::MapBuffer, input, output, fd);
         case 0xa:
             return WrapFixedVariable(this, &nvhost_vic::UnmapBuffer, input, output);
         default:

@@ -68,7 +68,9 @@ NvResult nvhost_vic::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> inpu
     return NvResult::NotImplemented;
 }

-void nvhost_vic::OnOpen(DeviceFD fd) {}
+void nvhost_vic::OnOpen(size_t session_id, DeviceFD fd) {
+    sessions[fd] = session_id;
+}

 void nvhost_vic::OnClose(DeviceFD fd) {
     auto& host1x_file = core.Host1xDeviceFile();

@@ -76,6 +78,10 @@ void nvhost_vic::OnClose(DeviceFD fd) {
     if (iter != host1x_file.fd_to_id.end()) {
         system.GPU().ClearCdmaInstance(iter->second);
     }
+    auto it = sessions.find(fd);
+    if (it != sessions.end()) {
+        sessions.erase(it);
+    }
 }

 } // namespace Service::Nvidia::Devices

@@ -19,7 +19,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;

-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(size_t session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;
 };
 } // namespace Service::Nvidia::Devices
@@ -36,9 +36,9 @@ NvResult nvmap::Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input,
     case 0x3:
         return WrapFixed(this, &nvmap::IocFromId, input, output);
     case 0x4:
-        return WrapFixed(this, &nvmap::IocAlloc, input, output);
+        return WrapFixed(this, &nvmap::IocAlloc, input, output, fd);
     case 0x5:
-        return WrapFixed(this, &nvmap::IocFree, input, output);
+        return WrapFixed(this, &nvmap::IocFree, input, output, fd);
     case 0x9:
         return WrapFixed(this, &nvmap::IocParam, input, output);
     case 0xe:

@@ -67,8 +67,15 @@ NvResult nvmap::Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, st
     return NvResult::NotImplemented;
 }

-void nvmap::OnOpen(DeviceFD fd) {}
-void nvmap::OnClose(DeviceFD fd) {}
+void nvmap::OnOpen(size_t session_id, DeviceFD fd) {
+    sessions[fd] = session_id;
+}
+void nvmap::OnClose(DeviceFD fd) {
+    auto it = sessions.find(fd);
+    if (it != sessions.end()) {
+        sessions.erase(it);
+    }
+}

 NvResult nvmap::IocCreate(IocCreateParams& params) {
     LOG_DEBUG(Service_NVDRV, "called, size=0x{:08X}", params.size);

@@ -87,7 +94,7 @@ NvResult nvmap::IocCreate(IocCreateParams& params) {
     return NvResult::Success;
 }

-NvResult nvmap::IocAlloc(IocAllocParams& params) {
+NvResult nvmap::IocAlloc(IocAllocParams& params, DeviceFD fd) {
     LOG_DEBUG(Service_NVDRV, "called, addr={:X}", params.address);

     if (!params.handle) {

@@ -116,15 +123,15 @@ NvResult nvmap::IocAlloc(IocAllocParams& params) {
         return NvResult::InsufficientMemory;
     }

-    const auto result =
-        handle_description->Alloc(params.flags, params.align, params.kind, params.address);
+    const auto result = file.AllocateHandle(params.handle, params.flags, params.align, params.kind,
+                                            params.address, sessions[fd]);
     if (result != NvResult::Success) {
         LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
         return result;
     }
     bool is_out_io{};
-    ASSERT(system.ApplicationProcess()
-               ->GetPageTable()
+    auto process = container.GetSession(sessions[fd])->process;
+    ASSERT(process->GetPageTable()
                .LockForMapDeviceAddressSpace(&is_out_io, handle_description->address,
                                              handle_description->size,
                                              Kernel::KMemoryPermission::None, true, false)

@@ -224,7 +231,7 @@ NvResult nvmap::IocParam(IocParamParams& params) {
     return NvResult::Success;
 }

-NvResult nvmap::IocFree(IocFreeParams& params) {
+NvResult nvmap::IocFree(IocFreeParams& params, DeviceFD fd) {
     LOG_DEBUG(Service_NVDRV, "called");

     if (!params.handle) {

@@ -233,9 +240,9 @@ NvResult nvmap::IocFree(IocFreeParams& params) {
     }

     if (auto freeInfo{file.FreeHandle(params.handle, false)}) {
+        auto process = container.GetSession(sessions[fd])->process;
         if (freeInfo->can_unlock) {
-            ASSERT(system.ApplicationProcess()
-                       ->GetPageTable()
+            ASSERT(process->GetPageTable()
                        .UnlockForDeviceAddressSpace(freeInfo->address, freeInfo->size)
                        .IsSuccess());
         }

@@ -33,7 +33,7 @@ public:
     NvResult Ioctl3(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output,
                     std::span<u8> inline_output) override;

-    void OnOpen(DeviceFD fd) override;
+    void OnOpen(size_t session_id, DeviceFD fd) override;
     void OnClose(DeviceFD fd) override;

     enum class HandleParameterType : u32_le {

@@ -100,11 +100,11 @@ public:
     static_assert(sizeof(IocGetIdParams) == 8, "IocGetIdParams has wrong size");

     NvResult IocCreate(IocCreateParams& params);
-    NvResult IocAlloc(IocAllocParams& params);
+    NvResult IocAlloc(IocAllocParams& params, DeviceFD fd);
     NvResult IocGetId(IocGetIdParams& params);
     NvResult IocFromId(IocFromIdParams& params);
     NvResult IocParam(IocParamParams& params);
-    NvResult IocFree(IocFreeParams& params);
+    NvResult IocFree(IocFreeParams& params, DeviceFD fd);

 private:
     /// Id to use for the next handle that is created.

@@ -115,6 +115,7 @@ private:

     NvCore::Container& container;
     NvCore::NvMap& file;
+    std::unordered_map<DeviceFD, size_t> sessions;
 };

 } // namespace Service::Nvidia::Devices
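Note: with a per-fd session recorded, IocAlloc and IocFree no longer assume the application process; they resolve the owning process through the container session before locking or unlocking its page table. That lookup in isolation, as a sketch with assumed local names:

    // Sketch: how the nvmap device resolves the caller's process from its session.
    // `container` is the NvCore::Container reference the device already holds.
    Kernel::KProcess* ProcessForFd(Service::Nvidia::NvCore::Container& container,
                                   std::unordered_map<Service::Nvidia::DeviceFD, size_t>& sessions,
                                   Service::Nvidia::DeviceFD fd) {
        const size_t session_id = sessions[fd];           // recorded in OnOpen
        return container.GetSession(session_id)->process; // owner of the mapped memory
    }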
@@ -45,13 +45,22 @@ void EventInterface::FreeEvent(Kernel::KEvent* event) {
 void LoopProcess(Nvnflinger::Nvnflinger& nvnflinger, Core::System& system) {
     auto server_manager = std::make_unique<ServerManager>(system);
     auto module = std::make_shared<Module>(system);
-    server_manager->RegisterNamedService("nvdrv", std::make_shared<NVDRV>(system, module, "nvdrv"));
-    server_manager->RegisterNamedService("nvdrv:a",
-                                         std::make_shared<NVDRV>(system, module, "nvdrv:a"));
-    server_manager->RegisterNamedService("nvdrv:s",
-                                         std::make_shared<NVDRV>(system, module, "nvdrv:s"));
-    server_manager->RegisterNamedService("nvdrv:t",
-                                         std::make_shared<NVDRV>(system, module, "nvdrv:t"));
+    const auto NvdrvInterfaceFactoryForApplication = [&, module] {
+        return std::make_shared<NVDRV>(system, module, "nvdrv");
+    };
+    const auto NvdrvInterfaceFactoryForApplets = [&, module] {
+        return std::make_shared<NVDRV>(system, module, "nvdrv:a");
+    };
+    const auto NvdrvInterfaceFactoryForSysmodules = [&, module] {
+        return std::make_shared<NVDRV>(system, module, "nvdrv:a");
+    };
+    const auto NvdrvInterfaceFactory = [&, module] {
+        return std::make_shared<NVDRV>(system, module, "nvdrv:t");
+    };
+    server_manager->RegisterNamedService("nvdrv", NvdrvInterfaceFactoryForApplication);
+    server_manager->RegisterNamedService("nvdrv:a", NvdrvInterfaceFactoryForApplets);
+    server_manager->RegisterNamedService("nvdrv:s", NvdrvInterfaceFactoryForSysmodules);
+    server_manager->RegisterNamedService("nvdrv:t", NvdrvInterfaceFactory);
     server_manager->RegisterNamedService("nvmemp", std::make_shared<NVMEMP>(system));
     nvnflinger.SetNVDrvInstance(module);
     ServerManager::RunServer(std::move(server_manager));

@@ -113,7 +122,7 @@ NvResult Module::VerifyFD(DeviceFD fd) const {
     return NvResult::Success;
 }

-DeviceFD Module::Open(const std::string& device_name) {
+DeviceFD Module::Open(const std::string& device_name, size_t session_id) {
     auto it = builders.find(device_name);
     if (it == builders.end()) {
         LOG_ERROR(Service_NVDRV, "Trying to open unknown device {}", device_name);

@@ -124,7 +133,7 @@ DeviceFD Module::Open(const std::string& device_name) {
     auto& builder = it->second;
     auto device = builder(fd)->second;

-    device->OnOpen(fd);
+    device->OnOpen(session_id, fd);

     return fd;
 }

@@ -77,7 +77,7 @@ public:
     NvResult VerifyFD(DeviceFD fd) const;

     /// Opens a device node and returns a file descriptor to it.
-    DeviceFD Open(const std::string& device_name);
+    DeviceFD Open(const std::string& device_name, size_t session_id);

     /// Sends an ioctl command to the specified file descriptor.
     NvResult Ioctl1(DeviceFD fd, Ioctl command, std::span<const u8> input, std::span<u8> output);

@@ -93,6 +93,10 @@ public:

     NvResult QueryEvent(DeviceFD fd, u32 event_id, Kernel::KEvent*& event);

+    NvCore::Container& GetContainer() {
+        return container;
+    }
+
 private:
     friend class EventInterface;
     friend class Service::Nvnflinger::Nvnflinger;
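Note: RegisterNamedService now takes a factory instead of a single shared instance, so every connection to one of the nvdrv service names gets its own NVDRV interface object (and therefore its own session_id) while still sharing one Module. A condensed sketch of the same pattern; the helper name and the exact factory signature accepted by RegisterNamedService are assumptions, and the service-name strings are illustrative:

    // Sketch: factory-based registration so per-connection state is not shared.
    void RegisterNvdrvServices(ServerManager& server_manager, Core::System& system,
                               std::shared_ptr<Service::Nvidia::Module> module) {
        const auto factory = [&system, module](const char* name) {
            // Each accepted connection invokes this and gets a fresh NVDRV object.
            return [&system, module, name] {
                return std::make_shared<Service::Nvidia::NVDRV>(system, module, name);
            };
        };
        server_manager.RegisterNamedService("nvdrv", factory("nvdrv"));
        server_manager.RegisterNamedService("nvdrv:t", factory("nvdrv:t"));
    }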
@@ -3,14 +3,18 @@
 // SPDX-License-Identifier: GPL-3.0-or-later

 #include "common/logging/log.h"
+#include "common/scope_exit.h"
 #include "core/core.h"
 #include "core/hle/kernel/k_event.h"
+#include "core/hle/kernel/k_process.h"
 #include "core/hle/kernel/k_readable_event.h"
 #include "core/hle/service/ipc_helpers.h"
 #include "core/hle/service/nvdrv/nvdata.h"
 #include "core/hle/service/nvdrv/nvdrv.h"
 #include "core/hle/service/nvdrv/nvdrv_interface.h"

+#pragma optimize("", off)
+
 namespace Service::Nvidia {

 void NVDRV::Open(HLERequestContext& ctx) {

@@ -37,7 +41,7 @@ void NVDRV::Open(HLERequestContext& ctx) {
         return;
     }

-    DeviceFD fd = nvdrv->Open(device_name);
+    DeviceFD fd = nvdrv->Open(device_name, session_id);

     rb.Push<DeviceFD>(fd);
     rb.PushEnum(fd != INVALID_NVDRV_FD ? NvResult::Success : NvResult::FileOperationFailed);

@@ -150,12 +154,29 @@ void NVDRV::Close(HLERequestContext& ctx) {

 void NVDRV::Initialize(HLERequestContext& ctx) {
     LOG_WARNING(Service_NVDRV, "(STUBBED) called");

-    is_initialized = true;
-
     IPC::ResponseBuilder rb{ctx, 3};
+    SCOPE_EXIT({
     rb.Push(ResultSuccess);
     rb.PushEnum(NvResult::Success);
+    });
+
+    if (is_initialized) {
+        // No need to initialize again
+        return;
+    }
+
+    IPC::RequestParser rp{ctx};
+    const auto process_handle{ctx.GetCopyHandle(0)};
+    // The transfer memory is lent to nvdrv as a work buffer since nvdrv is
+    // unable to allocate as much memory on its own. For HLE it's unnecessary to handle it
+    [[maybe_unused]] const auto transfer_memory_handle{ctx.GetCopyHandle(1)};
+    [[maybe_unused]] const auto transfer_memory_size = rp.Pop<u32>();
+
+    auto& container = nvdrv->GetContainer();
+    auto process = ctx.GetObjectFromHandle(process_handle);
+    session_id = container.OpenSession(process->DynamicCast<Kernel::KProcess*>());
+
+    is_initialized = true;
 }

 void NVDRV::QueryEvent(HLERequestContext& ctx) {

@@ -242,6 +263,9 @@ NVDRV::NVDRV(Core::System& system_, std::shared_ptr<Module> nvdrv_, const char*
     RegisterHandlers(functions);
 }

-NVDRV::~NVDRV() = default;
+NVDRV::~NVDRV() {
+    auto& container = nvdrv->GetContainer();
+    container.CloseSession(session_id);
+}

 } // namespace Service::Nvidia

@@ -35,6 +35,7 @@ private:

     u64 pid{};
     bool is_initialized{};
+    size_t session_id{};
     Common::ScratchBuffer<u8> output_buffer;
     Common::ScratchBuffer<u8> inline_output_buffer;
 };
@@ -87,19 +87,19 @@ Result CreateNvMapHandle(u32* out_nv_map_handle, Nvidia::Devices::nvmap& nvmap,
     R_SUCCEED();
 }

-Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle) {
+Result FreeNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Nvidia::DeviceFD nvmap_fd) {
     // Free the handle.
     Nvidia::Devices::nvmap::IocFreeParams free_params{
         .handle = handle,
     };
-    R_UNLESS(nvmap.IocFree(free_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
+    R_UNLESS(nvmap.IocFree(free_params, nvmap_fd) == Nvidia::NvResult::Success, VI::ResultOperationFailed);

     // We succeeded.
     R_SUCCEED();
 }

 Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::ProcessAddress buffer,
-                        u32 size) {
+                        u32 size, Nvidia::DeviceFD nvmap_fd) {
     // Assign the allocated memory to the handle.
     Nvidia::Devices::nvmap::IocAllocParams alloc_params{
         .handle = handle,

@@ -109,16 +109,15 @@ Result AllocNvMapHandle(Nvidia::Devices::nvmap& nvmap, u32 handle, Common::Proce
         .kind = 0,
         .address = GetInteger(buffer),
     };
-    R_UNLESS(nvmap.IocAlloc(alloc_params) == Nvidia::NvResult::Success, VI::ResultOperationFailed);
+    R_UNLESS(nvmap.IocAlloc(alloc_params, nvmap_fd) == Nvidia::NvResult::Success, VI::ResultOperationFailed);

     // We succeeded.
     R_SUCCEED();
 }

-Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv,
+Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv, Nvidia::DeviceFD nvmap_fd,
                                Common::ProcessAddress buffer, u32 size) {
     // Get the nvmap device.
-    auto nvmap_fd = nvdrv.Open("/dev/nvmap");
     auto nvmap = nvdrv.GetDevice<Nvidia::Devices::nvmap>(nvmap_fd);
     ASSERT(nvmap != nullptr);

@@ -127,11 +126,11 @@ Result AllocateHandleForBuffer(u32* out_handle, Nvidia::Module& nvdrv,

     // Ensure we maintain a clean state on failure.
     ON_RESULT_FAILURE {
-        ASSERT(R_SUCCEEDED(FreeNvMapHandle(*nvmap, *out_handle)));
+        ASSERT(R_SUCCEEDED(FreeNvMapHandle(*nvmap, *out_handle, nvmap_fd)));
     };

     // Assign the allocated memory to the handle.
-    R_RETURN(AllocNvMapHandle(*nvmap, *out_handle, buffer, size));
+    R_RETURN(AllocNvMapHandle(*nvmap, *out_handle, buffer, size, nvmap_fd));
 }

 constexpr auto SharedBufferBlockLinearFormat = android::PixelFormat::Rgba8888;

@@ -197,8 +196,12 @@ Result FbShareBufferManager::Initialize(u64* out_buffer_id, u64* out_layer_id, u
                                          std::addressof(m_buffer_page_group), m_system,
                                          SharedBufferSize));

+    auto& container = m_nvdrv->GetContainer();
+    m_session_id = container.OpenSession(m_system.ApplicationProcess());
+    m_nvmap_fd = m_nvdrv->Open("/dev/nvmap", m_session_id);
+
     // Create an nvmap handle for the buffer and assign the memory to it.
-    R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, map_address,
+    R_TRY(AllocateHandleForBuffer(std::addressof(m_buffer_nvmap_handle), *m_nvdrv, m_nvmap_fd, map_address,
                                   SharedBufferSize));

     // Record the display id.

@@ -6,6 +6,7 @@
 #include "common/math_util.h"
 #include "core/hle/service/nvnflinger/nvnflinger.h"
 #include "core/hle/service/nvnflinger/ui/fence.h"
+#include "core/hle/service/nvdrv/nvdata.h"

 namespace Kernel {
 class KPageGroup;

@@ -53,13 +54,15 @@ private:
     u64 m_layer_id = 0;
     u32 m_buffer_nvmap_handle = 0;
     SharedMemoryPoolLayout m_pool_layout = {};
+    Nvidia::DeviceFD m_nvmap_fd = {};
+    size_t m_session_id = {};
     std::unique_ptr<Kernel::KPageGroup> m_buffer_page_group;

     std::mutex m_guard;
     Core::System& m_system;
     Nvnflinger& m_flinger;
     std::shared_ptr<Nvidia::Module> m_nvdrv;
 };

 } // namespace Service::Nvnflinger
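Note: the shared-framebuffer path no longer opens /dev/nvmap inside every helper call; it opens one session for the application process, opens the device once under that session, and threads the fd through the allocation helpers. The call order, condensed into a sketch with assumed out-parameter names:

    // Sketch: nvmap setup order used by the shared-buffer path after this change.
    void SetUpSharedBufferNvmap(Core::System& system, Service::Nvidia::Module& nvdrv,
                                Service::Nvidia::DeviceFD& out_nvmap_fd,
                                size_t& out_session_id) {
        auto& container = nvdrv.GetContainer();
        // The shared framebuffer belongs to the application process.
        out_session_id = container.OpenSession(system.ApplicationProcess());
        // Open /dev/nvmap once under that session and reuse the fd for every
        // IocAlloc/IocFree issued by the helpers above.
        out_nvmap_fd = nvdrv.Open("/dev/nvmap", out_session_id);
    }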
@@ -126,7 +126,7 @@ void Nvnflinger::ShutdownLayers() {

 void Nvnflinger::SetNVDrvInstance(std::shared_ptr<Nvidia::Module> instance) {
     nvdrv = std::move(instance);
-    disp_fd = nvdrv->Open("/dev/nvdisp_disp0");
+    disp_fd = nvdrv->Open("/dev/nvdisp_disp0", 0);
 }

 std::optional<u64> Nvnflinger::OpenDisplay(std::string_view name) {
@@ -71,6 +71,8 @@ add_library(video_core STATIC
     host1x/ffmpeg/ffmpeg.h
     host1x/control.cpp
     host1x/control.h
+    host1x/gpu_device_memory_manager.cpp
+    host1x/gpu_device_memory_manager.h
     host1x/host1x.cpp
     host1x/host1x.h
     host1x/nvdec.cpp
@@ -85,7 +85,7 @@ struct GPU::Impl {
     void BindRenderer(std::unique_ptr<VideoCore::RendererBase> renderer_) {
         renderer = std::move(renderer_);
         rasterizer = renderer->ReadRasterizer();
-        host1x.MemoryManager().BindRasterizer(rasterizer);
+        host1x.MemoryManager().BindInterface(rasterizer);
     }

     /// Flush all current written commands into the host GPU for execution.

@@ -9,8 +9,7 @@ namespace Tegra {
 namespace Host1x {

 Host1x::Host1x(Core::System& system_)
-    : system{system_}, syncpoint_manager{}, memory_manager{system, 32, 12},
-      allocator{std::make_unique<Common::FlatAllocator<u32, 0, 32>>(1 << 12)} {}
+    : system{system_}, syncpoint_manager{}, memory_manager(system.DeviceMemory()) {}

 } // namespace Host1x

@@ -5,9 +5,8 @@

 #include "common/common_types.h"

-#include "common/address_space.h"
+#include "video_core/host1x/gpu_device_memory_manager.h"
 #include "video_core/host1x/syncpoint_manager.h"
-#include "video_core/memory_manager.h"

 namespace Core {
 class System;

@@ -29,27 +28,18 @@ public:
         return syncpoint_manager;
     }

-    Tegra::MemoryManager& MemoryManager() {
+    Tegra::MaxwellDeviceMemoryManager& MemoryManager() {
         return memory_manager;
     }

-    const Tegra::MemoryManager& MemoryManager() const {
+    const Tegra::MaxwellDeviceMemoryManager& MemoryManager() const {
         return memory_manager;
     }

-    Common::FlatAllocator<u32, 0, 32>& Allocator() {
-        return *allocator;
-    }
-
-    const Common::FlatAllocator<u32, 0, 32>& Allocator() const {
-        return *allocator;
-    }
-
 private:
     Core::System& system;
     SyncpointManager syncpoint_manager;
-    Tegra::MemoryManager memory_manager;
-    std::unique_ptr<Common::FlatAllocator<u32, 0, 32>> allocator;
+    Tegra::MaxwellDeviceMemoryManager memory_manager;
 };

 } // namespace Host1x
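Note: on the video_core side the old Tegra::MemoryManager plus FlatAllocator pair is replaced by a single MaxwellDeviceMemoryManager owned by Host1x; the pinning path uses AllocatePinned instead of the flat allocator. The calls nvdrv leans on in this commit are summarized below; argument shapes are inferred from the call sites above, so treat this as a sketch rather than the authoritative interface:

    // Sketch: the MaxwellDeviceMemoryManager surface exercised by this commit.
    void SmmuUsageSketch(Tegra::MaxwellDeviceMemoryManager& smmu,
                         Core::Memory::Memory* process_memory,
                         VAddr guest_address, size_t size, size_t session_id) {
        // Container::OpenSession registers the process and keeps the returned id.
        const size_t smmu_id = smmu.RegisterProcess(process_memory);

        // NvMap::AllocateHandle: reserve a device address and map the guest pages.
        const DAddr d_address = smmu.Allocate(size);
        smmu.Map(d_address, guest_address, size, session_id);

        // NvMap::UnmapHandle / FreeHandle: tear the mapping down again.
        smmu.Unmap(d_address, size);
        smmu.Free(d_address, size);

        // Container::CloseSession.
        smmu.UnregisterProcess(smmu_id);
    }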